From 360afc64d09d55b9864885112bcb7dbee02352d9 Mon Sep 17 00:00:00 2001 From: Million Integrals Date: Sun, 7 Apr 2019 19:58:56 -0700 Subject: [PATCH 001/162] Added VAE paper to the bibliography. --- docs/Bibliography.md | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/docs/Bibliography.md b/docs/Bibliography.md index 2bcf3548..7e7303ea 100644 --- a/docs/Bibliography.md +++ b/docs/Bibliography.md @@ -4,6 +4,13 @@ Below, I present a most likely incomplete list of works I referred to when I was on this library: +### Autoencoders + +- (Dec 2013) **Auto-Encoding Variational Bayes** + Diederik P Kingma, Max Welling + https://arxiv.org/abs/1312.6114 + + ### Learning rate and optimization - (2012) **Lecture 6.5 -- RmsProp: Divide the gradient by a running average of its recent magnitude** From 5a3d2b6de3fab55c19a3b68c45f7c90e4ed1b55f Mon Sep 17 00:00:00 2001 From: Million Integrals Date: Sun, 7 Apr 2019 19:59:03 -0700 Subject: [PATCH 002/162] Reordered MNIST file. --- vel/models/vision/mnist_cnn_01.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/vel/models/vision/mnist_cnn_01.py b/vel/models/vision/mnist_cnn_01.py index 40b2d54e..05b61c09 100644 --- a/vel/models/vision/mnist_cnn_01.py +++ b/vel/models/vision/mnist_cnn_01.py @@ -26,11 +26,6 @@ class Net(SupervisedModel): Dense - output (softmax) """ - @staticmethod - def _weight_initializer(tensor): - init.xavier_uniform_(tensor.weight, gain=init.calculate_gain('relu')) - init.constant_(tensor.bias, 0.0) - def __init__(self, img_rows, img_cols, img_channels, num_classes): super(Net, self).__init__() @@ -45,6 +40,11 @@ def __init__(self, img_rows, img_cols, img_channels, num_classes): self.dropout2 = nn.Dropout(p=0.5) self.fc2 = nn.Linear(128, num_classes) + @staticmethod + def _weight_initializer(tensor): + init.xavier_uniform_(tensor.weight, gain=init.calculate_gain('relu')) + init.constant_(tensor.bias, 0.0) + def reset_weights(self): self._weight_initializer(self.conv1) self._weight_initializer(self.conv2) From c156b6969309efef7438f0f74abb1a239b0d03eb Mon Sep 17 00:00:00 2001 From: Million Integrals Date: Sun, 7 Apr 2019 20:27:50 -0700 Subject: [PATCH 003/162] Add default varargs to be empty. --- vel/launcher.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/vel/launcher.py b/vel/launcher.py index e2a00a47..bb44f733 100644 --- a/vel/launcher.py +++ b/vel/launcher.py @@ -13,7 +13,7 @@ def main(): parser.add_argument('config', metavar='FILENAME', help='Configuration file for the run') parser.add_argument('command', metavar='COMMAND', help='A command to run') - parser.add_argument('varargs', nargs='*', metavar='VARARGS', help='Extra options to the command') + parser.add_argument('varargs', nargs='*', default=[], metavar='VARARGS', help='Extra options to the command') parser.add_argument('-r', '--run_number', type=int, default=0, help="A run number") parser.add_argument('-d', '--device', default='cuda', help="A device to run the model on") parser.add_argument('-s', '--seed', type=int, default=None, help="Random seed for the project") From 5d9f112234dc030a36156bce18f2ac298fd5a55c Mon Sep 17 00:00:00 2001 From: Million Integrals Date: Sun, 7 Apr 2019 20:42:44 -0700 Subject: [PATCH 004/162] Easy script model config. 
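This makes it possible to assemble a ModelConfig directly from a script or
notebook instead of a YAML file. A minimal usage sketch, assuming the code is
run from somewhere inside a Vel project so that find_project_directory() can
locate the project configuration file (the experiment name below is purely
illustrative):

    from vel.api import ModelConfig

    # Merges the project-level configuration with the optional per-run dict
    config = ModelConfig.script(model_name='my_experiment', device='cpu')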
--- vel/api/__init__.py | 3 +-- vel/{internals => api}/model_config.py | 26 +++++++++++++++++++++----- 2 files changed, 22 insertions(+), 7 deletions(-) rename vel/{internals => api}/model_config.py (90%) diff --git a/vel/api/__init__.py b/vel/api/__init__.py index ac1eec1f..3eb8c6e3 100644 --- a/vel/api/__init__.py +++ b/vel/api/__init__.py @@ -11,5 +11,4 @@ from .source import Source, TrainingData, TextData from .storage import Storage from .train_phase import TrainPhase, EmptyTrainPhase - -from vel.internals.model_config import ModelConfig +from .model_config import ModelConfig diff --git a/vel/internals/model_config.py b/vel/api/model_config.py similarity index 90% rename from vel/internals/model_config.py rename to vel/api/model_config.py index 979fcdee..7b1e1225 100644 --- a/vel/internals/model_config.py +++ b/vel/api/model_config.py @@ -1,5 +1,6 @@ import datetime as dtm import os.path +import typing from vel.exceptions import VelInitializationException from vel.internals.parser import Parser @@ -58,14 +59,29 @@ def from_file(cls, filename: str, run_number: int, continue_training: bool = Fal ) @classmethod - def from_memory(cls, model_data: dict, run_number: int, project_dir: str, - continue_training=False, seed: int = None, device: str = 'cuda', params=None): + def script(cls, model_name: str = 'script', configuration: typing.Optional[dict] = None, run_number: int = 1, + continue_training=False, seed: int = None, device: str = 'cuda', params=None): """ Create model config from supplied data """ + if configuration is None: + configuration = {} + + configuration['name'] = model_name + + project_config_path = ModelConfig.find_project_directory(os.path.dirname(os.path.abspath(os.getcwd()))) + + with open(os.path.join(project_config_path, cls.PROJECT_FILE_NAME), 'r') as fp: + project_config_contents = Parser.parse(fp) + + aggregate_dictionary = { + **project_config_contents, + **configuration + } + return ModelConfig( - filename="[memory]", - configuration=model_data, + filename="[script]", + configuration=aggregate_dictionary, run_number=run_number, - project_dir=project_dir, + project_dir=project_config_path, continue_training=continue_training, seed=seed, device=device, From 96bb187f378cbfee84621c793593226aafa84fed Mon Sep 17 00:00:00 2001 From: Million Integrals Date: Sun, 7 Apr 2019 20:44:48 -0700 Subject: [PATCH 005/162] train_data has been renamed to data. 
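Newer torchvision releases expose the raw dataset arrays as a `data` attribute
and deprecate the old `train_data` name, so the normalization code switches to
the new attribute. Roughly (the dataset construction is shown only for
illustration):

    train_dataset = datasets.MNIST(path, train=True, download=True)
    train_data = train_dataset.data   # previously: train_dataset.train_data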
--- vel/sources/vision/cifar10.py | 2 +- vel/sources/vision/mnist.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/vel/sources/vision/cifar10.py b/vel/sources/vision/cifar10.py index 452f5eb7..6d4a0861 100644 --- a/vel/sources/vision/cifar10.py +++ b/vel/sources/vision/cifar10.py @@ -20,7 +20,7 @@ def create(model_config, batch_size, normalize=True, num_workers=0, augmentation augmentations = [ToArray()] + (augmentations if augmentations is not None else []) if normalize: - train_data = train_dataset.train_data + train_data = train_dataset.data mean_value = (train_data / 255).mean(axis=(0, 1, 2)) std_value = (train_data / 255).std(axis=(0, 1, 2)) diff --git a/vel/sources/vision/mnist.py b/vel/sources/vision/mnist.py index 24f26646..d1a39b84 100644 --- a/vel/sources/vision/mnist.py +++ b/vel/sources/vision/mnist.py @@ -18,7 +18,7 @@ def create(model_config, batch_size, normalize=True, num_workers=0, augmentation augmentations = [ToArray()] + (augmentations if augmentations is not None else []) if normalize: - train_data = train_dataset.train_data + train_data = train_dataset.data mean_value = (train_data.double() / 255).mean().item() std_value = (train_data.double() / 255).std().item() From 7a44a8e4c1dc65713ff1c43ead059cdfd097e77f Mon Sep 17 00:00:00 2001 From: Million Integrals Date: Sun, 7 Apr 2019 21:05:37 -0700 Subject: [PATCH 006/162] Added matplotlib dependency. --- setup.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/setup.py b/setup.py index cb5f1854..b336050f 100644 --- a/setup.py +++ b/setup.py @@ -28,14 +28,15 @@ install_requires=[ 'attrs', 'cloudpickle', + 'matplotlib', 'numpy', 'opencv-python', 'pandas', 'pyyaml', 'scikit-learn', 'torch ~= 1.0', - 'torchvision', 'torchtext', + 'torchvision', 'tqdm' ], extras_require={ From 6b5ac6da6cc4961b41b4b1c70883b5a0cc62a856 Mon Sep 17 00:00:00 2001 From: Million Integrals Date: Sun, 7 Apr 2019 21:05:52 -0700 Subject: [PATCH 007/162] Fixed a typo. --- vel/augmentations/to_array.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/vel/augmentations/to_array.py b/vel/augmentations/to_array.py index 3f8c980f..ffbd353d 100644 --- a/vel/augmentations/to_array.py +++ b/vel/augmentations/to_array.py @@ -4,7 +4,7 @@ class ToArray(data.Augmentation): - """ Convert imate to an array of floats """ + """ Convert image to an array of floats """ def __init__(self, mode='x', tags=None): super().__init__(mode, tags) From 82148e07ded14c76bf13af2dd9d68beaa0d50fc0 Mon Sep 17 00:00:00 2001 From: Million Integrals Date: Sun, 7 Apr 2019 21:06:16 -0700 Subject: [PATCH 008/162] Unsupervised MNIST dataset. 
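The new Unsupervised augmentation turns a supervised (input, label) pair into
an (input, input) pair, so autoencoder-style models can reuse the existing
supervised training loop; the MNIST source gains an `unsupervised` flag that
appends it to the augmentation list. A tiny sketch (variable names are
illustrative):

    aug = Unsupervised()
    x_out, y_out = aug(image, label)
    # x_out is the image unchanged, y_out is that same image; the label is dropped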
--- vel/api/__init__.py | 2 +- vel/api/source.py | 11 ++++++----- vel/augmentations/unsupervised.py | 14 ++++++++++++++ vel/modules/input/embedding.py | 6 +++--- vel/notebook/__init__.py | 3 ++- vel/notebook/defaults.py | 6 ++++++ vel/sources/img_dir_source.py | 4 ++-- vel/sources/nlp/imdb.py | 4 ++-- vel/sources/vision/cifar10.py | 4 ++-- vel/sources/vision/mnist.py | 10 +++++++--- 10 files changed, 45 insertions(+), 19 deletions(-) create mode 100644 vel/augmentations/unsupervised.py create mode 100644 vel/notebook/defaults.py diff --git a/vel/api/__init__.py b/vel/api/__init__.py index 3eb8c6e3..411a6eaa 100644 --- a/vel/api/__init__.py +++ b/vel/api/__init__.py @@ -8,7 +8,7 @@ from .optimizer import OptimizerFactory from .schedule import Schedule from .scheduler import SchedulerFactory -from .source import Source, TrainingData, TextData +from .source import Source, SupervisedTrainingData, SupervisedTextData from .storage import Storage from .train_phase import TrainPhase, EmptyTrainPhase from .model_config import ModelConfig diff --git a/vel/api/source.py b/vel/api/source.py index 083221ee..ebfffb5d 100644 --- a/vel/api/source.py +++ b/vel/api/source.py @@ -1,5 +1,7 @@ import torch.utils.data as data +from .data import DataFlow + class Source: """ Source of data for supervised learning algorithms """ @@ -31,7 +33,7 @@ def val_iterations_per_epoch(self): raise NotImplementedError -class TextData(Source): +class SupervisedTextData(Source): """ An NLP torchtext data source """ def __init__(self, train_source, val_source, train_iterator, val_iterator, data_field, target_field): super().__init__() @@ -68,10 +70,9 @@ def val_iterations_per_epoch(self): return len(self.val_iterator) -class TrainingData(Source): +class SupervisedTrainingData(Source): """ Most common source of data combining a basic datasource and sampler """ def __init__(self, train_source, val_source, num_workers, batch_size, augmentations=None): - import vel.api.data as vel_data super().__init__() @@ -84,8 +85,8 @@ def __init__(self, train_source, val_source, num_workers, batch_size, augmentati self.augmentations = augmentations # Derived values - self.train_ds = vel_data.DataFlow(self.train_source, augmentations, tag='train') - self.val_ds = vel_data.DataFlow(self.val_source, augmentations, tag='val') + self.train_ds = DataFlow(self.train_source, augmentations, tag='train') + self.val_ds = DataFlow(self.val_source, augmentations, tag='val') self._train_loader = data.DataLoader( self.train_ds, batch_size=batch_size, shuffle=True, num_workers=num_workers diff --git a/vel/augmentations/unsupervised.py b/vel/augmentations/unsupervised.py new file mode 100644 index 00000000..2caeb448 --- /dev/null +++ b/vel/augmentations/unsupervised.py @@ -0,0 +1,14 @@ +import vel.api.data as data + + +class Unsupervised(data.Augmentation): + """ Simply transform supervised to an unsupervised dataset, cloning data to a target """ + def __init__(self): + super().__init__('both', None) + + def __call__(self, x_data, y_data): + return x_data, x_data + + +def create(): + return Unsupervised() diff --git a/vel/modules/input/embedding.py b/vel/modules/input/embedding.py index ff7d790c..ab4aee1c 100644 --- a/vel/modules/input/embedding.py +++ b/vel/modules/input/embedding.py @@ -1,13 +1,13 @@ import torch.nn as nn -from vel.api import LinearBackboneModel, TextData, ModelFactory +from vel.api import LinearBackboneModel, SupervisedTextData, ModelFactory class EmbeddingInput(LinearBackboneModel): """ Learnable Embedding input layer """ def __init__(self, 
alphabet_size: int, output_dim: int, pretrained: bool=False, frozen: bool=False, - source: TextData=None): + source: SupervisedTextData=None): super().__init__() self._output_dim = output_dim @@ -34,7 +34,7 @@ def forward(self, input_data): return self.layer(input_data) -def create(alphabet_size: int, output_dim: int, pretrained: bool=False, frozen: bool=False, source: TextData=None): +def create(alphabet_size: int, output_dim: int, pretrained: bool=False, frozen: bool=False, source: SupervisedTextData=None): """ Vel factory function """ def instantiate(**_): return EmbeddingInput(alphabet_size, output_dim, pretrained=pretrained, frozen=frozen, source=source) diff --git a/vel/notebook/__init__.py b/vel/notebook/__init__.py index 5d173dba..b29639c0 100644 --- a/vel/notebook/__init__.py +++ b/vel/notebook/__init__.py @@ -1 +1,2 @@ -from .loader import load \ No newline at end of file +from .loader import load +from .defaults import reasonable_notbook_defaults diff --git a/vel/notebook/defaults.py b/vel/notebook/defaults.py new file mode 100644 index 00000000..044d9446 --- /dev/null +++ b/vel/notebook/defaults.py @@ -0,0 +1,6 @@ + + +def reasonable_notbook_defaults(): + """ Notbook defaults """ + import matplotlib.pyplot as plt + plt.rcParams['figure.figsize'] = [10, 5] diff --git a/vel/sources/img_dir_source.py b/vel/sources/img_dir_source.py index c1df3120..1e0c138e 100644 --- a/vel/sources/img_dir_source.py +++ b/vel/sources/img_dir_source.py @@ -2,7 +2,7 @@ import torchvision.datasets as ds -from vel.api import TrainingData +from vel.api import SupervisedTrainingData class ImageDirSource(ds.ImageFolder): @@ -20,7 +20,7 @@ def create(model_config, path, num_workers, batch_size, augmentations=None, tta= train_ds = ImageDirSource(train_path) val_ds = ImageDirSource(valid_path) - return TrainingData( + return SupervisedTrainingData( train_ds, val_ds, num_workers=num_workers, diff --git a/vel/sources/nlp/imdb.py b/vel/sources/nlp/imdb.py index 6a9310cb..c6ac6fe9 100644 --- a/vel/sources/nlp/imdb.py +++ b/vel/sources/nlp/imdb.py @@ -7,7 +7,7 @@ import torchtext.data as data -from vel.api import TextData +from vel.api import SupervisedTextData class IMDBCached(imdb.IMDB): @@ -68,6 +68,6 @@ def create(model_config, batch_size, vectors=None): shuffle=True ) - return TextData( + return SupervisedTextData( train_source, test_source, train_iterator, test_iterator, text_field, label_field ) diff --git a/vel/sources/vision/cifar10.py b/vel/sources/vision/cifar10.py index 6d4a0861..4d8c02ee 100644 --- a/vel/sources/vision/cifar10.py +++ b/vel/sources/vision/cifar10.py @@ -1,6 +1,6 @@ from torchvision import datasets -from vel.api import TrainingData +from vel.api import SupervisedTrainingData from vel.augmentations.normalize import Normalize from vel.augmentations.to_tensor import ToTensor @@ -28,7 +28,7 @@ def create(model_config, batch_size, normalize=True, num_workers=0, augmentation augmentations.append(ToTensor()) - return TrainingData( + return SupervisedTrainingData( train_dataset, test_dataset, batch_size=batch_size, diff --git a/vel/sources/vision/mnist.py b/vel/sources/vision/mnist.py index d1a39b84..0ac79aae 100644 --- a/vel/sources/vision/mnist.py +++ b/vel/sources/vision/mnist.py @@ -1,14 +1,15 @@ from torchvision import datasets -from vel.api import TrainingData +from vel.api import SupervisedTrainingData from vel.augmentations.normalize import Normalize from vel.augmentations.to_tensor import ToTensor from vel.augmentations.to_array import ToArray +from vel.augmentations.unsupervised 
import Unsupervised -def create(model_config, batch_size, normalize=True, num_workers=0, augmentations=None): +def create(model_config, batch_size, normalize=True, num_workers=0, augmentations=None, unsupervised=False): """ Create a MNIST dataset, normalized """ path = model_config.data_dir('mnist') @@ -26,7 +27,10 @@ def create(model_config, batch_size, normalize=True, num_workers=0, augmentation augmentations.append(ToTensor()) - return TrainingData( + if unsupervised: + augmentations.append(Unsupervised()) + + return SupervisedTrainingData( train_dataset, test_dataset, num_workers=num_workers, From 24a25561dd34c78d82064da60aba84ca60bc895e Mon Sep 17 00:00:00 2001 From: Million Integrals Date: Sun, 7 Apr 2019 22:06:43 -0700 Subject: [PATCH 009/162] Mnist Autoencoder. --- .../mnist/mnist_cnn_autoencoder.yaml | 34 +++++++ vel/launcher.py | 2 +- vel/models/autoencoder/__init__.py | 0 .../autoencoder/mnist_cnn_autoencoder.py | 93 +++++++++++++++++++ vel/modules/layers.py | 11 +++ 5 files changed, 139 insertions(+), 1 deletion(-) create mode 100644 examples-configs/autoencoders/mnist/mnist_cnn_autoencoder.yaml create mode 100644 vel/models/autoencoder/__init__.py create mode 100644 vel/models/autoencoder/mnist_cnn_autoencoder.py diff --git a/examples-configs/autoencoders/mnist/mnist_cnn_autoencoder.yaml b/examples-configs/autoencoders/mnist/mnist_cnn_autoencoder.yaml new file mode 100644 index 00000000..ed2b9536 --- /dev/null +++ b/examples-configs/autoencoders/mnist/mnist_cnn_autoencoder.yaml @@ -0,0 +1,34 @@ +name: 'mnist_cnn_autoenoder' + + +model: + name: vel.models.autoencoder.mnist_cnn_autoencoder + img_rows: 28 + img_cols: 28 + img_channels: 1 + num_classes: 10 + + +source: + name: vel.sources.vision.mnist + batch_size: 128 + normalize: False + num_workers: 4 + unsupervised: true + + +commands: + train: + name: vel.commands.train_command + epochs: 12 + log_frequency: 100 + + optimizer: + name: vel.optimizers.adadelta + + checkpoint: + metric: 'val:loss' + + + visdom: + name: vel.commands.vis_store_command diff --git a/vel/launcher.py b/vel/launcher.py index bb44f733..e4c68114 100644 --- a/vel/launcher.py +++ b/vel/launcher.py @@ -3,7 +3,7 @@ import multiprocessing import sys -from vel.internals.model_config import ModelConfig +from vel.api.model_config import ModelConfig from vel.internals.parser import Parser diff --git a/vel/models/autoencoder/__init__.py b/vel/models/autoencoder/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/vel/models/autoencoder/mnist_cnn_autoencoder.py b/vel/models/autoencoder/mnist_cnn_autoencoder.py new file mode 100644 index 00000000..3dfd7b93 --- /dev/null +++ b/vel/models/autoencoder/mnist_cnn_autoencoder.py @@ -0,0 +1,93 @@ +import torch.nn as nn +import torch.nn.init as init +import torch.nn.functional as F + +import vel.util.network as net_util + +from vel.api import SupervisedModel, ModelFactory +from vel.metrics.loss_metric import Loss +from vel.modules.layers import Flatten, Reshape + + +class MnistCnnAutoencoder(SupervisedModel): + """ + A simple MNIST classification model. 
+ + Conv 3x3 - 32 + Conv 3x3 - 64 + MaxPool 2x2 + Dropout 0.25 + Flatten + Dense - 128 + Dense - output (softmax) + """ + + def __init__(self, img_rows, img_cols, img_channels, num_classes): + super(MnistCnnAutoencoder, self).__init__() + + self.flattened_size = (img_rows - 4) // 2 * (img_cols - 4) // 2 * 64 + + layer_series = [ + (3, 1, 1), + (3, 1, 2), + (3, 1, 2), + ] + + self.final_width = net_util.convolutional_layer_series(img_rows, layer_series) + self.final_height = net_util.convolutional_layer_series(img_cols, layer_series) + + self.encoder = nn.Sequential( + nn.Conv2d(in_channels=img_channels, out_channels=16, kernel_size=(3, 3), padding=1), + nn.ReLU(True), + nn.Conv2d(in_channels=16, out_channels=32, kernel_size=(3, 3), stride=2, padding=1), + nn.ReLU(True), + nn.Conv2d(in_channels=32, out_channels=32, kernel_size=(3, 3), stride=2, padding=1), + Flatten(), + nn.Linear(self.final_width * self.final_height * 32, 32) + ) + + self.decoder = nn.Sequential( + nn.Linear(32, self.final_width * self.final_height * 32), + nn.ReLU(True), + Reshape(32, self.final_width, self.final_height), + nn.ConvTranspose2d(in_channels=32, out_channels=32, kernel_size=3, stride=2, padding=1, output_padding=1), + nn.ReLU(True), + nn.ConvTranspose2d(in_channels=32, out_channels=16, kernel_size=3, stride=2, padding=1, output_padding=1), + nn.ReLU(True), + nn.ConvTranspose2d(in_channels=16, out_channels=img_channels, kernel_size=3, padding=1), + ) + + @staticmethod + def _weight_initializer(tensor): + init.xavier_uniform_(tensor.weight, gain=init.calculate_gain('relu')) + init.constant_(tensor.bias, 0.0) + + def reset_weights(self): + for m in self.children(): + if isinstance(m, nn.Conv2d): + self._weight_initializer(m) + elif isinstance(m, nn.ConvTranspose2d): + self._weight_initializer(m) + elif isinstance(m, nn.Linear): + self._weight_initializer(m) + + def forward(self, x): + encoding = self.encoder(x) + decoded = self.decoder(encoding) + return decoded + + def loss_value(self, x_data, y_true, y_pred): + """ Calculate a value of loss function """ + return F.mse_loss(y_pred, y_true) + + def metrics(self): + """ Set of metrics for this model """ + return [Loss()] + + +def create(img_rows, img_cols, img_channels, num_classes): + """ Vel factory function """ + def instantiate(**_): + return MnistCnnAutoencoder(img_rows, img_cols, img_channels, num_classes) + + return ModelFactory.generic(instantiate) diff --git a/vel/modules/layers.py b/vel/modules/layers.py index 6b694334..9a95e31d 100644 --- a/vel/modules/layers.py +++ b/vel/modules/layers.py @@ -39,6 +39,17 @@ def forward(self, x): return x.view(x.size(0), -1) +class Reshape(nn.Module): + """ Flatten input vector """ + def __init__(self, *sizes): + super().__init__() + + self.sizes = sizes + + def forward(self, x): + return x.view(x.size(0), *self.sizes) + + class OneHotEncode(nn.Module): """ One-hot encoding layer """ def __init__(self, num_classes): From ed88af125d1156216c085b1d98bf3ffc563b308a Mon Sep 17 00:00:00 2001 From: Million Integrals Date: Sun, 7 Apr 2019 22:44:00 -0700 Subject: [PATCH 010/162] Loading pretrained models. 
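This adds convenience helpers for scripts and notebooks: resolving paths
relative to the project directory and loading the latest checkpoint of a
trained model. A rough usage sketch (the config path is illustrative):

    from vel.api import ModelConfig

    config = ModelConfig.from_file(
        'examples-configs/autoencoders/mnist/mnist_cnn_autoencoder.yaml'
    )
    # Loads weights from the most recent stored epoch; raises
    # VelInitializationException if no trained checkpoint exists yet.
    model = config.load_trained_model()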
--- vel/api/model_config.py | 31 ++++++++++++++++++++++++++++++- 1 file changed, 30 insertions(+), 1 deletion(-) diff --git a/vel/api/model_config.py b/vel/api/model_config.py index 7b1e1225..e2f98cd2 100644 --- a/vel/api/model_config.py +++ b/vel/api/model_config.py @@ -6,6 +6,8 @@ from vel.internals.parser import Parser from vel.internals.provider import Provider +from .info import TrainingInfo + class ModelConfig: """ @@ -30,8 +32,12 @@ def find_project_directory(start_path) -> str: else: return ModelConfig.find_project_directory(up_path) + @staticmethod + def from_project_directory(path) -> str: + return os.path.join(ModelConfig.find_project_directory('.'), path) + @classmethod - def from_file(cls, filename: str, run_number: int, continue_training: bool = False, seed: int = None, + def from_file(cls, filename: str, run_number: int = 1, continue_training: bool = False, seed: int = None, device: str = 'cuda', params=None): """ Create model config from file """ with open(filename, 'r') as fp: @@ -209,3 +215,26 @@ def quit_banner(self) -> None: # Small UI utils def __repr__(self): return f"" + + #################################################################################################################### + # CONVENIENCE METHODS FOR SCRIPTS + def load_trained_model(self): + """ Load a latest trained model from storage """ + model = self.provide("model").instantiate() + storage = self.provide("storage") + + last_epoch_idx = storage.last_epoch_idx() + + if last_epoch_idx == 0: + raise VelInitializationException("No trained model available") + + training_info = TrainingInfo( + start_epoch_idx=last_epoch_idx, + run_name=self.run_name, + ) + + model_state, hidden_state = storage.load(training_info) + + model.load_state_dict(model_state) + + return model From e52df95a3eb664d9c4f68c667cf000633782d4df Mon Sep 17 00:00:00 2001 From: Million Integrals Date: Sun, 7 Apr 2019 22:44:16 -0700 Subject: [PATCH 011/162] Better optimizer for the task. --- .../autoencoders/mnist/mnist_cnn_autoencoder.yaml | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/examples-configs/autoencoders/mnist/mnist_cnn_autoencoder.yaml b/examples-configs/autoencoders/mnist/mnist_cnn_autoencoder.yaml index ed2b9536..6d4651dd 100644 --- a/examples-configs/autoencoders/mnist/mnist_cnn_autoencoder.yaml +++ b/examples-configs/autoencoders/mnist/mnist_cnn_autoencoder.yaml @@ -17,14 +17,17 @@ source: unsupervised: true +optimizer: + name: vel.optimizers.adam + lr: 1.0e-3 + + commands: train: name: vel.commands.train_command epochs: 12 log_frequency: 100 - optimizer: - name: vel.optimizers.adadelta checkpoint: metric: 'val:loss' From d3d1fe71d1e1563cfc3ae62d00f3c55371d5b7bf Mon Sep 17 00:00:00 2001 From: Million Integrals Date: Sun, 7 Apr 2019 22:48:01 -0700 Subject: [PATCH 012/162] Turned methods of Source into properties. 
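The Source accessors become read-only properties, so call sites drop the
parentheses, for example:

    # before
    for data, target in source.train_loader():
        ...

    # after
    for data, target in source.train_loader:
        ...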
--- vel/api/learner.py | 6 +++--- vel/api/source.py | 18 ++++++++++++++++++ vel/commands/augvis_command.py | 2 +- vel/commands/lr_find_command.py | 4 ++-- vel/commands/summary_command.py | 2 +- vel/commands/train_command.py | 2 +- vel/phase/cycle.py | 2 +- vel/phase/generic.py | 2 +- 8 files changed, 28 insertions(+), 10 deletions(-) diff --git a/vel/api/learner.py b/vel/api/learner.py index 98bb4b89..a99fbcec 100644 --- a/vel/api/learner.py +++ b/vel/api/learner.py @@ -60,9 +60,9 @@ def train_epoch(self, epoch_info, source: 'vel.api.Source', interactive=True): self.train() if interactive: - iterator = tqdm.tqdm(source.train_loader(), desc="Training", unit="iter", file=sys.stdout) + iterator = tqdm.tqdm(source.train_loader, desc="Training", unit="iter", file=sys.stdout) else: - iterator = source.train_loader() + iterator = source.train_loader for batch_idx, (data, target) in enumerate(iterator): batch_info = BatchInfo(epoch_info, batch_idx) @@ -77,7 +77,7 @@ def validation_epoch(self, epoch_info, source: 'vel.api.Source'): """ Run a single evaluation epoch """ self.eval() - iterator = tqdm.tqdm(source.val_loader(), desc="Validation", unit="iter", file=sys.stdout) + iterator = tqdm.tqdm(source.val_loader, desc="Validation", unit="iter", file=sys.stdout) with torch.no_grad(): for batch_idx, (data, target) in enumerate(iterator): diff --git a/vel/api/source.py b/vel/api/source.py index ebfffb5d..a566521a 100644 --- a/vel/api/source.py +++ b/vel/api/source.py @@ -8,26 +8,32 @@ class Source: def __init__(self): pass + @property def train_loader(self): """ PyTorch loader of training data """ raise NotImplementedError + @property def val_loader(self): """ PyTorch loader of validation data """ raise NotImplementedError + @property def train_dataset(self): """ Return the training dataset """ raise NotImplementedError + @property def val_dataset(self): """ Return the validation dataset """ raise NotImplementedError + @property def train_iterations_per_epoch(self): """ Return number of iterations per epoch """ raise NotImplementedError + @property def val_iterations_per_epoch(self): """ Return number of iterations per epoch - validation """ raise NotImplementedError @@ -45,26 +51,32 @@ def __init__(self, train_source, val_source, train_iterator, val_iterator, data_ self.data_field = data_field self.target_field = target_field + @property def train_loader(self): """ PyTorch loader of training data """ return self.train_iterator + @property def val_loader(self): """ PyTorch loader of validation data """ return self.val_iterator + @property def train_dataset(self): """ Return the training dataset """ return self.train_source + @property def val_dataset(self): """ Return the validation dataset """ return self.val_source + @property def train_iterations_per_epoch(self): """ Return number of iterations per epoch """ return len(self.train_iterator) + @property def val_iterations_per_epoch(self): """ Return number of iterations per epoch - validation """ return len(self.val_iterator) @@ -96,26 +108,32 @@ def __init__(self, train_source, val_source, num_workers, batch_size, augmentati self.val_ds, batch_size=batch_size, shuffle=False, num_workers=num_workers ) + @property def train_loader(self): """ PyTorch loader of training data """ return self._train_loader + @property def val_loader(self): """ PyTorch loader of validation data """ return self._val_loader + @property def train_dataset(self): """ Return the training dataset """ return self.train_ds + @property def val_dataset(self): """ Return the 
validation dataset """ return self.val_ds + @property def train_iterations_per_epoch(self): """ Return number of iterations per epoch """ return len(self._train_loader) + @property def val_iterations_per_epoch(self): """ Return number of iterations per epoch - validation """ return len(self._val_loader) diff --git a/vel/commands/augvis_command.py b/vel/commands/augvis_command.py index bd56ee6a..df4f5352 100644 --- a/vel/commands/augvis_command.py +++ b/vel/commands/augvis_command.py @@ -13,7 +13,7 @@ def __init__(self, source: Source, samples, cases): def run(self): """ Run the visualization """ - dataset = self.source.train_dataset() + dataset = self.source.train_dataset num_samples = len(dataset) fig, ax = plt.subplots(self.cases, self.samples+1) diff --git a/vel/commands/lr_find_command.py b/vel/commands/lr_find_command.py index f3a67c47..f1de1e20 100644 --- a/vel/commands/lr_find_command.py +++ b/vel/commands/lr_find_command.py @@ -75,7 +75,7 @@ def run(self): # Optimizer shoudl be created after freeze optimizer = self.optimizer_factory.instantiate(learner.model) - iterator = iter(self.source.train_loader()) + iterator = iter(self.source.train_loader) # Metrics to track through this training metrics = learner.metrics() + [AveragingNamedMetric("lr")] @@ -101,7 +101,7 @@ def run(self): try: data, target = next(iterator) except StopIteration: - iterator = iter(self.source.train_loader()) + iterator = iter(self.source.train_loader) data, target = next(iterator) learner.train_batch(batch_info, data, target) diff --git a/vel/commands/summary_command.py b/vel/commands/summary_command.py index 8da53b8a..37393b84 100644 --- a/vel/commands/summary_command.py +++ b/vel/commands/summary_command.py @@ -12,7 +12,7 @@ def run(self, *args): if self.source is None: self.model.summary() else: - x_data, y_data = next(iter(self.source.train_loader())) + x_data, y_data = next(iter(self.source.train_loader)) self.model.summary(input_size=x_data.shape[1:]) diff --git a/vel/commands/train_command.py b/vel/commands/train_command.py index 7c798263..f9b6afd2 100644 --- a/vel/commands/train_command.py +++ b/vel/commands/train_command.py @@ -49,7 +49,7 @@ def run(self): epoch_info = api.EpochInfo( training_info=training_info, global_epoch_idx=global_epoch_idx, - batches_per_epoch=self.source.train_iterations_per_epoch(), + batches_per_epoch=self.source.train_iterations_per_epoch, optimizer=optimizer ) diff --git a/vel/phase/cycle.py b/vel/phase/cycle.py index 7374bcb1..1c89915c 100644 --- a/vel/phase/cycle.py +++ b/vel/phase/cycle.py @@ -137,7 +137,7 @@ def epoch_info(self, training_info: TrainingInfo, global_idx: int, local_idx: in training_info=training_info, global_epoch_idx=global_idx, local_epoch_idx=local_idx, - batches_per_epoch=self._source.train_iterations_per_epoch(), + batches_per_epoch=self._source.train_iterations_per_epoch, optimizer=self._optimizer_instance, # Add special callback for this epoch callbacks=[self.special_callback] + training_info.callbacks diff --git a/vel/phase/generic.py b/vel/phase/generic.py index 87d0f571..25c52c1e 100644 --- a/vel/phase/generic.py +++ b/vel/phase/generic.py @@ -27,7 +27,7 @@ def epoch_info(self, training_info: TrainingInfo, global_idx: int, local_idx: in training_info=training_info, global_epoch_idx=global_idx, local_epoch_idx=local_idx, - batches_per_epoch=self._source.train_iterations_per_epoch(), + batches_per_epoch=self._source.train_iterations_per_epoch, optimizer=self._optimizer_instance ) From 17a5531151ee99abd0e17be24aecd0b4a4f9b975 Mon Sep 17 00:00:00 
2001 From: Million Integrals Date: Sun, 7 Apr 2019 23:23:12 -0700 Subject: [PATCH 013/162] Fixing model summary. --- vel/api/model.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/vel/api/model.py b/vel/api/model.py index 53098d3e..9f776348 100644 --- a/vel/api/model.py +++ b/vel/api/model.py @@ -40,7 +40,7 @@ def summary(self, input_size=None, hashsummary=False): if input_size is None: print(self) print("-" * 120) - number = sum(p.numel() for p in self.model.parameters()) + number = sum(p.numel() for p in self.parameters()) print("Number of model parameters: {:,}".format(number)) print("-" * 120) else: From 89c110fbf4726244e05ba98788722f7ae17ef3c7 Mon Sep 17 00:00:00 2001 From: Million Integrals Date: Sun, 7 Apr 2019 23:37:22 -0700 Subject: [PATCH 014/162] Some more changes to MNIST autoencoder. --- .../mnist/mnist_cnn_autoencoder.yaml | 3 +- .../autoencoder/mnist_cnn_autoencoder.py | 41 ++++++++++--------- 2 files changed, 23 insertions(+), 21 deletions(-) diff --git a/examples-configs/autoencoders/mnist/mnist_cnn_autoencoder.yaml b/examples-configs/autoencoders/mnist/mnist_cnn_autoencoder.yaml index 6d4651dd..00501f4c 100644 --- a/examples-configs/autoencoders/mnist/mnist_cnn_autoencoder.yaml +++ b/examples-configs/autoencoders/mnist/mnist_cnn_autoencoder.yaml @@ -6,7 +6,8 @@ model: img_rows: 28 img_cols: 28 img_channels: 1 - num_classes: 10 + channels: [8, 16, 16] + representation_length: 16 source: diff --git a/vel/models/autoencoder/mnist_cnn_autoencoder.py b/vel/models/autoencoder/mnist_cnn_autoencoder.py index 3dfd7b93..002cc576 100644 --- a/vel/models/autoencoder/mnist_cnn_autoencoder.py +++ b/vel/models/autoencoder/mnist_cnn_autoencoder.py @@ -22,39 +22,39 @@ class MnistCnnAutoencoder(SupervisedModel): Dense - output (softmax) """ - def __init__(self, img_rows, img_cols, img_channels, num_classes): + def __init__(self, img_rows, img_cols, img_channels, channels=[16, 32, 32], representation_length=32): super(MnistCnnAutoencoder, self).__init__() - self.flattened_size = (img_rows - 4) // 2 * (img_cols - 4) // 2 * 64 - layer_series = [ (3, 1, 1), (3, 1, 2), (3, 1, 2), ] + self.representation_length = representation_length self.final_width = net_util.convolutional_layer_series(img_rows, layer_series) self.final_height = net_util.convolutional_layer_series(img_cols, layer_series) + self.channels = channels self.encoder = nn.Sequential( - nn.Conv2d(in_channels=img_channels, out_channels=16, kernel_size=(3, 3), padding=1), + nn.Conv2d(in_channels=img_channels, out_channels=channels[0], kernel_size=(3, 3), padding=1), nn.ReLU(True), - nn.Conv2d(in_channels=16, out_channels=32, kernel_size=(3, 3), stride=2, padding=1), + nn.Conv2d(in_channels=channels[0], out_channels=channels[1], kernel_size=(3, 3), stride=2, padding=1), nn.ReLU(True), - nn.Conv2d(in_channels=32, out_channels=32, kernel_size=(3, 3), stride=2, padding=1), + nn.Conv2d(in_channels=channels[1], out_channels=channels[2], kernel_size=(3, 3), stride=2, padding=1), Flatten(), - nn.Linear(self.final_width * self.final_height * 32, 32) + nn.Linear(self.final_width * self.final_height * channels[2], representation_length) ) self.decoder = nn.Sequential( - nn.Linear(32, self.final_width * self.final_height * 32), + nn.Linear(representation_length, self.final_width * self.final_height * channels[2]), nn.ReLU(True), Reshape(32, self.final_width, self.final_height), - nn.ConvTranspose2d(in_channels=32, out_channels=32, kernel_size=3, stride=2, padding=1, output_padding=1), + 
nn.ConvTranspose2d(in_channels=channels[2], out_channels=channels[1], kernel_size=3, stride=2, padding=1, output_padding=1), nn.ReLU(True), - nn.ConvTranspose2d(in_channels=32, out_channels=16, kernel_size=3, stride=2, padding=1, output_padding=1), + nn.ConvTranspose2d(in_channels=channels[1], out_channels=channels[0], kernel_size=3, stride=2, padding=1, output_padding=1), nn.ReLU(True), - nn.ConvTranspose2d(in_channels=16, out_channels=img_channels, kernel_size=3, padding=1), + nn.ConvTranspose2d(in_channels=channels[0], out_channels=img_channels, kernel_size=3, padding=1), ) @staticmethod @@ -63,13 +63,14 @@ def _weight_initializer(tensor): init.constant_(tensor.bias, 0.0) def reset_weights(self): - for m in self.children(): - if isinstance(m, nn.Conv2d): - self._weight_initializer(m) - elif isinstance(m, nn.ConvTranspose2d): - self._weight_initializer(m) - elif isinstance(m, nn.Linear): - self._weight_initializer(m) + pass + # for m in children: + # if isinstance(m, nn.Conv2d): + # self._weight_initializer(m) + # elif isinstance(m, nn.ConvTranspose2d): + # self._weight_initializer(m) + # elif isinstance(m, nn.Linear): + # self._weight_initializer(m) def forward(self, x): encoding = self.encoder(x) @@ -85,9 +86,9 @@ def metrics(self): return [Loss()] -def create(img_rows, img_cols, img_channels, num_classes): +def create(img_rows, img_cols, img_channels, representation_length=32): """ Vel factory function """ def instantiate(**_): - return MnistCnnAutoencoder(img_rows, img_cols, img_channels, num_classes) + return MnistCnnAutoencoder(img_rows, img_cols, img_channels, representation_length) return ModelFactory.generic(instantiate) From 95f191c2945b9f756369567a2babf869b475b1bc Mon Sep 17 00:00:00 2001 From: Million Integrals Date: Sun, 7 Apr 2019 23:39:03 -0700 Subject: [PATCH 015/162] Better weight reset for autoencoder. 
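The previous reset_weights() iterated self.children(), which only yields the
immediate submodules, here the encoder and decoder nn.Sequential containers,
so the isinstance checks for Conv2d / ConvTranspose2d / Linear matched nothing.
Iterating the contents of the two Sequentials reaches the actual layers.
Roughly (outputs shown schematically):

    list(model.children())
    # -> [Sequential(...), Sequential(...)]           (the two containers)

    list(it.chain(model.encoder, model.decoder))
    # -> [Conv2d(...), ReLU(...), ..., Linear(...)]   (the layers themselves)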
--- .../autoencoder/mnist_cnn_autoencoder.py | 25 +++++++++++-------- 1 file changed, 15 insertions(+), 10 deletions(-) diff --git a/vel/models/autoencoder/mnist_cnn_autoencoder.py b/vel/models/autoencoder/mnist_cnn_autoencoder.py index 002cc576..f78262b4 100644 --- a/vel/models/autoencoder/mnist_cnn_autoencoder.py +++ b/vel/models/autoencoder/mnist_cnn_autoencoder.py @@ -1,3 +1,5 @@ +import itertools as it + import torch.nn as nn import torch.nn.init as init import torch.nn.functional as F @@ -50,9 +52,13 @@ def __init__(self, img_rows, img_cols, img_channels, channels=[16, 32, 32], repr nn.Linear(representation_length, self.final_width * self.final_height * channels[2]), nn.ReLU(True), Reshape(32, self.final_width, self.final_height), - nn.ConvTranspose2d(in_channels=channels[2], out_channels=channels[1], kernel_size=3, stride=2, padding=1, output_padding=1), + nn.ConvTranspose2d( + in_channels=channels[2], out_channels=channels[1], kernel_size=3, stride=2, padding=1, output_padding=1 + ), nn.ReLU(True), - nn.ConvTranspose2d(in_channels=channels[1], out_channels=channels[0], kernel_size=3, stride=2, padding=1, output_padding=1), + nn.ConvTranspose2d( + in_channels=channels[1], out_channels=channels[0], kernel_size=3, stride=2, padding=1, output_padding=1 + ), nn.ReLU(True), nn.ConvTranspose2d(in_channels=channels[0], out_channels=img_channels, kernel_size=3, padding=1), ) @@ -63,14 +69,13 @@ def _weight_initializer(tensor): init.constant_(tensor.bias, 0.0) def reset_weights(self): - pass - # for m in children: - # if isinstance(m, nn.Conv2d): - # self._weight_initializer(m) - # elif isinstance(m, nn.ConvTranspose2d): - # self._weight_initializer(m) - # elif isinstance(m, nn.Linear): - # self._weight_initializer(m) + for m in it.chain(self.encoder, self.decoder): + if isinstance(m, nn.Conv2d): + self._weight_initializer(m) + elif isinstance(m, nn.ConvTranspose2d): + self._weight_initializer(m) + elif isinstance(m, nn.Linear): + self._weight_initializer(m) def forward(self, x): encoding = self.encoder(x) From 792064a39ff080ce6bb795465b748a77fd072c9c Mon Sep 17 00:00:00 2001 From: Million Integrals Date: Sun, 7 Apr 2019 23:44:06 -0700 Subject: [PATCH 016/162] Small code changes in MNIST autoencoder. 
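The list default for `channels` moves out of the signatures into an explicit
`if channels is None` check. Python evaluates default values once, at function
definition time, so a mutable default such as [16, 32, 32] is a single shared
object; the None-sentinel idiom gives every call its own fresh list. The list
is not actually mutated here, so this is mostly defensive, but it follows the
usual idiom that avoids the classic pitfall:

    def f(xs=[]):
        xs.append(1)
        return xs

    f()  # [1]
    f()  # [1, 1] -- the same list object persists across calls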
--- vel/models/autoencoder/mnist_cnn_autoencoder.py | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/vel/models/autoencoder/mnist_cnn_autoencoder.py b/vel/models/autoencoder/mnist_cnn_autoencoder.py index f78262b4..556a1a4b 100644 --- a/vel/models/autoencoder/mnist_cnn_autoencoder.py +++ b/vel/models/autoencoder/mnist_cnn_autoencoder.py @@ -24,9 +24,12 @@ class MnistCnnAutoencoder(SupervisedModel): Dense - output (softmax) """ - def __init__(self, img_rows, img_cols, img_channels, channels=[16, 32, 32], representation_length=32): + def __init__(self, img_rows, img_cols, img_channels, channels=None, representation_length=32): super(MnistCnnAutoencoder, self).__init__() + if channels is None: + channels = [16, 32, 32] + layer_series = [ (3, 1, 1), (3, 1, 2), @@ -91,9 +94,14 @@ def metrics(self): return [Loss()] -def create(img_rows, img_cols, img_channels, representation_length=32): +def create(img_rows, img_cols, img_channels, channels=None, representation_length=32): """ Vel factory function """ + if channels is None: + channels = [16, 32, 32] + def instantiate(**_): - return MnistCnnAutoencoder(img_rows, img_cols, img_channels, representation_length) + return MnistCnnAutoencoder( + img_rows, img_cols, img_channels, channels=channels, representation_length=representation_length + ) return ModelFactory.generic(instantiate) From 3cd40b3802bd9780aaca92e53ce5cfcd065e9874 Mon Sep 17 00:00:00 2001 From: Million Integrals Date: Sun, 7 Apr 2019 23:45:31 -0700 Subject: [PATCH 017/162] Fixing a bug in MNIST autoencoder. --- vel/models/autoencoder/mnist_cnn_autoencoder.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/vel/models/autoencoder/mnist_cnn_autoencoder.py b/vel/models/autoencoder/mnist_cnn_autoencoder.py index 556a1a4b..ddef1d38 100644 --- a/vel/models/autoencoder/mnist_cnn_autoencoder.py +++ b/vel/models/autoencoder/mnist_cnn_autoencoder.py @@ -54,7 +54,7 @@ def __init__(self, img_rows, img_cols, img_channels, channels=None, representati self.decoder = nn.Sequential( nn.Linear(representation_length, self.final_width * self.final_height * channels[2]), nn.ReLU(True), - Reshape(32, self.final_width, self.final_height), + Reshape(channels[2], self.final_width, self.final_height), nn.ConvTranspose2d( in_channels=channels[2], out_channels=channels[1], kernel_size=3, stride=2, padding=1, output_padding=1 ), From 123fc5fe9506b47bceb9318297d4140ce15d1341 Mon Sep 17 00:00:00 2001 From: Million Integrals Date: Mon, 8 Apr 2019 21:48:31 -0700 Subject: [PATCH 018/162] Ignoring local notebooks for now. 
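Alongside the .gitignore entry, this brings in the first cut of the
variational autoencoder example (mnist_cnn_vae.yaml and mnist_cnn_vae.py).
For reference, the objective the finished VAE is trained with later in this
series is the negative ELBO from Kingma & Welling (cited in the bibliography):
a reconstruction term plus a KL term. For a diagonal Gaussian posterior and a
standard normal prior the KL term has the closed form

    \mathrm{KL}\left(\mathcal{N}(\mu, \sigma^2) \,\|\, \mathcal{N}(0, I)\right)
        = -\frac{1}{2} \sum_i \left(1 + \log \sigma_i^2 - \mu_i^2 - \sigma_i^2\right)

with samples drawn via the reparameterization z = \mu + \sigma \odot \epsilon,
\epsilon \sim \mathcal{N}(0, I), which is what the later
`mu + torch.randn_like(std) * std` and
`- 0.5 * (1 + torch.log(var) - mu ** 2 - var).sum(dim=1)` lines implement.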
--- .gitignore | 3 + .../autoencoders/mnist/mnist_cnn_vae.yaml | 38 +++++++ .../autoencoder/mnist_cnn_autoencoder.py | 10 +- vel/models/autoencoder/mnist_cnn_vae.py | 105 ++++++++++++++++++ 4 files changed, 147 insertions(+), 9 deletions(-) create mode 100644 examples-configs/autoencoders/mnist/mnist_cnn_vae.yaml create mode 100644 vel/models/autoencoder/mnist_cnn_vae.py diff --git a/.gitignore b/.gitignore index b0a800fd..6860e5a1 100644 --- a/.gitignore +++ b/.gitignore @@ -117,3 +117,6 @@ environment.yaml # Test cache /.pytest_cache + +# Local notebooks +/examples-notebooks diff --git a/examples-configs/autoencoders/mnist/mnist_cnn_vae.yaml b/examples-configs/autoencoders/mnist/mnist_cnn_vae.yaml new file mode 100644 index 00000000..74c499c9 --- /dev/null +++ b/examples-configs/autoencoders/mnist/mnist_cnn_vae.yaml @@ -0,0 +1,38 @@ +name: 'mnist_cnn_autoenoder' + + +model: + name: vel.models.autoencoder.mnist_cnn_vae + img_rows: 28 + img_cols: 28 + img_channels: 1 + channels: [8, 16, 16] + representation_length: 16 + + +source: + name: vel.sources.vision.mnist + batch_size: 128 + normalize: False + num_workers: 4 + unsupervised: true + + +optimizer: + name: vel.optimizers.adam + lr: 1.0e-3 + + +commands: + train: + name: vel.commands.train_command + epochs: 12 + log_frequency: 100 + + + checkpoint: + metric: 'val:loss' + + + visdom: + name: vel.commands.vis_store_command diff --git a/vel/models/autoencoder/mnist_cnn_autoencoder.py b/vel/models/autoencoder/mnist_cnn_autoencoder.py index ddef1d38..79eb8432 100644 --- a/vel/models/autoencoder/mnist_cnn_autoencoder.py +++ b/vel/models/autoencoder/mnist_cnn_autoencoder.py @@ -13,15 +13,7 @@ class MnistCnnAutoencoder(SupervisedModel): """ - A simple MNIST classification model. - - Conv 3x3 - 32 - Conv 3x3 - 64 - MaxPool 2x2 - Dropout 0.25 - Flatten - Dense - 128 - Dense - output (softmax) + A simple MNIST autoencoder, containing 3 convolutional layers. """ def __init__(self, img_rows, img_cols, img_channels, channels=None, representation_length=32): diff --git a/vel/models/autoencoder/mnist_cnn_vae.py b/vel/models/autoencoder/mnist_cnn_vae.py new file mode 100644 index 00000000..6745e6d4 --- /dev/null +++ b/vel/models/autoencoder/mnist_cnn_vae.py @@ -0,0 +1,105 @@ +import itertools as it + +import torch.nn as nn +import torch.nn.init as init +import torch.nn.functional as F + +import vel.util.network as net_util + +from vel.api import SupervisedModel, ModelFactory +from vel.metrics.loss_metric import Loss +from vel.modules.layers import Flatten, Reshape + + +class MnistCnnAutoencoder(SupervisedModel): + """ + A simple MNIST variational autoencoder, containing 3 convolutional layers. 
+ """ + + def __init__(self, img_rows, img_cols, img_channels, channels=None, representation_length=32): + super(MnistCnnAutoencoder, self).__init__() + + assert representation_length % 2 == 0, "Representation length must be even" + + if channels is None: + channels = [16, 32, 32] + + layer_series = [ + (3, 1, 1), + (3, 1, 2), + (3, 1, 2), + ] + + self.representation_length = representation_length + self.final_width = net_util.convolutional_layer_series(img_rows, layer_series) + self.final_height = net_util.convolutional_layer_series(img_cols, layer_series) + self.channels = channels + + self.encoder = nn.Sequential( + nn.Conv2d(in_channels=img_channels, out_channels=channels[0], kernel_size=(3, 3), padding=1), + nn.ReLU(True), + nn.Conv2d(in_channels=channels[0], out_channels=channels[1], kernel_size=(3, 3), stride=2, padding=1), + nn.ReLU(True), + nn.Conv2d(in_channels=channels[1], out_channels=channels[2], kernel_size=(3, 3), stride=2, padding=1), + Flatten(), + nn.Linear(self.final_width * self.final_height * channels[2], representation_length) + ) + + self.decoder = nn.Sequential( + nn.Linear(representation_length, self.final_width * self.final_height * channels[2]), + nn.ReLU(True), + Reshape(channels[2], self.final_width, self.final_height), + nn.ConvTranspose2d( + in_channels=channels[2], out_channels=channels[1], kernel_size=3, stride=2, padding=1, output_padding=1 + ), + nn.ReLU(True), + nn.ConvTranspose2d( + in_channels=channels[1], out_channels=channels[0], kernel_size=3, stride=2, padding=1, output_padding=1 + ), + nn.ReLU(True), + nn.ConvTranspose2d(in_channels=channels[0], out_channels=img_channels, kernel_size=3, padding=1), + ) + + @staticmethod + def _weight_initializer(tensor): + init.xavier_uniform_(tensor.weight, gain=init.calculate_gain('relu')) + init.constant_(tensor.bias, 0.0) + + def reset_weights(self): + for m in it.chain(self.encoder, self.decoder): + if isinstance(m, nn.Conv2d): + self._weight_initializer(m) + elif isinstance(m, nn.ConvTranspose2d): + self._weight_initializer(m) + elif isinstance(m, nn.Linear): + self._weight_initializer(m) + + def forward(self, x): + encoding = self.encoder(x) + decoded = self.decoder(encoding) + + return { + 'result': decoded, + 'encoding': encoding + } + + def loss_value(self, x_data, y_true, y_pred): + """ Calculate a value of loss function """ + return F.mse_loss(y_pred, y_true) + + def metrics(self): + """ Set of metrics for this model """ + return [Loss()] + + +def create(img_rows, img_cols, img_channels, channels=None, representation_length=32): + """ Vel factory function """ + if channels is None: + channels = [16, 32, 32] + + def instantiate(**_): + return MnistCnnAutoencoder( + img_rows, img_cols, img_channels, channels=channels, representation_length=representation_length + ) + + return ModelFactory.generic(instantiate) From 846fd4d1a10c6941864db558c9100e735d1fe574 Mon Sep 17 00:00:00 2001 From: Million Integrals Date: Mon, 8 Apr 2019 22:34:39 -0700 Subject: [PATCH 019/162] Reducing number of parameters. 
--- vel/api/model.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/vel/api/model.py b/vel/api/model.py index 9f776348..3d2029ca 100644 --- a/vel/api/model.py +++ b/vel/api/model.py @@ -39,10 +39,10 @@ def summary(self, input_size=None, hashsummary=False): if input_size is None: print(self) - print("-" * 120) + print("-" * 100) number = sum(p.numel() for p in self.parameters()) print("Number of model parameters: {:,}".format(number)) - print("-" * 120) + print("-" * 100) else: summary(self, input_size) From f62830055a255085412cda8cecdd87f224847dc3 Mon Sep 17 00:00:00 2001 From: Million Integrals Date: Tue, 9 Apr 2019 08:15:10 -0700 Subject: [PATCH 020/162] Implemented Variational autoencoder. --- README.md | 4 + .../autoencoders/mnist/mnist_cnn_vae.yaml | 2 +- vel/api/__init__.py | 3 +- vel/api/learner.py | 16 ++-- vel/api/model.py | 77 ++++++++++++------- vel/metrics/loss_metric.py | 2 +- .../autoencoder/mnist_cnn_autoencoder.py | 10 ++- vel/models/autoencoder/mnist_cnn_vae.py | 77 ++++++++++++++++--- vel/models/imagenet/resnet34.py | 4 +- .../multilayer_rnn_sequence_classification.py | 4 +- vel/models/vision/cifar10_cnn_01.py | 4 +- vel/models/vision/cifar_resnet_v1.py | 4 +- vel/models/vision/cifar_resnet_v2.py | 4 +- vel/models/vision/cifar_resnext.py | 15 +--- vel/models/vision/mnist_cnn_01.py | 5 +- vel/notebook/__init__.py | 2 +- vel/notebook/defaults.py | 2 +- vel/notebook/loader.py | 10 ++- 18 files changed, 160 insertions(+), 85 deletions(-) diff --git a/README.md b/README.md index b418f9b6..597aa4e8 100644 --- a/README.md +++ b/README.md @@ -118,6 +118,10 @@ that are ready to run and easy to modify for other similar usecases: - Distributional Q-Learning - Noisy Networks for Exploration - Rainbow (combination of the above) + +# Implemented models - Unsupervised learning + +- Autoencoders and Variational autoencoders with an examples on MNIST dataset. 
# Examples diff --git a/examples-configs/autoencoders/mnist/mnist_cnn_vae.yaml b/examples-configs/autoencoders/mnist/mnist_cnn_vae.yaml index 74c499c9..14224ef0 100644 --- a/examples-configs/autoencoders/mnist/mnist_cnn_vae.yaml +++ b/examples-configs/autoencoders/mnist/mnist_cnn_vae.yaml @@ -20,7 +20,7 @@ source: optimizer: name: vel.optimizers.adam - lr: 1.0e-3 + lr: 1.0e-4 commands: diff --git a/vel/api/__init__.py b/vel/api/__init__.py index 411a6eaa..eeaadd26 100644 --- a/vel/api/__init__.py +++ b/vel/api/__init__.py @@ -2,7 +2,8 @@ from .info import BatchInfo, EpochInfo, TrainingInfo from .learner import Learner from .model import ( - Model, BackboneModel, LinearBackboneModel, SupervisedModel, RnnLinearBackboneModel, RnnModel, RnnSupervisedModel + Model, SupervisedModel, LossFunctionModel, + BackboneModel, LinearBackboneModel, RnnLinearBackboneModel, RnnModel, RnnSupervisedModel ) from .model_factory import ModelFactory from .optimizer import OptimizerFactory diff --git a/vel/api/learner.py b/vel/api/learner.py index a99fbcec..6ebb1949 100644 --- a/vel/api/learner.py +++ b/vel/api/learner.py @@ -3,12 +3,13 @@ import tqdm import typing +from .model import SupervisedModel from .info import BatchInfo, EpochInfo, TrainingInfo class Learner: """ Manages training process of a single model """ - def __init__(self, device: torch.device, model, max_grad_norm: typing.Optional[float]=None): + def __init__(self, device: torch.device, model: SupervisedModel, max_grad_norm: typing.Optional[float]=None): self.device = device self.model = model.to(device) self.max_grad_norm = max_grad_norm @@ -90,21 +91,14 @@ def validation_epoch(self, epoch_info, source: 'vel.api.Source'): def feed_batch(self, batch_info, data, target): """ Run single batch of data """ data, target = data.to(self.device), target.to(self.device) - output, loss = self.model.loss(data, target) + metrics = self.model.calculate_gradient(data, target) - # Store extra batch information for calculation of the statistics - batch_info['data'] = data - batch_info['target'] = target - batch_info['output'] = output - batch_info['loss'] = loss - - return loss + batch_info.update(metrics) def train_batch(self, batch_info, data, target): """ Train single batch of data """ batch_info.optimizer.zero_grad() - loss = self.feed_batch(batch_info, data, target) - loss.backward() + self.feed_batch(batch_info, data, target) if self.max_grad_norm is not None: batch_info['grad_norm'] = torch.nn.utils.clip_grad_norm_( diff --git a/vel/api/model.py b/vel/api/model.py index 3d2029ca..1060170b 100644 --- a/vel/api/model.py +++ b/vel/api/model.py @@ -13,7 +13,7 @@ class Model(nn.Module): def metrics(self) -> list: """ Set of metrics for this model """ - return [Loss()] + return [] def train(self, mode=True): r""" @@ -75,6 +75,54 @@ def is_recurrent(self) -> bool: return False +class SupervisedModel(Model): + """ Model for a supervised learning problem """ + + def calculate_gradient(self, x_data, y_true): + raise NotImplementedError + + +class LossFunctionModel(SupervisedModel): + """ Model for a supervised learning with a simple loss function """ + + def metrics(self) -> list: + """ Set of metrics for this model """ + return [Loss()] + + def calculate_gradient(self, x_data, y_true): + y_pred = self(x_data) + loss_value = self.loss_value(x_data, y_true, y_pred) + + if self.training: + loss_value.backward() + + return { + 'loss': loss_value.item(), + 'data': x_data, + 'target': y_true, + 'output': y_pred + } + + def loss_value(self, x_data, y_true, y_pred): + """ 
Calculate a value of loss function """ + raise NotImplementedError + + +class BackboneModel(Model): + """ Model that serves as a backbone network to connect your heads to """ + + +class LinearBackboneModel(BackboneModel): + """ + Model that serves as a backbone network to connect your heads to - one that spits out a single-dimension output + """ + + @property + def output_dim(self) -> int: + """ Final dimension of model output """ + raise NotImplementedError + + class RnnModel(Model): """ Class representing recurrent model """ @@ -93,10 +141,6 @@ def zero_state(self, batch_size): return torch.zeros(batch_size, self.state_dim) -class BackboneModel(Model): - """ Model that serves as a backbone network to connect your heads to """ - - class RnnLinearBackboneModel(BackboneModel): """ Model that serves as a backbone network to connect your heads to - @@ -123,29 +167,6 @@ def zero_state(self, batch_size): return torch.zeros(batch_size, self.state_dim, dtype=torch.float32) -class LinearBackboneModel(BackboneModel): - """ - Model that serves as a backbone network to connect your heads to - one that spits out a single-dimension output - """ - - @property - def output_dim(self) -> int: - """ Final dimension of model output """ - raise NotImplementedError - - -class SupervisedModel(Model): - """ Model for a supervised learning problem """ - def loss(self, x_data, y_true): - """ Forward propagate network and return a value of loss function """ - y_pred = self(x_data) - return y_pred, self.loss_value(x_data, y_true, y_pred) - - def loss_value(self, x_data, y_true, y_pred): - """ Calculate a value of loss function """ - raise NotImplementedError - - class RnnSupervisedModel(RnnModel): """ Model for a supervised learning problem """ diff --git a/vel/metrics/loss_metric.py b/vel/metrics/loss_metric.py index d241a393..8de3707d 100644 --- a/vel/metrics/loss_metric.py +++ b/vel/metrics/loss_metric.py @@ -8,4 +8,4 @@ def __init__(self): def _value_function(self, batch_info): """ Just forward a value of the loss""" - return batch_info['loss'].item() + return batch_info['loss'] diff --git a/vel/models/autoencoder/mnist_cnn_autoencoder.py b/vel/models/autoencoder/mnist_cnn_autoencoder.py index 79eb8432..1ad5536f 100644 --- a/vel/models/autoencoder/mnist_cnn_autoencoder.py +++ b/vel/models/autoencoder/mnist_cnn_autoencoder.py @@ -6,12 +6,12 @@ import vel.util.network as net_util -from vel.api import SupervisedModel, ModelFactory +from vel.api import LossFunctionModel, ModelFactory from vel.metrics.loss_metric import Loss from vel.modules.layers import Flatten, Reshape -class MnistCnnAutoencoder(SupervisedModel): +class MnistCnnAutoencoder(LossFunctionModel): """ A simple MNIST autoencoder, containing 3 convolutional layers. 
""" @@ -77,6 +77,12 @@ def forward(self, x): decoded = self.decoder(encoding) return decoded + def encode(self, sample): + return self.encoder(sample) + + def decode(self, sample): + return self.decoder(sample) + def loss_value(self, x_data, y_true, y_pred): """ Calculate a value of loss function """ return F.mse_loss(y_pred, y_true) diff --git a/vel/models/autoencoder/mnist_cnn_vae.py b/vel/models/autoencoder/mnist_cnn_vae.py index 6745e6d4..0cdfadde 100644 --- a/vel/models/autoencoder/mnist_cnn_vae.py +++ b/vel/models/autoencoder/mnist_cnn_vae.py @@ -1,5 +1,6 @@ import itertools as it +import torch import torch.nn as nn import torch.nn.init as init import torch.nn.functional as F @@ -7,17 +8,18 @@ import vel.util.network as net_util from vel.api import SupervisedModel, ModelFactory +from vel.api.metrics import AveragingNamedMetric from vel.metrics.loss_metric import Loss from vel.modules.layers import Flatten, Reshape -class MnistCnnAutoencoder(SupervisedModel): +class MnistCnnVAE(SupervisedModel): """ A simple MNIST variational autoencoder, containing 3 convolutional layers. """ def __init__(self, img_rows, img_cols, img_channels, channels=None, representation_length=32): - super(MnistCnnAutoencoder, self).__init__() + super(MnistCnnVAE, self).__init__() assert representation_length % 2 == 0, "Representation length must be even" @@ -31,6 +33,7 @@ def __init__(self, img_rows, img_cols, img_channels, channels=None, representati ] self.representation_length = representation_length + self.final_width = net_util.convolutional_layer_series(img_rows, layer_series) self.final_height = net_util.convolutional_layer_series(img_cols, layer_series) self.channels = channels @@ -42,7 +45,7 @@ def __init__(self, img_rows, img_cols, img_channels, channels=None, representati nn.ReLU(True), nn.Conv2d(in_channels=channels[1], out_channels=channels[2], kernel_size=(3, 3), stride=2, padding=1), Flatten(), - nn.Linear(self.final_width * self.final_height * channels[2], representation_length) + nn.Linear(self.final_width * self.final_height * channels[2], representation_length * 2) ) self.decoder = nn.Sequential( @@ -58,6 +61,7 @@ def __init__(self, img_rows, img_cols, img_channels, channels=None, representati ), nn.ReLU(True), nn.ConvTranspose2d(in_channels=channels[0], out_channels=img_channels, kernel_size=3, padding=1), + nn.Sigmoid() ) @staticmethod @@ -74,22 +78,71 @@ def reset_weights(self): elif isinstance(m, nn.Linear): self._weight_initializer(m) - def forward(self, x): - encoding = self.encoder(x) - decoded = self.decoder(encoding) + def encode(self, sample): + encoding = self.encoder(sample) + + mu = encoding[:, :self.representation_length] + # I encode std directly as a softplus, rather than exp(logstd) + std = F.softplus(encoding[:, self.representation_length:]) + + return mu + torch.randn_like(std) * std + + def decode(self, sample): + return self.decoder(sample) + + def forward(self, sample): + encoding = self.encoder(sample) + + mu = encoding[:, :self.representation_length] + # I encode std directly as a softplus, rather than exp(logstd) + std = F.softplus(encoding[:, self.representation_length:]) + + z = mu + torch.randn_like(std) * std + + decoded = self.decoder(z) return { - 'result': decoded, - 'encoding': encoding + 'decoded': decoded, + 'encoding': z, + 'mu': mu, + 'std': std } - def loss_value(self, x_data, y_true, y_pred): + def calculate_gradient(self, x_data, y_true): """ Calculate a value of loss function """ - return F.mse_loss(y_pred, y_true) + output = self(x_data) + + y_pred = 
output['decoded'] + + mu = output['mu'] + std = output['std'] + var = std ** 2 + + kl_divergence = - 0.5 * (1 + torch.log(var) - mu ** 2 - var).sum(dim=1) + kl_divergence = kl_divergence.mean() + + # reconstruction = 0.5 * F.mse_loss(y_pred, y_true) + + # We must sum over all image axis and average only on minibatch axis + reconstruction = F.binary_cross_entropy(y_pred, y_true, reduce=False).sum(1).sum(1).sum(1).mean() + loss = reconstruction + kl_divergence + + if self.training: + loss.backward() + + return { + 'loss': loss.item(), + 'reconstruction': reconstruction.item(), + 'kl_divergence': kl_divergence.item() + } def metrics(self): """ Set of metrics for this model """ - return [Loss()] + return [ + Loss(), + AveragingNamedMetric('reconstruction'), + AveragingNamedMetric('kl_divergence') + ] def create(img_rows, img_cols, img_channels, channels=None, representation_length=32): @@ -98,7 +151,7 @@ def create(img_rows, img_cols, img_channels, channels=None, representation_lengt channels = [16, 32, 32] def instantiate(**_): - return MnistCnnAutoencoder( + return MnistCnnVAE( img_rows, img_cols, img_channels, channels=channels, representation_length=representation_length ) diff --git a/vel/models/imagenet/resnet34.py b/vel/models/imagenet/resnet34.py index e5a5a97c..fc819a2a 100644 --- a/vel/models/imagenet/resnet34.py +++ b/vel/models/imagenet/resnet34.py @@ -5,14 +5,14 @@ import vel.modules.layers as l import vel.util.module_util as mu -from vel.api import SupervisedModel, ModelFactory +from vel.api import LossFunctionModel, ModelFactory # Because of concat pooling it's 2x 512 NET_OUTPUT = 1024 -class Resnet34(SupervisedModel): +class Resnet34(LossFunctionModel): """ Resnet34 network model """ def __init__(self, fc_layers=None, dropout=None, pretrained=True): diff --git a/vel/models/rnn/multilayer_rnn_sequence_classification.py b/vel/models/rnn/multilayer_rnn_sequence_classification.py index 8542e119..0d3e7439 100644 --- a/vel/models/rnn/multilayer_rnn_sequence_classification.py +++ b/vel/models/rnn/multilayer_rnn_sequence_classification.py @@ -4,13 +4,13 @@ import torch.nn.functional as F import torch.nn as nn -from vel.api import SupervisedModel, ModelFactory, LinearBackboneModel +from vel.api import LossFunctionModel, ModelFactory, LinearBackboneModel from vel.metrics.accuracy import Accuracy from vel.metrics.loss_metric import Loss from vel.modules.rnn_layer import RnnLayer -class MultilayerRnnSequenceClassification(SupervisedModel): +class MultilayerRnnSequenceClassification(LossFunctionModel): """ Multilayer GRU network for sequence modeling (n:1) """ def __init__(self, input_block: LinearBackboneModel, rnn_type: str, output_dim: int, diff --git a/vel/models/vision/cifar10_cnn_01.py b/vel/models/vision/cifar10_cnn_01.py index 10eeafe8..50dc1328 100644 --- a/vel/models/vision/cifar10_cnn_01.py +++ b/vel/models/vision/cifar10_cnn_01.py @@ -8,12 +8,12 @@ import torch.nn.init as init import torch.nn.functional as F -from vel.api import SupervisedModel, ModelFactory +from vel.api import LossFunctionModel, ModelFactory from vel.metrics.loss_metric import Loss from vel.metrics.accuracy import Accuracy -class Net(SupervisedModel): +class Net(LossFunctionModel): """ A simple MNIST classification model. 
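The loss assembled in calculate_gradient above is the usual VAE objective: a reconstruction term plus the closed-form KL divergence between the diagonal Gaussian posterior and a unit Gaussian, with the latent sample drawn via the reparameterization trick. The short sketch below restates just that computation outside the model class; the batch size, latent width and the random tensors standing in for encoder and decoder outputs are placeholders, and it uses reduction='none', the current spelling of the deprecated reduce=False argument that appears in the patch.

import torch
import torch.nn.functional as F

# Illustrative sizes only; the patch uses representation_length latent
# dimensions and an encoder that emits twice that many values.
batch_size, latent_dim = 4, 16

# Stand-in for the encoder output: first half is the mean, second half
# parameterizes the standard deviation through a softplus (always positive).
encoding = torch.randn(batch_size, latent_dim * 2)
mu = encoding[:, :latent_dim]
std = F.softplus(encoding[:, latent_dim:])

# Reparameterization trick: sample z while keeping gradients w.r.t. mu and std.
z = mu + torch.randn_like(std) * std

# Closed-form KL(q(z|x) || N(0, I)) for a diagonal Gaussian, summed over the
# latent dimension and averaged over the minibatch, as in calculate_gradient.
var = std ** 2
kl_divergence = (-0.5 * (1 + torch.log(var) - mu ** 2 - var).sum(dim=1)).mean()

# Reconstruction term: binary cross-entropy on sigmoid outputs, summed over all
# image axes and averaged only over the minibatch axis.
y_true = torch.rand(batch_size, 1, 28, 28)
y_pred = torch.rand(batch_size, 1, 28, 28)  # stand-in for self.decoder(z)
reconstruction = (
    F.binary_cross_entropy(y_pred, y_true, reduction='none')
    .sum(dim=(1, 2, 3))
    .mean()
)

loss = reconstruction + kl_divergence

Encoding the standard deviation through softplus rather than exp(logstd), as the patch comment notes, keeps it strictly positive while growing only linearly for large pre-activations.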
diff --git a/vel/models/vision/cifar_resnet_v1.py b/vel/models/vision/cifar_resnet_v1.py index 5b638710..fef562c8 100644 --- a/vel/models/vision/cifar_resnet_v1.py +++ b/vel/models/vision/cifar_resnet_v1.py @@ -6,11 +6,11 @@ import torch.nn as nn import torch.nn.functional as F -from vel.api import SupervisedModel, ModelFactory +from vel.api import LossFunctionModel, ModelFactory from vel.modules.resnet_v1 import Bottleneck, BasicBlock -class ResNetV1(SupervisedModel): +class ResNetV1(LossFunctionModel): """ A ResNet V1 model as defined in the literature """ def __init__(self, block, layers, inplanes, divisor=4, img_channels=3, num_classes=1000): diff --git a/vel/models/vision/cifar_resnet_v2.py b/vel/models/vision/cifar_resnet_v2.py index bd430af8..3bc03b52 100644 --- a/vel/models/vision/cifar_resnet_v2.py +++ b/vel/models/vision/cifar_resnet_v2.py @@ -6,11 +6,11 @@ import torch.nn as nn import torch.nn.functional as F -from vel.api import SupervisedModel, ModelFactory +from vel.api import LossFunctionModel, ModelFactory from vel.modules.resnet_v2 import Bottleneck, BasicBlock -class ResNetV2(SupervisedModel): +class ResNetV2(LossFunctionModel): """ A ResNet V2 (pre-activation resnet) model as defined in the literature """ def __init__(self, block, layers, inplanes, divisor=4, img_channels=3, num_classes=1000): diff --git a/vel/models/vision/cifar_resnext.py b/vel/models/vision/cifar_resnext.py index 9ce14b4a..ffa0fc76 100644 --- a/vel/models/vision/cifar_resnext.py +++ b/vel/models/vision/cifar_resnext.py @@ -6,14 +6,15 @@ import torch.nn as nn import torch.nn.functional as F -from vel.api import SupervisedModel, ModelFactory +from vel.api import LossFunctionModel, ModelFactory from vel.modules.resnext import ResNeXtBottleneck -class ResNeXt(SupervisedModel): +class ResNeXt(LossFunctionModel): """ A ResNext model as defined in the literature """ - def __init__(self, block, layers, inplanes, image_features, cardinality=4, divisor=4, img_channels=3, num_classes=1000): + def __init__(self, block, layers, inplanes, image_features, cardinality=4, divisor=4, img_channels=3, + num_classes=1000): super().__init__() self.num_classess = num_classes @@ -77,14 +78,6 @@ def metrics(self): from vel.metrics.accuracy import Accuracy return [Loss(), Accuracy()] - def summary(self): - """ Print model summary """ - # import torchsummary - - print(self) - # self.eval() - # torchsummary.summary(self, input_size=(3, 32, 32)) - def create(blocks, mode='basic', inplanes=64, cardinality=4, image_features=64, divisor=4, num_classes=1000): """ Vel factory function """ diff --git a/vel/models/vision/mnist_cnn_01.py b/vel/models/vision/mnist_cnn_01.py index 05b61c09..08472f4d 100644 --- a/vel/models/vision/mnist_cnn_01.py +++ b/vel/models/vision/mnist_cnn_01.py @@ -8,12 +8,13 @@ import torch.nn.init as init import torch.nn.functional as F -from vel.api import SupervisedModel, ModelFactory + +from vel.api import LossFunctionModel, ModelFactory from vel.metrics.loss_metric import Loss from vel.metrics.accuracy import Accuracy -class Net(SupervisedModel): +class Net(LossFunctionModel): """ A simple MNIST classification model. 
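All of the vision and sequence models above switch their base class from SupervisedModel to LossFunctionModel. The diffs only touch imports and class declarations, so the sketch below of what such a model now looks like rests on the assumption that LossFunctionModel keeps the forward()/loss_value()/metrics() contract the models continue to implement; the class name and layer sizes are made up for illustration.

import torch.nn as nn
import torch.nn.functional as F

from vel.api import LossFunctionModel
from vel.metrics.accuracy import Accuracy
from vel.metrics.loss_metric import Loss


class TinyClassifier(LossFunctionModel):
    """ Toy classifier showing the interface the renamed models implement """

    def __init__(self, input_dim=784, num_classes=10):
        super().__init__()
        self.fc = nn.Linear(input_dim, num_classes)

    def forward(self, x):
        # Flatten the image and return log-probabilities over the classes
        return F.log_softmax(self.fc(x.view(x.size(0), -1)), dim=1)

    def loss_value(self, x_data, y_true, y_pred):
        """ Calculate a value of loss function """
        return F.nll_loss(y_pred, y_true)

    def metrics(self):
        """ Set of metrics for this model """
        return [Loss(), Accuracy()]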
diff --git a/vel/notebook/__init__.py b/vel/notebook/__init__.py index b29639c0..3b31f630 100644 --- a/vel/notebook/__init__.py +++ b/vel/notebook/__init__.py @@ -1,2 +1,2 @@ -from .loader import load +from .loader import load_config from .defaults import reasonable_notbook_defaults diff --git a/vel/notebook/defaults.py b/vel/notebook/defaults.py index 044d9446..3781ad0b 100644 --- a/vel/notebook/defaults.py +++ b/vel/notebook/defaults.py @@ -3,4 +3,4 @@ def reasonable_notbook_defaults(): """ Notbook defaults """ import matplotlib.pyplot as plt - plt.rcParams['figure.figsize'] = [10, 5] + plt.rcParams['figure.figsize'] = [15, 8] diff --git a/vel/notebook/loader.py b/vel/notebook/loader.py index 9edc5e1d..55bd13fc 100644 --- a/vel/notebook/loader.py +++ b/vel/notebook/loader.py @@ -1,8 +1,10 @@ from vel.api import ModelConfig -def load(config_path, run_number=0, device='cuda:0'): +def load_config(config_path, run_number=0, device='cuda:0'): """ Load a ModelConfig from filename """ - model_config = ModelConfig.from_file(config_path, run_number, device=device) - - return model_config + return ModelConfig.from_file( + ModelConfig.from_project_directory(config_path), + run_number=run_number, + device=device + ) From 3e5069c1ed41e1507d0afd24ff5f7796245df007 Mon Sep 17 00:00:00 2001 From: Million Integrals Date: Tue, 9 Apr 2019 08:21:01 -0700 Subject: [PATCH 021/162] Adding example notebooks. --- .gitignore | 3 - .../mnist/mnist-autoencoder.ipynb | 288 ++++++++++++++++ .../autoencoders/mnist/mnist-vae.ipynb | 310 ++++++++++++++++++ 3 files changed, 598 insertions(+), 3 deletions(-) create mode 100644 examples-notebooks/autoencoders/mnist/mnist-autoencoder.ipynb create mode 100644 examples-notebooks/autoencoders/mnist/mnist-vae.ipynb diff --git a/.gitignore b/.gitignore index 6860e5a1..b0a800fd 100644 --- a/.gitignore +++ b/.gitignore @@ -117,6 +117,3 @@ environment.yaml # Test cache /.pytest_cache - -# Local notebooks -/examples-notebooks diff --git a/examples-notebooks/autoencoders/mnist/mnist-autoencoder.ipynb b/examples-notebooks/autoencoders/mnist/mnist-autoencoder.ipynb new file mode 100644 index 00000000..ad7a6b1e --- /dev/null +++ b/examples-notebooks/autoencoders/mnist/mnist-autoencoder.ipynb @@ -0,0 +1,288 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "import os\n", + "import torch\n", + "import numpy as np\n", + "import matplotlib.pyplot as plt" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "import vel\n", + "import vel.notebook as nb\n", + "nb.reasonable_notbook_defaults()\n", + "torch.set_grad_enabled(False) # We don't need autograd here\n", + "None" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [], + "source": [ + "config = nb.load_config('examples-configs/autoencoders/mnist/mnist_cnn_autoencoder.yaml', run_number=4, device='cpu')" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING:root:Setting up a new session...\n" + ] + } + ], + "source": [ + "model = config.load_trained_model()" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "MnistCnnAutoencoder(\n", + " (encoder): Sequential(\n", + " (0): Conv2d(1, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n", + " (1): 
ReLU(inplace)\n", + " (2): Conv2d(8, 16, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1))\n", + " (3): ReLU(inplace)\n", + " (4): Conv2d(16, 16, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1))\n", + " (5): Flatten()\n", + " (6): Linear(in_features=784, out_features=16, bias=True)\n", + " )\n", + " (decoder): Sequential(\n", + " (0): Linear(in_features=16, out_features=784, bias=True)\n", + " (1): ReLU(inplace)\n", + " (2): Reshape()\n", + " (3): ConvTranspose2d(16, 16, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), output_padding=(1, 1))\n", + " (4): ReLU(inplace)\n", + " (5): ConvTranspose2d(16, 8, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), output_padding=(1, 1))\n", + " (6): ReLU(inplace)\n", + " (7): ConvTranspose2d(8, 1, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n", + " )\n", + ")\n", + "----------------------------------------------------------------------------------------------------\n", + "Number of model parameters: 33,009\n", + "----------------------------------------------------------------------------------------------------\n" + ] + } + ], + "source": [ + "model.summary()" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [], + "source": [ + "data_source = config.provide('source')\n", + "train_dataset = data_source.train_dataset" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [], + "source": [ + "def get_sample(idx):\n", + " return train_dataset[idx][0]" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [], + "source": [ + "def show_image(axis, sample):\n", + " axis.imshow(train_dataset.denormalize(sample)[:, :, 0], cmap='gray')" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAA2oAAACzCAYAAAD48u9xAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4zLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvnQurowAAHTJJREFUeJzt3XuQVdX55vHnRYOXOKiIQUqjmARNaQrbiEocSkhA4xgTUROVUhFjiRVvJKUUxhCHjMEQRafEaOIlgBdGtIJENOOoI15iFAokJj9FETXRH9jBK3LRyChr/uCkgr6r6d3nutbu76eKovth77PXPv3QfVbv3qsthCAAAAAAQDp6tHoAAAAAAIBPYqIGAAAAAIlhogYAAAAAiWGiBgAAAACJYaIGAAAAAIlhogYAAAAAiWGiBgAAAACJYaIGAAAAAImpaaJmZkeZ2TIze8nMLq7XoIBGobPIEb1FbugsckNnkSILIVS3o9lWkl6UdISkFZIWSRoVQli6hX2qOxhQEUKwavels2iFWjordb23dBZ18FYIYddqd6azaIGmdrayD71FTYq8Pqjlitohkl4KIbwSQtggabakY2t4PKDR6CxyRG/RbK/WuD+dRbPRWZRSLRO13SX952bvr6hkQKroLHJEb5EbOovc0FkkaetGH8DMxkoa2+jjAPVCZ5EbOovc0FnkiN6i2WqZqK2U9PnN3t+jkn1CCOFGSTdK/DwvWo7OIked9pbOIjF0Frnh9QGSVMuPPi6SNMDM9jaznpJOljSvPsMCGoLOIkf0Frmhs8gNnUWSqr6iFkL4yMzOk/SApK0kTQ8hPFe3kQF1RmeRI3qL3NBZ5IbOIlVVL89f1cG4TIwa1brUeVfRWdSKziJDT4cQBjXrYHQWddDUzkr0FrVr9PL8AAAAAIAGYKIGAAAAAIlhogYAAAAAiWGiBgAAAACJYaIGAAAAAIlhogYAAAAAiWGiBgAAAACJYaIGAAAAAIlhogYAAAAAiWGiBgAAAACJYaIGAAAAAIlhogYAAAAAiWGiBgAAAACJ2brVAwBQPgcddJDLzjvvPJeNHj3aZbfeeqvLrr322uhxlixZUsXoAAAA0scVNQAAAABIDBM1AAAAAEgMEzUAAAAASExN96iZ2d8lrZX0saSPQgiD6jEooJHoLXJDZ5EbOovc0FmkyEII1e+8qdSDQghvFdy++oNlbKuttnLZjjvuWNNjxhZm2H777V227777uuzcc8+NPubUqVNdNmrUKJf985//dNmUKVNc9rOf/Sx6nFqEEKzWx+hKb7trZ4tqa2uL5vPnz3dZr169qj7Oe++9F8132WWXqh+zWegsNjd8+HCXzZo1y2VDhw512bJlyxoypoina32RSmfTNnHiRJd19DW7Rw//w1fDhg1z2WOPPVbzuGrQ1M5Wtqe3qEmR1wf86CMAAAAAJKbWiVqQ9KCZPW1mY+sxIKAJ6C1yQ2eRGzqL3NBZJKfW36M2JISw0sw+J+khM3shhPD45htUyk7hkZIt9pbOIkF0Frmhs8gNr2mRnJquqIUQVlb+fkPSXEmHRLa5MYQwiJsykYrOektnkRo6i9zQWeSG17RIUdVX1Mzss5J6hBDWVt4+UtL/qNvIWmTPPfeM5j179nTZYYcd5rIhQ4a4bKeddnLZCSecUMXoum7FihUumzZtWnTb4447zmVr16512V/+8heXtfgm4sLK2ttmOOQQ9zVLc+bMiW4bWywntnBRrF8bNmxwWUeLhgwePNhlS5YsKfSYuUihs4cffrjLYh+TuXPnNmM4WTv44INdtmjRohaMpHFS6Cz+bcyYMS6bMGGCyzZu3Fj4MWtZiC5FdBapquVHH/tKmmtm/3qc/xVC+D91GRXQOPQWuaGzyA2dRW7oLJJU9UQthPCKpAPqOBag4egtckNnkRs6i9zQWaSK5fkBAAAAIDFM1AAAAAAgMbUuz5+1trY2l82fPz+6bWxxhNTEbgSeOHGiy9atWxfdf9asWS5rb2932bvvvuuyZcuWFRkiErT99tu77Ktf/arLbr/9dpf169evpmMvX77cZVdccYXLZs+eHd3/T3/6k8tinf/FL35RxejwL8OGDXPZgAEDXMZiIv/Wo0f8+6B77723y/baay+XVe6VAWoW69e2227bgpGgDA499FCXnXrqqS4bOnRodP/999+/0HEuuugil73++usuiy3iJ8VfsyxcuLDQsVPCFTUAAAAASAwTNQAAAABIDBM1AAAAAEgMEzUAAAAASEy3Xkzktddec9nbb78d3bYZi4l0dJPj6tWrXfb1r3/dZRs2bHDZbbfdVvvAUGo33HCDy0aNGtWUY8cWLdlhhx1c9thjj0X3jy1yMXDgwJrHhU8aPXq0y5566qkWjCQfHS20c9ZZZ7ksdtP7Cy+8UPcxofxGjBjhsvPPP7/Qvh117phjjnHZqlWrujYwZOmkk05y2TXXXOOyPn36uKyjBZEeffRRl+26664uu/LKKwuMsOPjxB7z5JNPLvSYKeGKGgAAAAAkhokaAAAAACSGiRoAAAAAJIaJGgAAAAAkhokaAAAAACSmW6/6+M4777hs/Pjx0W1jqx79+c9/dtm0adMKHfuZZ55x2RFHHBHddv369S7bf//9XTZu3LhCx0b3ddBBB7nsW9/6lss6WkXp0zpajfHee+912dSpU132+uuvuyz2/+rdd9+NHucb3/iGy4qOHcX16MH39Lrq5ptvLrzt8uXLGzgSlNWQIUNcNmPGDJcVXbW6o1X2Xn311a4NDMnbemv/8n/QoEEuu+mmm1y2/fbbu+zxxx932WWXXRY99hNPPOGybbbZxmV33XWXy4488sjoY8YsXry48LYp46svAAAAACSGiRoAAAAAJIaJGgAAAAAkptOJmplNN7M3zOzZzbLeZvaQmS2v/L1zY4cJdA29RW7oLHJDZ5EbOovcWAhhyxuYHS5pnaRbQwhfqWRXSHonhDDFzC6WtHMIYUKnBzPb8sES1qtXL5etXbvWZTfccIPLzjzzTJedeuqpLrvjjjuqHF33EUIotFJEvXqbc2fb2tpcNn/+fJfFuh1z//33u2zUqFHRbYcOHeqygQMHuiy24MKbb75ZaDyS9PHHH7vs/fffLzSeJUuWFD5OLXLrbOzj9NRTT7ns7rvvdtlpp51Wy6FL5cknn4zmgwcPdtlhhx3msgULFtR9TF3wdAjBryzwKal0truKLfTw/e9/v9C+jz76qMuGDx9e65BaqamdreyXbW/HjBnjsqILID300EMuO+mkk1y2Zs2awuOJvSaeOXNmoX1XrlwZzWOLo3Tl9UUzFHl90OkVtRDC45I+vTzisZJuqbx9i6SRXR4d0ED0Frmhs8gNnUVu6CxyU+09an1DCO2Vt/8hqW+dxgM0Er1FbugsckNnkRs6i2TV/HvUQghhS5d/zWyspLG1Hgeopy31ls4iRXQWuaGzyA2vaZGaaq+orTKzfpJU+fuNjjYMIdwYQhhU5GeHgQYr1Fs6i4TQWeSGziI3vKZFsqq9ojZP0umSpl
T+vqduI0pU0Zsi33vvvULbnXXWWS678847o9tu3Lix0GOiU6Xs7T777BPNx48f77Idd9zRZW+99ZbL2tvbXXbLLbe4bN26ddFj/+EPfyiUNcJ2223nsgsvvNBlp5xySjOGU6umd/boo492Wew5xb/17et/UmrvvfcuvH9HN8NnqpSfZ1upT58+0Ty2cEjs9cLq1atd9vOf/7z2gZVHaTt72WWXRfNLLrnEZbHFBa+//nqXTZw40WVdWTgk5ic/+UnV+15wwQXRPLWFQ6pVZHn+OyQ9JWlfM1thZmdqU5mPMLPlkkZU3geSQW+RGzqL3NBZ5IbOIjedXlELIcTX35ayXscV5UZvkRs6i9zQWeSGziI31d6jBgAAAABoECZqAAAAAJCYmpfnxydNmjTJZQcddJDLhg4d6rIRI0ZEH/PBBx+seVwoh2222cZlU6dOjW4bWxRi7dq1Lhs9erTLFi9e7LKcF5TYc889Wz2EbOy7776FtnvuuecaPJJ8xP4PxhYYkaQXX3zRZbH/l+ie+vfv77I5c+bU9JjXXnutyx555JGaHhPpufTSS10WWzREkjZs2OCyBx54wGUTJkxw2QcffFBoPNtuu200P/LII10W+xptZi6LLYJzzz2lWfsliitqAAAAAJAYJmoAAAAAkBgmagAAAACQGCZqAAAAAJAYFhOps/Xr17vsrLPOctmSJUtcdtNNN0UfM3bTb2yxh+uuu85lsd80j3wdeOCBLostGtKRY4891mWPPfZYTWNC97Ro0aJWD6GuevXq5bKjjjrKZaeeeqrLYjfHd+Syyy5z2erVqwvvj3KLdW7gwIGF93/44Yddds0119Q0JqRnp512ctk555zjso5eA8YWDhk5cmTV4/nSl77kslmzZkW3jS2wF/O73/3OZVdccUXXBlYCXFEDAAAAgMQwUQMAAACAxDBRAwAAAIDEMFEDAAAAgMSwmEgTvPzyyy4bM2aMy2bMmBHd/7TTTiuUffazn3XZrbfe6rL29vbocZC+q6++2mVmFt02tkhI2RYO6dHDf69p48aNLRhJ99O7d++6P+YBBxzgsli/R4wY4bI99tjDZT179nTZKaecEj12rEsffPCByxYuXOiyDz/80GVbbx3/8vr0009Hc3Q/scUbpkyZUnj/J554wmWnn366y957772uDQzJi31u69OnT+H9L7jgApd97nOfc9kZZ5zhsu985zsu+8pXvuKyHXbYIXrs2AInsez22293WWzBvrLjihoAAAAAJIaJGgAAAAAkhokaAAAAACSGiRoAAAAAJKbTiZqZTTezN8zs2c2ySWa20syeqfw5urHDBIqjs8gRvUVu6CxyQ2eRmyKrPs6U9CtJn14+8H+GEKbWfUTdxNy5c122fPny6Laxlf6GDx/usssvv9xle+21l8smT54cPc7KlSujeYZmqgSdPeaYY1zW1tbmsthqSZI0b968uo8pNbEVHmPPxzPPPNOM4dRqphLobWylw9hz+pvf/MZll1xySU3HHjhwoMtiqz5+9NFHLnv//fddtnTpUpdNnz49euzFixe7LLZK6qpVq1y2YsUKl2233XbR47zwwgvRPFMzlUBnc9C/f3+XzZkzp6bHfOWVV1wW6yc+YaZK0NkNGza47M0333TZrrvuGt3/b3/7m8s6ei1RxOuvv+6yNWvWRLft16+fy9566y2X3XvvvVWPp0w6vaIWQnhc0jtNGAtQF3QWOaK3yA2dRW7oLHJTyz1q55nZXyuXkXfuaCMzG2tmi83Mf7sSaC46ixx12ls6i8TQWeSG1wdIUrUTtV9L+qKkNkntkq7qaMMQwo0hhEEhhEFVHguoBzqLHBXqLZ1FQugscsPrAySrqolaCGFVCOHjEMJGSTdJOqS+wwLqi84iR/QWuaGzyA2dRcqKLCbimFm/EEJ75d3jJD27pe1RzLPPxp/GE0880WXf/va3XTZjxgyXnX322S4bMGBA9DhHHHFEZ0PMVo6djS1G0LNnT5e98cYb0f3vvPPOuo+pGbbZZhuXTZo0qfD+8+fPd9mPf/zjWobUMq3o7TnnnOOyV1991WWHHXZY3Y/92muvuez3v/+9y55//nmXLViwoO7jiRk7dqzLYjfsxxZ66A5y/FzbDBMmTHBZbDGkrpgyZUpN+2OTHDu7evVql40cOdJl9913X3T/3r17u+zll1922T333OOymTNnuuydd/xtf7Nnz44eO7aYSEfbosBEzczukDRMUh8zWyHpv0saZmZtkoKkv0vyswGgRegsckRvkRs6i9zQWeSm04laCGFUJP5tA8YC1AWdRY7oLXJDZ5EbOovc1LLqIwAAAACgAZioAQAAAEBiqlpMBM0Vu2n0tttuc9nNN9/ssq239h/iww8/PHqcYcOGuezRRx/tfIBoqQ8//DCat7e3R/OUxBYOmThxosvGjx8f3X/FihUuu+oqv7LyunXrqhgd/uWXv/xlq4eQjOHDhxfabs6cOQ0eCVLV1tbmsiOPPLLqx4st6CBJy5Ytq/oxUT4LFy50WWyho0aIva4cOnRodNvYIjrddfGlIriiBgAAAACJYaIGAAAAAIlhogYAAAAAiWGiBgAAAACJYTGRhAwcODCaf/e733XZwQcf7LLYwiExS5cujeaPP/54of2Rlnnz5rV6CIXEbrCPLRJy0kknuayjm+lPOOGE2gcGNMDcuXNbPQS0yIMPPuiynXfeudC+CxYscNmYMWNqHRLQUNttt53LYouGSFIIwWWzZ8+u+5jKgitqAAAAAJAYJmoAAAAAkBgmagAAAACQGCZqAAAAAJAYFhNpgn333ddl5513nsuOP/746P677bZb1cf++OOPXdbe3h7dtqMbP9EaZlYoGzlyZHT/cePG1X1MRf3oRz9y2U9/+lOX7bjjji6bNWuWy0aPHl2fgQFAg+2yyy4uK/r19frrr3fZunXrah4T0EgPPPBAq4dQWlxRAwAAAIDEMFEDAAAAgMQwUQMAAACAxHQ6UTOzz5vZI2a21MyeM7Nxlby3mT1kZssrfxf7bY5Ag9FZ5IbOIkf0Frmhs8hNkStqH0m6MISwn6TBks41s/0kXSzp4RDCAEkPV94HUkBnkRs6ixzRW+SGziIrna76GEJol9ReeXutmT0vaXdJx0oaVtnsFkmPSprQkFEmKrYa46hRo1wWW+Gxf//+dR/P4sWLXTZ58mSXzZs3r+7HTklZOhtCKJR1tCrotGnTXDZ9+nSXvf322y4bPHiwy0477TSXHXDAAdFj77HHHi577bXXXBZbKSq26lnZlaWz3VVsNdZ99tknuu2CBQsaPZymobfSjBkzXNajR/V3lTz55JO1DAedoLON8c1vfrPVQyitLn02MbP+kg6UtFBS30rhJekfkvrWdWRAHdBZ5IbOIkf0Frmhs8hB4d+jZmY7SJoj6YchhDWbfwcxhBDMzH+rf9N+YyWNrXWgQFfRWeSGziJH1fSWzqKV+FyLXBS6omZmn9GmQs8KIdxdiVeZWb/Kv/eT9EZs3xDCjSGEQSGEQfUYMFAEnUVu6CxyVG1v6Sxahc+1yEmRV
R9N0m8lPR9CuHqzf5on6fTK26dLuqf+wwO6js4iN3QWOaK3yA2dRW6K/Ojjf5V0mqT/MLNnKtklkqZIusvMzpT0qqQTGzPE5urbN/5jyfvtt5/LfvWrX7nsy1/+ct3HtHDhQpddeeWVLrvnHv95ZePGjXUfTwa6VWe32mqraH7OOee47IQTTnDZmjVrXDZgwICaxhS7If6RRx5x2aWXXlrTcUqkW3W2bGKL/NSyoERGuk1v29raovmIESNcFvu6u2HDBpddd911Llu1alUVo0MXdJvONtMXvvCFVg+htIqs+viEJL+k1SbD6zscoHZ0Frmhs8gRvUVu6Cxy0y2+5QcAAAAAOWGiBgAAAACJYaIGAAAAAIkp/HvUcte7d2+X3XDDDS7r6Ibhet8oGVts4aqrropu+8ADD7jsgw8+qOt4kJ6nnnrKZYsWLXLZwQcfXPgxd9ttN5d1tIDOp7399tsumz17dnTbcePGFR4TUEZf+9rXovnMmTObOxDUxU477RTNY59TY1auXOmyiy66qKYxAan44x//6LKOFlTqpovcVY0ragAAAACQGCZqAAAAAJAYJmoAAAAAkBgmagAAAACQmOwXEzn00ENdNn78eJcdcsghLtt9993rPp7333/fZdOmTXPZ5Zdf7rL169fXfTzI14oVK1x2/PHHu+zss8+O7j9x4sSqj33NNde47Ne//rXLXnrppaqPAZSFWUe/PxcAyu/ZZ5912fLly6Pbxhbn++IXv+iyN998s/aBlQBX1AAAAAAgMUzUAAAAACAxTNQAAAAAIDFM1AAAAAAgMdkvJnLccccVyopaunRpNL/vvvtc9tFHH7nsqquuctnq1aurHg+wufb2dpdNmjQpum1HOYDq3X///S773ve+14KRoJleeOGFaP7kk0+6bMiQIY0eDpC82KJ5knTzzTe7bPLkyS47//zzXdbRa/Qy44oaAAAAACSGiRoAAAAAJIaJGgAAAAAkptOJmpl93sweMbOlZvacmY2r5JPMbKWZPVP5c3Tjhwt0js4iN3QWuaGzyBG9RW4shLDlDcz6SeoXQlhiZv9F0tOSRko6UdK6EMLUwgcz2/LBgE6EEKyzbegsUkJnkaGnQwiDtrQBnUViOu2sRG+bqVevXtH8rrvuctmIESNcdvfdd7vsjDPOcNn69eurGF0airw+6HTVxxBCu6T2yttrzex5SbvXPjygMegsckNnkRs6ixzRW+SmS/eomVl/SQdKWliJzjOzv5rZdDPbuc5jA2pGZ5EbOovc0FnkiN4iB4Unama2g6Q5kn4YQlgj6deSviipTZu+O+F/gdim/caa2WIzW1yH8QKF0Vnkhs4iN3QWOaK3yEWhiZqZfUabCj0rhHC3JIUQVoUQPg4hbJR0k6RDYvuGEG4MIQwq8rPDQL3QWeSGziI3dBY5orfISZFVH03SbyU9H0K4erO832abHSfp2foPD+g6Oovc0Fnkhs4iR/QWuSmy6uMQSX+U9B+SNlbiSySN0qZLxEHS3yWdXblJc0uPxQo5qEnBFfToLJJBZ5GhIqs+0lmkpOiqj/S2xWKrQU6ePNllP/jBD1w2cOBAly1durQ+A2uBeq36+ISk2AP972oGBTQanUVu6CxyQ2eRI3qL3HRp1UcAAAAAQOMxUQMAAACAxDBRAwAAAIDEdLqYSF0Pxo2XqFGRGy/ric6iVnQWGSq0MEO90FnUQVM7K9Fb1K7I6wOuqAEAAABAYpioAQAAAEBimKgBAAAAQGKYqAEAAABAYjr9hdd19pakVytv96m8XwZlOhcp3fPZqwXHpLN5SPV86Gz9lOlcpLTPp9m9LWtnpXKdT8rn0srPtSk/L9Uo0/mkfC6FOtvUVR8/cWCzxc1eoadRynQuUvnOp17K9LyU6Vyk8p1PvZTpeSnTuUjlO596KdvzUqbzKdO51FPZnpcynU8ZzoUffQQAAACAxDBRAwAAAIDEtHKidmMLj11vZToXqXznUy9lel7KdC5S+c6nXsr0vJTpXKTynU+9lO15KdP5lOlc6qlsz0uZzif7c2nZPWoAAAAAgDh+9BEAAAAAEtP0iZqZHWVmy8zsJTO7uNnHr5WZTTezN8zs2c2y3mb2kJktr/y9cyvHWJSZfd7MHjGzpWb2nJmNq+RZnk+j0Nl00Nli6Gw66GxxOfe2TJ2V6G1ROXdWKldvy9rZpk7UzGwrSddJ+m+S9pM0ysz2a+YY6mCmpKM+lV0s6eEQwgBJD1fez8FHki4MIewnabCkcysfj1zPp+7obHLobCfobHLobAEl6O1MlaezEr3tVAk6K5Wrt6XsbLOvqB0i6aUQwishhA2SZks6tsljqEkI4XFJ73wqPlbSLZW3b5E0sqmDqlIIoT2EsKTy9lpJz0vaXZmeT4PQ2YTQ2ULobELobGFZ97ZMnZXobUFZd1YqV2/L2tlmT9R2l/Sfm72/opLlrm8Iob3y9j8k9W3lYKphZv0lHShpoUpwPnVEZxNFZztEZxNFZ7eojL0txceY3naojJ2VSvAxLlNnWUykzsKmZTSzWkrTzHaQNEfSD0MIazb/txzPB12T48eYznZvOX6M6Wz3luvHmN52bzl+jMvW2WZP1FZK+vxm7+9RyXK3ysz6SVLl7zdaPJ7CzOwz2lToWSGEuytxtufTAHQ2MXS2U3Q2MXS2kDL2NuuPMb3tVBk7K2X8MS5jZ5s9UVskaYCZ7W1mPSWdLGlek8fQCPMknV55+3RJ97RwLIWZmUn6raTnQwhXb/ZPWZ5Pg9DZhNDZQuhsQuhsYWXsbbYfY3pbSBk7K2X6MS5tZ0MITf0j6WhJL0p6WdJPmn38Ooz/Dkntkv6fNv088pmSdtGmlWSWS/q/knq3epwFz2WINl0C/qukZyp/js71fBr4PNHZRP7Q2cLPE51N5A+d7dJzlW1vy9TZyvnQ22LPU7adrYy/NL0ta2etcnIAAAAAgESwmAgAAAAAJIaJGgAAAAAkhokaAAAAACSGiRoAAAAAJIaJGgAAAAAkhokaAAAAACSGiRoAAAAAJIaJGgAAAAAk5v8DVMTDbjI6QLUAAAAASUVORK5CYII=\n", + "text/plain": [ + "
" + ] + }, + "metadata": { + "needs_background": "light" + }, + "output_type": "display_data" + } + ], + "source": [ + "# Browse examples\n", + "fig, axes = plt.subplots(1, 5)\n", + "\n", + "for index in range(5):\n", + " show_image(axes[index], get_sample(index))" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAA2oAAAGgCAYAAADbx5TwAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4zLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvnQurowAAIABJREFUeJzt3Xu41WWd///XDShHURBFBBVPaegQGio6flMTzRwLjyVTitUVXZplc5XpmPW1HBvHtEmzmjyiRR4mPDd90zyEpJKAZooiiqLgFvCAbA6KwP37g9VvkPd7sT97r8O+789+Pq6Li82Ltdbn/qz1Xp/1ufda93uFGKMAAAAAAOno1tkDAAAAAAB8EBM1AAAAAEgMEzUAAAAASAwTNQAAAABIDBM1AAAAAEgMEzUAAAAASAwTNQAAAABIDBM1AAAAAEhMTRO1EMJRIYQ5IYQXQgjn1mtQQKNQs8gRdYvcULPIDTWLFIUYY8euGEJ3Sc9LOkLSAkmPSxofY5y9iet0bGNARYwxdPS61Cw6Qy01K7W/bqlZ1MEbMcZtOnplahadoKk1W7kOdYuaFDk/qOUdtf0lvRBjnBdjXC3pZknjarg9oNGoWeSIukWzza/x+tQsmo2aRSnVMlEbKunVDf69oJIBqaJmkSPqFrmhZpEbahZJ6tHoDYQQJkqa2OjtAPVCzSI31CxyQ80iR9Qtmq2WidpCSTts8O9hlewDYoxXSbpK4vO86HTULHLUZt1Ss0gMNYvccH6AJNXy0cfHJe0eQtg5hLC5pJMl3VWfYQENQc0iR9QtckPNIjfULJLU4XfUYoxrQghnSvqDpO6SrosxPlO3kQF1Rs0iR9QtckPNIjfULFLV4fb8HdoYbxOjRrW2Om8vaha1omaRoZkxxtHN2hg1izpoas1K1C1q1+j2/AAAAACABmCiBgAAAACJYaIGAAAAAIlhogYAAAAAiWGiBgAAAACJYaIGAAAAAIlhogYAAAAAiWGiBgAAAACJYaIGAAAAAIlhogYAAAAAiWGiBgAAAACJYaIGAAAAAIlhogYAAAAAienR2QMAUD4f/ehHTXbmmWea7NRTTzXZjTfeaLKf/vSn7nZmzZrVgdEBAACkj3fUAAAAACAxTNQAAAAAIDFM1AAAAAAgMTWtUQshvCypVdJaSWtijKPrMSigkahb5IaaRW6oWeSGmkWKQoyx41deX9SjY4xvFLx8xzeWse7du5tsyy23rOk2vcYMffr0Mdkee+xhsq9+9avubV566aUmGz9+vMneffddk1188cUm+/73v+9upxYxxlDrbbSnbrtqzRY1atQoN3/ggQdM1r9//w5v55133nHzrbfeusO32SzULDZ0+OGHm2zy5MkmO+SQQ0w2Z86chozJMbPWk1RqNm3nn3++yaq9ZnfrZj98deihh5rsT3/6U83jqkFTa7ZyeeoWNSlyfsBHHwEAAAAgMbVO1KKke0MIM0MIE+sxIKAJqFvkhppFbqhZ5IaaRXJq/R61g2OMC0MI20q6L4TwXIxx6oYXqBQ7BY+UbLJuqVkkiJpFbqhZ5IZzWiSnpnfUYowLK38vlnS7pP2dy1wVYxzNokykoq26pWaRGmoWuaFmkRvOaZGiDr+jFkLoK6lbjLG18vORkn5Qt5F1kh133NHNN998c5MddNBBJjv44INNttVWW5nshBNO6MDo2m/BggUmu+KKK9zLHnfccSZrbW012V//+leTdfIi4sLKWrfNsP/+5jVLU6ZMcS/rNcvxGhd59bV69WqTVWsaMmbMGJPNmjWr0G3mIoWa/djHPmYy7zG5/fbbmzGcrO23334me/zxxzthJI2TQs3if5122mkmO+ecc0y2bt26wrdZSyO6FFGzSFUtH30cLOn2EMLfb+c3Mcb/V5dRAY1D3SI31CxyQ80iN9QsktThiVqMcZ6kj9RxLEDDUbfIDTWL3FCzyA01i1TRnh8AAAAAEsNEDQAAAAASU2t7/qyNGjXKZA888IB7Wa85Qmq8hcDnn3++yZYvX+5ef/LkySZraWkx2dtvv22yOXPmFBkiEtSnTx+T7bvvvib79a9/bbIhQ4bUtO25c+ea7JJLLjHZzTff7F7/z3/+s8m8mv/3f//3DowOf3fooYeabPfddzcZzUT+V7du/u9Bd955Z5PttNNOJquslQFq5tVXr169OmEkKIMDDjjAZJ///OdNdsghh7jX32uvvQpt51vf+pbJXnvtNZN5Tfwk/5xl+vTphbadEt5RAwAAAIDEMFEDAAAAgMQwUQMAAACAxDBRAwAAAIDEdOlmIq+88orJ3nzzTfeyzWgmUm2R49KlS0122GGHmWz16tUm+9WvflX7wFBqv/zlL002fvz4pmzba1rSr18/k/3pT39yr+81uRg5cmTN48IHnXrqqSZ79NFHO2Ek+ajWaOfLX/6yybxF788991zdx4TyGzt2rMm+9rWvFbputZo75phjTLZo0aL2DQxZ+uxnP2uyyy+/3GSDBg0yWbWGSA899JDJttlmG5P96Ec/KjDC6tvxbvPkk08udJsp4R01AAAAAEgMEzUAAAAASAwTNQAAAABIDBM1AAAAAEgMEzUAAAAASEyX7vr41ltvmezss892L+t1PXriiSdMdsUVVxTa9pNPPmmyI444wr3sihUrTLbXXnuZ7Kyzziq0bXRdH/3oR032T//0Tyar1kVpY9W6Md59990mu/TSS0322muvmcx7Xr399tvudj7+8Y+brOjYUVy3bvxOr72uueaawpedO3duA0eCsjr44INNdv3115usaNfqal325s+f376BIXk9etjT/9GjR5vs6quvNlmfPn1MNnXqVJNdeOGF7ranTZtmsp49e5rs1ltvNdmRRx7p3qZnxowZhS+bMl59AQAAACAxTNQAAAAAIDFM1AAAAAAgMW1O1EII14UQFocQnt4gGxhCuC+EMLfy94DGDhNoH+oWuaFmkRtqFrmhZpGbEGPc9AVC+Jik5ZJujDHuXckukfRWjPHiEMK5kgbEGM9pc2MhbHpjCevfv7/JWltbTfbLX/7SZF/60pdM9vnPf95kN910UwdH13XEGAt1iqhX3eZcs6NGjTLZAw88YDKvtj2///3vTTZ+/Hj3socccojJRo4caTKv4cKSJUs
KjUeS1q5da7KVK1cWGs+sWbMKb6cWudWs9zg9+uijJrvttttMdsopp9Sy6VJ55JFH3HzMmDEmO+igg0z22GOP1X1M7TAzxmg7C2wklZrtqrxGD1/84hcLXfehhx4y2eGHH17rkDpTU2u2cr1s6/a0004zWdEGSPfdd5/JPvvZz5ps2bJlhcfjnRNPmjSp0HUXLlzo5l5zlPacXzRDkfODNt9RizFOlbRxe8Rxkm6o/HyDpGPbPTqggahb5IaaRW6oWeSGmkVuOtqef3CMsaXy8+uSBle7YAhhoqSJHdwOUE+F6paaRUKoWeSGmkVuOKdFsmr+HrUYY9zU278xxqskXSXl/TYxymVTdUvNIkXULHJDzSI3nNMiNR3t+rgohDBEkip/L67fkICGoW6RG2oWuaFmkRtqFsnq6Dtqd0maIOniyt931m1EiSq6KPKdd94pdLkvf/nLJrvlllvcy65bt67QbaJNpazbD33oQ25+9tlnm2zLLbc02RtvvGGylpYWk91www0mW758ubvt3/3ud4WyRujdu7fJvvnNb5rsc5/7XDOGU6um1+zRRx9tMu8+xf8aPNh+UmrnnXcufP1qi+EzVcrjbGcaNGiQm3uNQ7zzhaVLl5rs3/7t32ofWHmUtmYvvPBCNz/vvPNM5jUX/PnPf26y888/32TtaRzi+c53vtPh6379619389Qah3RUkfb8N0l6VNIeIYQFIYQvaX0xHxFCmCtpbOXfQDKoW+SGmkVuqFnkhppFbtp8Ry3G6PfflrLu44pyo26RG2oWuaFmkRtqFrnp6Bo1AAAAAECDMFEDAAAAgMTU3J4fH3TBBReY7KMf/ajJDjnkEJONHTvWvc1777235nGhHHr27GmySy+91L2s1xSitbXVZKeeeqrJZsyYYbKcG0rsuOOOnT2EbOyxxx6FLvfMM880eCT58J6DXoMRSXr++edN5j0v0TUNHz7cZFOmTKnpNn/605+a7MEHH6zpNpGe733veybzmoZI0urVq032hz/8wWTnnHOOyVatWlVoPL169XLzI4880mTea3QIwWReE5w77yxN7xcX76gBAAAAQGKYqAEAAABAYpioAQAAAEBimKgBAAAAQGJoJlJnK1asMNmXv/xlk82aNctkV199tXub3qJfr9nDz372M5N53zSPfO2zzz4m85qGVDNu3DiT/elPf6ppTOiaHn/88c4eQl3179/fZEcddZTJPv/5z5vMWxxfzYUXXmiypUuXFr4+ys2ruZEjRxa+/v3332+yyy+/vKYxIT1bbbWVyc444wyTVTsH9BqHHHvssR0ez2677WayyZMnu5f1Gux5fvvb35rskksuad/ASoB31AAAAAAgMUzUAAAAACAxTNQAAAAAIDFM1AAAAAAgMTQTaYIXX3zRZKeddprJrr/+evf6p5xySqGsb9++JrvxxhtN1tLS4m4H6fvxj39sshCCe1mvSUjZGod062Z/17Ru3bpOGEnXM3DgwLrf5kc+8hGTefU9duxYkw0bNsxkm2++uck+97nPudv2amnVqlUmmz59usnee+89k/Xo4b+8zpw5083R9XjNGy6++OLC1582bZrJJkyYYLJ33nmnfQND8rxj26BBgwpf/+tf/7rJtt12W5N94QtfMNmnP/1pk+29994m69evn7ttr8GJl/361782mdewr+x4Rw0AAAAAEsNEDQAAAAASw0QNAAAAABLDRA0AAAAAEtPmRC2EcF0IYXEI4ekNsgtCCAtDCE9W/hzd2GECxVGzyBF1i9xQs8gNNYvcFOn6OEnSlZI2bh/4nzHGS+s+oi7i9ttvN9ncuXPdy3qd/g4//HCT/fCHPzTZTjvtZLKLLrrI3c7ChQvdPEOTVIKaPeaYY0w2atQok3ndkiTprrvuqvuYUuN1ePTujyeffLIZw6nVJCVQt16nQ+8+/a//+i+TnXfeeTVte+TIkSbzuj6uWbPGZCtXrjTZ7NmzTXbddde5254xY4bJvC6pixYtMtmCBQtM1rt3b3c7zz33nJtnapISqNkcDB8+3GRTpkyp6TbnzZtnMq8+8QGTVIKaXb16tcmWLFlism222ca9/ksvvWSyaucSRbz22msmW7ZsmXvZIUOGmOyNN94w2d13393h8ZRJm++oxRinSnqrCWMB6oKaRY6oW+SGmkVuqFnkppY1ameGEJ6qvI08oNqFQggTQwgzQgj215VAc1GzyFGbdUvNIjHULHLD+QGS1NGJ2i8k7SpplKQWSZdVu2CM8aoY4+gY4+gObguoB2oWOSpUt9QsEkLNIjecHyBZHZqoxRgXxRjXxhjXSbpa0v71HRZQX9QsckTdIjfULHJDzSJlRZqJGCGEITHGlso/j5P09KYuj2Keftq/Gz/zmc+Y7FOf+pTJrr/+epN95StfMdnuu+/ubueII45oa4jZyrFmvWYEm2++uckWL17sXv+WW26p+5iaoWfPnia74IILCl//gQceMNm//uu/1jKkTtMZdXvGGWeYbP78+SY76KCD6r7tV155xWR33HGHyZ599lmTPfbYY3Ufj2fixIkm8xbse40euoIcj7XNcM4555jMa4bUHhdffHFN18d6Odbs0qVLTXbsscea7J577nGvP3DgQJO9+OKLJrvzzjtNNmnSJJO99ZZd9nfzzTe72/aaiVS7LApM1EIIN0k6VNKgEMICSf9X0qEhhFGSoqSXJdnZANBJqFnkiLpFbqhZ5IaaRW7anKjFGMc78bUNGAtQF9QsckTdIjfULHJDzSI3tXR9BAAAAAA0ABM1AAAAAEhMh5qJoLm8RaO/+tWvTHbNNdeYrEcP+xB/7GMfc7dz6KGHmuyhhx5qe4DoVO+9956bt7S0uHlKvMYh559/vsnOPvts9/oLFiww2WWX2c7Ky5cv78Do8Hf/8R//0dlDSMbhhx9e6HJTpkxp8EiQqlGjRpnsyCOP7PDteQ0dJGnOnDkdvk2Uz/Tp003mNTpqBO+88pBDDnEv6zXR6arNl4rgHTUAAAAASAwTNQAAAABIDBM1AAAAAEgMEzUAAAAASAzNRBIycuRINz/xxBNNtt9++5nMaxzimT17tptPnTq10PWRlrvuuquzh1CIt8DeaxLy2c9+1mTVFtOfcMIJtQ8MaIDbb7+9s4eATnLvvfeabMCAAYWu+9hjj5nstNNOq3VIQEP17t3bZF7TEEmKMZrs5ptvrvuYyoJ31AAAAAAgMUzUAAAAACAxTNQAAAAAIDFM1AAAAAAgMTQTaYI99tjDZGeeeabJjj/+ePf62223XYe3vXbtWpO1tLS4l6228BOdI4RQKDv22GPd65911ll1H1NR//Iv/2Ky7373uybbcsstTTZ58mSTnXrqqfUZGAA02NZbb22yoq+vP//5z022fPnymscENNIf/vCHzh5CafGOGgAAAAAkhokaAAAAACSGiRoAAAAAJKbNiVoIYYcQwoMhhNkhhGdCCGdV8oEhhPtCCHMrfxf7NkegwahZ5IaaRY6oW+SGmkVuiryjtkbSN2OMIySNkfTVEMIISedKuj/GuLuk+yv/BlJAzSI31C
xyRN0iN9QsstJm18cYY4uklsrPrSGEZyUNlTRO0qGVi90g6SFJ5zRklInyujGOHz/eZF6Hx+HDh9d9PDNmzDDZRRddZLK77rqr7ttOSVlqNsZYKKvWFfSKK64w2XXXXWeyN99802Rjxowx2SmnnGKyj3zkI+62hw0bZrJXXnnFZF6nKK/rWdmVpWa7Kq8b64c+9CH3so899lijh9M01K10/fXXm6xbt46vKnnkkUdqGQ7aQM02xic+8YnOHkJptetoEkIYLmkfSdMlDa4UvCS9LmlwXUcG1AE1i9xQs8gRdYvcULPIQeHvUQsh9JM0RdI3YozLNvwNYowxhhDsr/rXX2+ipIm1DhRoL2oWuaFmkaOO1C01i87EsRa5KPSOWghhM60v6Mkxxtsq8aIQwpDK/w+RtNi7bozxqhjj6Bjj6HoMGCiCmkVuqFnkqKN1S82is3CsRU6KdH0Mkq6V9GyM8ccb/NddkiZUfp4g6c76Dw9oP2oWuaFmkSPqFrmhZpGbIh99/EdJp0j6WwjhyUp2nqSLJd0aQviSpPmSPtOYITbX4MH+x5JHjBhhsiuvvNJke+65Z93HNH36dJP96Ec/Mtmdd9rjyrp16+o+ngx0qZrt3r27m59xxhkmO+GEE0y2bNkyk+2+++41jclbEP/ggw+a7Hvf+15N2ymRLlWzZeM1+amloURGukzdjho1ys3Hjh1rMu91d/Xq1Sb72c9+ZrJFixZ1YHRohy5Ts820yy67dPYQSqtI18dpkmxLq/UOr+9wgNpRs8gNNYscUbfIDTWL3HSJX/kBAAAAQE6YqAEAAABAYpioAQAAAEBiCn+PWu4GDhxosl/+8pcmq7ZguN4LJb1mC5dddpl72T/84Q8mW7VqVV3Hg/Q8+uijJnv88cdNtt9++xW+ze22285k1RrobOzNN9802c033+xe9qyzzio8JqCMDjzwQDefNGlScweCuthqq63c3DumehYuXGiyb33rWzWNCUjFww8/bLJqDZW6aJO7DuMdNQAAAABIDBM1AAAAAEgMEzUAAAAASAwTNQAAAABITPbNRA444ACTnX322Sbbf//9TTZ06NC6j2flypUmu+KKK0z2wx/+0GQrVqyo+3iQrwULFpjs+OOPN9lXvvIV9/rnn39+h7d9+eWXm+wXv/iFyV544YUObwMoixCqfX8uAJTf008/bbK5c+e6l/Wa8+26664mW7JkSe0DKwHeUQMAAACAxDBRAwAAAIDEMFEDAAAAgMQwUQMAAACAxGTfTOS4444rlBU1e/ZsN7/nnntMtmbNGpNddtllJlu6dGmHxwNsqKWlxWQXXHCBe9lqOYCO+/3vf2+yk046qRNGgmZ67rnn3PyRRx4x2cEHH9zo4QDJ85rmSdI111xjsosuushkX/va10xW7Ry9zHhHDQAAAAASw0QNAAAAABLDRA0AAAAAEtPmRC2EsEMI4cEQwuwQwjMhhLMq+QUhhIUhhCcrf45u/HCBtlGzyA01i9xQs8gRdYvchBjjpi8QwhBJQ2KMs0IIW0iaKelYSZ+RtDzGeGnhjYWw6Y0BbYgxhrYuQ80iJdQsMjQzxjh6UxegZpGYNmtWom6bqX///m5+6623mmzs2LEmu+2220z2hS98wWQrVqzowOjSUOT8oM2ujzHGFkktlZ9bQwjPShpa+/CAxqBmkRtqFrmhZpEj6ha5adcatRDCcEn7SJpeic4MITwVQrguhDCgzmMDakbNIjfULHJDzSJH1C1yUHiiFkLoJ2mKpG/EGJdJ+oWkXSWN0vrfTtgvEFt/vYkhhBkhhBl1GC9QGDWL3FCzyA01ixxRt8hFoYlaCGEzrS/oyTHG2yQpxrgoxrg2xrhO0tWS9veuG2O8KsY4ushnh4F6oWaRG2oWuaFmkSPqFjkp0vUxSLpW0rMxxh9vkA/Z4GLHSXq6/sMD2o+aRW6oWeSGmkWOqFvkpkjXx4MlPSzpb5LWVeLzJI3X+reIo6SXJX2lskhzU7dFhxzUpGAHPWoWyaBmkaEiXR+pWaSkaNdH6raTed0gL7roIpOdfvrpJhs5cqTJZs+eXZ+BdYJ6dX2cJsm7of/pyKCARqNmkRtqFrmhZpEj6ha5aVfXRwAAAABA4zFRAwAAAIDEMFEDAAAAgMS02Uykrhtj4SVqVGThZT1Rs6gVNYsMFWrMUC/ULOqgqTUrUbeoXZHzA95RAwAAAIDEMFEDAAAAgMQwUQMAAACAxDBRAwAAAIDEtPmF13X2hqT5lZ8HVf5dBmXaFynd/dmpE7ZJzeYh1f2hZuunTPsipb0/za7bstasVK79SXlfOvNYm/L90hFl2p+U96VQzTa16+MHNhzCjGZ36GmUMu2LVL79qZcy3S9l2hepfPtTL2W6X8q0L1L59qdeyna/lGl/yrQv9VS2+6VM+1OGfeGjjwAAAACQGCZqAAAAAJCYzpyoXdWJ2663Mu2LVL79qZcy3S9l2hepfPtTL2W6X8q0L1L59qdeyna/lGl/yrQv9VS2+6VM+5P9vnTaGjUAAAAAgI+PPgIAAABAYpo+UQshHBVCmBNCeCGEcG6zt1+rEMJ1IYTFIYSnN8gGhhDuCyHMrfw9oDPHWFQIYYcQwoMhhNkhhGdCCGdV8iz3p1Go2XRQs8VQs+mgZovLuW7LVLMSdVtUzjUrlatuy1qzTZ2ohRC6S/qZpE9KGiFpfAhhRDPHUAeTJB21UXaupPtjjLtLur/y7xyskfTNGOMISWMkfbXyeOS6P3VHzSaHmm0DNZscaraAEtTtJJWnZiXqtk0lqFmpXHVbyppt9jtq+0t6IcY4L8a4WtLNksY1eQw1iTFOlfTWRvE4STdUfr5B0rFNHVQHxRhbYoyzKj+3SnpW0lBluj8NQs0mhJothJpNCDVbWNZ1W6aalajbgrKuWalcdVvWmm32RG2opFc3+PeCSpa7wTHGlsrPr0sa3JmD6YgQwnBJ+0iarhLsTx1Rs4miZquiZhNFzW5SGeu2FI8xdVtVGWtWKsFjXKaapZlIncX1bTSzaqUZQugnaYqkb8QYl234fznuD9onx8eYmu3acnyMqdmuLdfHmLrt2nJ8jMtWs82eqC2UtMMG/x5WyXK3KIQwRJIqfy/u5PEUFkLYTOsLenKM8bZKnO3+NAA1mxhqtk3UbGKo2ULKWLdZP8bUbZvKWLNSxo9xGWu22RO1xyXtHkLYOYSwuaSTJd3V5DE0wl2SJlR+niDpzk4cS2EhhCDpWknPxhh/vMF/Zbk/DULNJoSaLYSaTQg1W1gZ6zbbx5i6LaSMNStl+hiXtmZjjE39I+loSc9LelHSd5q9/TqM/yZJLZLe1/rPI39J0tZa30lmrqQ/ShrY2eMsuC8Ha/1bwE9JerLy5+hc96eB9xM1m8gfarbw/UTNJvKHmm3XfZVt3ZapZiv7Q90Wu5+yrdnK+EtTt2Wt2VDZOQAAAABAImgmAgAAAACJYaIGAAAAAIlhogYAAAAAiWGiBgAAAACJYaIGAAAAAIlhogYAAAAAiWGiBgAAAACJYaIGAAAAAIlhogYAAAAAiWGiBgAAA
ACJYaIGAAAAAIlhogYAAAAAiWGiBgAAAACJYaIGAAAAAIlhogYAAAAAiWGiBgAAAACJYaIGAAAAAIlhogYAAAAAiWGiBgAAAACJYaIGAAAAAIlhogYAAAAAiWGiBgAAAACJYaIGAAAAAIlhogYAAAAAiWGiBgAAAACJYaIGAAAAAIlhogYAAAAAiWGiBgAAAACJYaIGAAAAAIlhogYAAAAAiWGiBgAAAACJYaIGAAAAAIlhogYAAAAAiWGiBgAAAACJYaIGAAAAAIlhogYAAAAAiWGiBgAAAACJYaIGAAAAAIlhogYAAAAAiWGiBgAAAACJYaIGAAAAAIlhogYAAAAAiWGiBgAAAACJYaIGAAAAAIlhogYAAAAAiWGiBgAAAACJYaIGAAAAAIlhogYAAAAAiWGiBgAAAACJYaIGAAAAAIlhogYAAAAAiWGiBgAAAACJYaIGAAAAAIlhogYAAAAAiWGiBgAAAACJYaIGAAAAAIlhogYAAAAAiWGiBgAAAACJYaIGAAAAAIlhogYAAAAAiWGiBgAAAACJYaIGAAAAAIlhogYAAAAAiWGiBgAAAACJYaIGAAAAAIlhogYAAAAAiWGiBgAAAACJYaIGAAAAAIlhogYAAAAAiWGiBgAAAACJYaIGAAAAAIlhogYAAAAAiWGiBgAAAACJYaIGAAAAAIlhogYAAAAAialpohZCOCqEMCeE8EII4dx6DQpoFGoWOaJukRtqFrmhZpGiEGPs2BVD6C7peUlHSFog6XFJ42OMs6tdp1u3brFbN97EQ8esW7dO69atCx29fkdqNoRAzaLD1q1bpxhjh2tWan/dhhBiCDVtEl1cjPFl3Dx6AAAgAElEQVSNGOM2Hb1+R2qW4yxqsW7duqbWrMQ5LWpT9Jy2Rw3b2F/SCzHGeZIUQrhZ0jhJmypq9evXr4ZNoitbvnx5rTfRoZrt27dvrdtFF7VixYp63Ey76jaEoF69etVju+iiVq1aNb/Gm2hXzXbr1k29e/eucZPoylasWNHUmpXW1+0WW2xR42bRVbW2tha6XC2/Chgq6dUN/r2gkgGpomaRI+oWuaFmkRtqFkmq5R21QkIIEyVNrPzc6M0BNaNmkRtqFrmhZpEj6hbNVstEbaGkHTb497BK9gExxqskXSVJPXr06NiCOKA+2l2z3bt3p2bR2dqs2w1rtlu3btQsOlu7apbjLBLAOS2SVMtHHx+XtHsIYecQwuaSTpZ0V32GBTQENYscUbfIDTWL3FCzSFKH31GLMa4JIZwp6Q+Suku6Lsb4TN1GBtQZNYscUbfIDTWL3FCzSFWH2/N3RI8ePSJdH9FRy5cv15o1a5r6ofDu3btHuj6io1asWKG1a9c2tWa7desW6fqIWqxatWpmjHF0s7bXvXv3SNdH1GLFihVNrVlp/TktXR/RUa2trYXOafkCCAAAAABIDBM1AAAAAEhMw9vzIz3duvnzc+9jsGvXri18feDvvLbFXi2tXLnSZF4d9ujhH6r4uBQA1Ed72s03c9kM0JVxxg0AAAAAiWGiBgAAAACJYaIGAAAAAIlhogYAAAAAiWGiBgAAAACJoetjnRXtiNiejkneZb3uTN7lvOzdd991t9O9e3eTed32inb0a08HKaTPezyrPcbvv/++yfr372+yfv36mczrBLlq1Sp3O0WfGwDQVRU9L1m3bp2be+cG3mW9Y2+12wRQDO+oAQAAAEBimKgBAAAAQGKYqAEAAABAYpioAQAAAEBiaCZSkLcY12tk4C2c9S7nNd/wGndI/kJe7/pe1qtXL5N5DRwkqU+fPibbaqutTLZ8+XKTtba2msxrAOGNEXko2kRGkrbZZhuTnX766SY74IADTPb000+b7Oqrr3a389JLL5nMew4WXUwPtKVos5pqx/OePXsWuux7771nsmpNddA1FW0q5h0TveZO1Y6T3nnAsmXLTObVp9e8rD3N1JCv9jzONAGrjrMXAAAAAEgMEzUAAAAASAwTNQAAAABITE1r1EIIL0tqlbRW0poY4+h6DApoJOoWuaFmkRtqFrmhZpGiejQTOSzG+EYdbqfpvMWL1RbTeg09vIWSq1evLrTtQYMGFcokadtttzWZt0B3++23N9khhxxisj322MPdzg477GCy999/32R/+ctfTHbDDTeYbO7cuSZbunSpu+1OkG3d1pvXyMCrba8W9t57b/c2v/nNb5rsiCOOMNmaNWtMVu154Ln88stNtnjx4kLXzXBBOzWbAO81wmvW0LdvX/f6e+65p8m8Y/ef//znQtvxXgsSWphPzTaZd5z2GticdtppJjv66KPd2/SaifzgBz8w2bRp00zmNcVJ/NhLzW6gaCOu9jQb8845ijbN8y7nHQOl4k0AU8dHHwEAAAAgMbVO1KKke0MIM0MIE+sxIKAJqFvkhppFbqhZ5IaaRXJq/ejjwTHGhSGEbSXdF0J4LsY4dcMLVIp9YuXnGjcH1MUm65aaRYKoWeSGmkVuOKdFcmp6Ry3GuLDy92JJt0va37nMVTHG0THG0XzpLFLQVt1uWLMciJGC9tRsZ4wP2BjHWeSGc1qkqMPvqIUQ+krqFmNsrfx8pCS7ujQRRV8IttlmGzf3Gm3svvvuJttll11MtvPOO5vMaxBSbYzeonTvsgMGDDDZwIEDTeYtLJb8RZZvvvmmybyGKa+//rrJEmoc8v/LrW5rUfRFZNWqVSbr3bu3yT72sY+Z7JxzznFvc+TIkSbzFrl7i4C9eh8zZoy7nenTp5vsoYceMllra6vJcllUnELNbrbZZibr37+/ybzj0sqVK01WremS11wmNV5DD++5tu+++7rX//a3v20y7zh92WWXmeyOO+4wmbe43htjM6VQs12V91z953/+Z5OdccYZJhs2bJh7my0tLSbzGkJ4ryUcZ9NUtNGG19DDy7zzSq8JjVS9zja2fPlyk/Xq1ctkXn1Kfj16rz3e61ZKdVvLRx8HS7q9soM9JP0mxvj/6jIqoHGoW+SGmkVuqFnkhppFkjo8UYsxzpP0kTqOBWg46ha5oWaRG2oWuaFmkSo+YAsAAAAAiWGiBgAAAACJqbU9fzb69OljMq9hwhe/+EX3+mPHjjWZt1DSW2DrLZD3FoBvvvnm7ra9xZMrVqwwWb9+/UzmNVHwGoRI0oIFC0x2//33m+zXv/61yRYtWuTe5sboktQ83mJYr6GHV4tHHXWUyc4880yT7bHHHu62vWYG3sJgr469xfDe81eSjjvuOJM9/PDDJnvvvfdM5j1Xu1L3uWr76tWD1/xo7733NtlLL71kMu9x97Yh+YvUU1rULflj9Orde65J/v542XPPPWcyr/mO93zp7GYiqK/2HJd22mknk5100kkm857T1RqAPfLIIyZ74IEHTFb0udGVjrOp8h4r7/jiHatHjBhhshNPPNFk3nmz5J/rvvPOO+5lN+ad5z7xxBPuZW+//XaTPfXUUyZbtmxZoW13Fs6aAQAAACAxTNQAAAAAIDFM1AAAAAAgMUzUAAAAACAxXaaZSC3fri5J/fv3N5m3iNtbOO8tCve2s8MOO7jb
9i7rNWFYuHChyf72t7+ZzGsaIkl/+ctfTPb888+b7PXXXzeZ1ySExiGdy1vE7S3iPfbYY032rW99y2RefVZrWuDVzdSpU0329ttvF9rOnnvu6W5n1KhRJvvEJz5hsltvvdVkq1evNlm1Jhdl5B2/JL9xy4QJE0w2btw4k3lNB6688kqTzZ8/v8gQk+Qd17zXkmrH2ZaWFpN5dedd32vC4DWrQr68+qp2XPKa0Hz84x83mXec9Bosea/tkjR58mSTeecgNA7pXF6dVGvG5DUO8c41P/WpT5nMa7r34Q9/2GRLlixxt+2dE7/44osm23XXXU02ZswYkx144IHudrxz4nnz5pnMO2/3jumdhTNpAAAAAEgMEzUAAAAASAwTNQAAAABIDBM1AAAAAEgMEzUAAAAASEyX6frodbjxuhZdc8017vXff/99k+2yyy4me+GFF0zmdVLabbfdTPbGG2+4237llVdMNmvWLJN5HR5fffVVk22xxRbudrz7qEcPWyJehuYo2nFO8rsajh8/3mRf//rXTbbTTjuZzOvo9fjjj7vbvv3220322GOPmWzp0qUm22677Ux23HHHudsZOXKkybwulnfeeafJvHr37t+ydi2r9jweOHCgyYYMGWIy7zjidTT0jn/V7tMcusZ54/E6q3nPIUnafffdTeZ1ePS68nld3VLqTobGqNbZc/jw4SY76aSTTLb11lubzOvI53XHlaRp06aZzKv51J6rqN71cfvttzfZKaecYrJTTz3VZF5n4D//+c8mu/vuu91tP/HEEybbaqutTHb88cebzHsuVOsw3qtXL5N55/2pH1d5Rw0AAAAAEsNEDQAAAAASw0QNAAAAABLT5kQthHBdCGFxCOHpDbKBIYT7QghzK38PaOwwgfahbpEbaha5oWaRG2oWuSnSFWKSpCsl3bhBdq6k+2OMF4cQzq38+5z6D6+xvAWIL7/8snvZOXPmmKx///4m8xYgbrnllibr3bu3yebNm+du+3e/+53JvAYjRRefe00Uql222kLUDExSSet2Y16jG0n6xCc+YbIzzjjDZHvuuafJvFr64x//aLKf/OQn7rZffPFFk3nPDa++WltbTTZ16lR3O+PGjTPZhz/8YZN5jR1mz55tMm+h8mabbeZuuwEmqYk1W23hv/eYeI1Hli9fbjLvOOkdZ71jzabGlBLv/vEW148dO9a9/jbbbGMyb3H9qlWrTOY91737rIn34yR1keNsrYo+Jl4TqGqNf0477TSTHXjggSbzjr3e+cZtt93mbmfZsmUm857DOTx/VeKa9V6/qjU1Ou+880x2+OGHm8yrPa/pzI033mgy7/VA8pt8HHXUUSbzXst79uxpMq+JnyTdcsstJvPOL1LX5jtqMcapkt7aKB4n6YbKzzdIsm3WgE5E3SI31CxyQ80iN9QsctPRNWqDY4x/78P8uqTBdRoP0EjULXJDzSI31CxyQ80iWTV/IVaMMYYQqn4+LoQwUdLEys+1bg6oi03VLTWLFFGzyA01i9xwTovUdPQdtUUhhCGSVPl7cbULxhivijGOjjGOrvaldECTFKrbDWuWAzE6WbtrtqmjAyyOs8gN57RIVkffUbtL0gRJF1f+vrNuI0rU66+/bjKvyYC3+NFrOuItSJ81a5a77UWLFpnMW3DsaWIjhBxkX7fe4z54sP8pja997Wsm8+pz3bp1Jrv99ttNduGFF5psyZIl7rZraUzj1ezMmTPdyy5cuNBkhxxyiMk+85nPmOySSy4x2ZtvvlloPE3U9Jr1Gh15C7CnT59uMu/+22677UzmHdMkv4FGaifxXmOG3XbbzWRHHnmke31vcb73+uLxatFrINDJsj/ONot3TPQyr9GC5B/XvHp45513THbllVearFpThmrNTEok6Zr1Xk+9x9l7nE4//XT3Ng877DCTecehyZMnm+y3v/2tybxjunduIUn77LOPybwa33XXXU3m1fK1117rbsdrDui9niR4DP2AIu35b5L0qKQ9QggLQghf0vpiPiKEMFfS2Mq/gWRQt8gNNYvcULPIDTWL3LT5a5IY4/gq/2X7eAKJoG6RG2oWuaFmkRtqFrnhA7YAAAAAkBgmagAAAACQmNKvEN0Ub1FhtYYHs2fPNtnee+9tstGjbdO1bbfd1mTe4tBqTQv69u1rsrfe2vj7Gv1F7ptvvrnJUlucLxVrNFG0GUWOitZiv379THbeeee5tzlmzJhC27nrrrtMdsUVV5hs2bJlhW5Pqu2x8ur4vffecy/7yiuvmMxruOI9L73nRlm7eHn7Va1BgHe82nrrrU322muvmWzo0KEm8x6PV1991d32u+++a7JevXqZrJbHqdaa9fZnzz33NNkWW2zhXv+NN94w2Ysvvmgy7/HxXjc83nMI6fGaGAwYMMBkJ598snt977n6/vvvm+yxxx4z2R//+EeTec8/SerZs6fJUjyPKCuvKYf3+uU1Dhk7dqx7my+99JLJfvOb35jMayy2dOlSk3nHK+91Q5KOPvpok40YMcJk3jnHr371K5M9+OCD7na884aix9CUlPOsBAAAAAAyxkQNAAAAABLDRA0AAAAAEsNEDQAAAAAS06WbiXiqLTRcvny5ybxFjTfccIPJzjjjDJOdcsopJjvzzDPdbR900EEm874t/oknnjCZ903zvXv3drfTmc06vOYAG4+nzIuXvfveW0B88MEHm+zQQw91b9Or5UcffdRk3/3ud03mNXvozAXl3gJ5Sdpyyy1N5tW3dzlv7GVtJuIt9PbqS/IXenvNWBYuXGgyb/H4ihUrTLZq1Sp32/379zeZ93gOGzbMZF6TDu9xX7Jkibtt7znoLZofPHiwybz7zFvsL0lTp0412W233WYyr+a9+qz2OCItRRtGHX/88SY77LDD3Nv06mHBggUmu/LKK03m1XafPn3c7VBjncurE+/4sN9++5nMO3eV/HOB3/72tybzGnJ4deI1wTn22GPdbZ9wwgkm85oveU1CbrzxRpNVa4JTltfzcuwFAAAAAJQIEzUAAAAASAwTNQAAAABIDBM1AAAAAEgMzUQ2Uq05grdYceXKlSbzFnheddVVJmttbTXZxIkT3W2feOKJJvMW919zzTUmu+eee0w2f/58dztr1qwxmdeEoBGKLFbuzGYnjebtm9e845Of/KTJhgwZ4t7mokWLTPaTn/zEZG+//bbJvEYkjWgc4tWXd1/stttu7vV33nlnk3mNKt58802Tefu4du1adzu5857b1RoneQ1BNttsM5MNGjTIZN5jN3z4cJN5xy/JP36edNJJJvv4xz9uMq9pifccWL16tbvtp556ymTz5s0z2b777msy73np3WeSNHfuXJN5+120wRA6V7Xn0ca8x+7AAw802amnnmoyr1GDJL311lsm8xqNzZw502TtOf6VuZFXrnbZZReTecfaag01vMd64MCBJvOaInnNnLymZuPGjXO37R0bZ8+ebbIpU6aYbNmyZYVuTyrP8ZJ31AAAAAAgMUzUAAAAACAxTNQAAAAAIDFM1AAAAAAgMW1O1EII14UQFocQnt4
guyCEsDCE8GTlz9GNHSZQHDWLHFG3yA01i9xQs8hNkZZ+kyRdKenGjfL/jDFeWvcRJcrreuR1TfI67Hhdam666SaTeZ2/JOm0004z2R577GGyCRMmmGzUqFEm+8Y3vuFuZ+HChSbr37+/yRrRAarIbbZju5NUgpr1ujmOGTPGZF5HP0m69957TfbYY4+ZzOuM1Kxun97YvQ6r++yzj3v9rbbaymReVz+vo5/3fKvWIatJJqlBdet1EOzTp497Wa/Ll1cjXqctL+vdu7fJvG6d1S47YsQIk3lj944PLS0thTLJP57vuuuuJtt///1NtsUWW5jM6+wrSXPmzDGZV7Pe45CgSSrBsbYW3nPDO65tv/32JjvjjDNM5r22Vzs3mDp1qsluvvlmk3nPf++5Wm07JTNJmdWs93rsHXO8TuRed17Jf03t16+fybwuwEOHDi00nmrHsOXLl5vM6/r4zDPPmMw79lfr7thluj7GGKdKsj1ggURRs8gRdYvcULPIDTWL3NTy6+MzQwhPVd5G9r/kQ1IIYWIIYUYIYUZZZrfIVrtrtszf3YZstFm3G9ZsswcHONpVsxxnkQDOaZGkjk7UfiFpV0mjJLVIuqzaBWOMV8UYR8cYR3fyx4rQtXWoZvmiT3SyQnW7Yc02c3CAo901y3EWnYxzWiSrQ1UWY1wUY1wbY1wn6WpJ9kP7QEKoWeSIukVuqFnkhppFyjrUMSCEMCTG+PcV2cdJenpTl+9KvN8Metmbb75psmuuuca9TW9R+umnn26ygQMHmsxbMPqDH/zA3c4ll1xistdff929bG5Sr1nvoz9e04IBA+wnMlasWOHe5rRp00zmLRb3tt2I33B7C+y9be+1114m++IXv+je5nbbbWeyv/71rya75ZZbTOY9B70F9p2pXnXrPe7vvPOOe9mHH3640PV33HFHk3mNk1555ZVCmeQ3cznvvPNMtnTpUvf6G1u1apXJvAX3kr9I3Wvo4zU38RbHV2taMn/+fJN5i+6b9byst9SPtfXmffzNezy9Jl6f+tSnTOY9xi+//LK77SuvvNJkL730ksm8JhHVmlB1RanX7HvvvWey5557zmSTJk0y2dixY93bHDZsmMkOOOAAk/Xs2dNkXj16xzWvMY7kN416/vnnTfbaa6+ZrDObn3WWNvcuhHCTpEMlDQohLJD0fyUdGkIYJSlKelnSVxo4RqBdqFnkiLpFbqhZ5IaaRW7anKjFGMc78bUNGAtQF9QsckTdIjfULHJDzSI3rIQEAAAAgMQwUQMAAACAxJR7BV4ivEXhvXr1Mlm1RgbeAntvIbC34NTbttcEoNplPd5iZ74Hpzbdu3c3mdcow6ubat/lsnbtWpMVrSVPe1oRe7XoNWvwGjN85zvfMdlBBx3kbsdrdnPttfZTLN5C5aINHMrAu++9RhuSdP3115vsjjvuMNn/+T//x2SDBg0ymbfI/O2333a37TUj8cbpZV69t+d7jrxte01U5syZY7JPfvKTJqvWtMRr/uMd+71tIz3vvvuuybznxpFHHmkyr1HD4sWLTfab3/zG3fajjz5qMhqHlI93fuDV3d13322yBx980L1Nr/nc1ltvbbJXX33VZF6NfuQjHzHZ97//fXfbe+65p8m81wSvbr3zT+/YXya8owYAAAAAiWGiBgAAAACJYaIGAAAAAIlhogYAAAAAiaGZSJ15DRdWr15tMm/Rvdc8QvK/WX7IkCEm8xbOe4vUBwwY4G7Ha67gXd/bH2+BJ3zefeU1e/AW+3qLZpcsWeJux2u04TUo8BpoeHXsXbdas4Zhw4aZ7NRTTzXZySefbLKdd97ZZNUWC0+ZMsVk9957r8m8xg5ebZe1jr3nbLXHzmvU4S1mf/jhh03mNTLw6strqCFJy5cvLzSeRjR98fbRazDiNYDwrustuJf8sdM4JC3e41mtIYd3rPSaH3mv795zcNasWSa75ppr3G17xysah3QN3mPvHVerHVu8ZiQLFiwwmdcYzKvbuXPnutvxeK/nXtMS73lY9sYhHt5RAwAAAIDEMFEDAAAAgMQwUQMAAACAxDBRAwAAAIDE0EykIG9RY9FvSPcaf0yYMMFk48aNc7c9fPjwAiOUli5dajJvcX61RZ/e9b39KWvDhWbxmlh4DQa23HJLk3kLg6s1Zii6eL1v377u9TfmLZrfe++93cueddZZJjvssMNM5jWw8Zqg3HPPPe52fvjDH5rMawDRo4c91HX1OvbqsBpv4bnXoMS774s2sJGqNzhpBm/bXmMG73jsPVe9JiiSf0xl0XxavMfdew5I0i677GKyY445xmReox3v2O0d66pt22tCVe2yKD/vuOo1A9lUXkT//v1N5jVP8mpeKt64yXuN9q5bdryjBgAAAACJYaIGAAAAAIlhogYAAAAAiWGiBgAAAACJaXOiFkLYIYTwYAhhdgjhmRDCWZV8YAjhvhDC3MrfAxo/XKBt1CxyQ80iR9QtckPNIjdFuj6ukfTNGOOsEMIWkmaGEO6TdJqk+2OMF4cQzpV0rqRzGjfU6p3CivA6enmZ19muWt6zZ0+TDR482GTf/va3TXbiiSeazOtMJ/mdoebNm2eyp556ymReh8fp06e721m5cqXJOrMLWw2SqVmPVzfe/ex1VurVq5fJtt12W3c7RxxxhMkWLlxoMq9L2M4772yyo446ymRjx451tz106FCTeXU8c+ZMk91xxx0mu/XWW93ttLa2msx7rmbQ4THpmvV4NZtzxznv9WXQoEEm23HHHU1WtAOw5Hct8zpjVnstSkx2dVuE9xhV6zb3yU9+0mReJ0ivq/Jf//pXk917770m8zqsStXPGbBJpazZZvKOV95rsdfJUfK7kXtdI73OuV634rJ3gmzzlSDG2BJjnFX5uVXSs5KGShon6YbKxW6QdGyjBgm0BzWL3FCzyBF1i9xQs8hNu34dE0IYLmkfSdMlDY4xtlT+63VJ9q2k9deZKGli5eeOjhPoEGoWuaFmkaP21i01i87GsRY5KPzZihBCP0lTJH0jxviB9zPj+s+MuJ9LjDFeFWMcHWMcnclHOVAS9ahZDsRopnrUbBOGCXxAR+qW4yw6E+e0yEWhKgshbKb1BT05xnhbJV4UQhhS+f8hkuwHTIFOQs0iN9QsckTdIjfULHLS5kcfw/pfdV0r6dkY4483+K+7JE2QdHHl7zsbMsINeL+98BaA9+nTp1DWu3fvQtuQ/OYKo0aNMpnXXGG//fZzb3NjS5YscfMZM2aY7OabbzbZ1KlTTbZmzRqTVVvw7+XeYuXUf/uZUs16vCYD3n366quvmsxbVL7FFlu42znwwANN9uEPf9hkXhOZ3XbbzWQDBtgmWF59SdIzzzxjsr/85S8mu+KKK0zm7Xe1pjbe/ZZ6fXpSr9muylsg//jjj5tsxIgRJvMWx0v+a1auv5kva916x5Bqx7rhw4cXuk2vHu677z6TvfTSSybbaqut3Nv0mjpg08pas83knYd4xzWvgZjkNwnxzj+98/auWPNF1qj9o6RTJP0thPBkJTtP64v51hDClyTNl/SZxg
wRaDdqFrmhZpEj6ha5oWaRlTYnajHGaZKq/Yr68PoOB6gdNYvcULPIEXWL3FCzyE2en7cAAAAAgBJjogYAAAAAien0r7X3FiBW07NnT5MNGjTIZAcddJDJjjnmGJMNHmy/JsP71nNJ2nLLLU3mNVfwsrfffttkc+bMMdn//M//uNueMmWKybzGI17jD68JQ7VF0dX2HfXlLaT1GnpMnjzZZF7Tgn333dfdzrBhw0w2dOhQk3mLczfffHOTLVu2zGRegxBJ+tGPfmSyF1980WReswavsUK1BiE5Ng5BPrxF888//7zJvEXz3nUlqVevXrUPDA3lvW56DcUk6R//8R9N5jUqe+ONN0w2bdo0k3mv49UaKFRrsgQ0kvca3bdvX5N558OSfwwcOXKkyR566CGTeecmXoO2MuEdNQAAAABIDBM1AAAAAEgMEzUAAAAASAwTNQAAAABITKc3E/F4C3ElacyYMSb79Kc/bTKvmYi3EPi9994zWbUF4N5iRa8RQktLi8n++7//22S33HKLyRYuXOhu2xundx9VaxKCtHgNdLxGOQsWLDDZFVdcYTKvUY4kHXrooSbzFqq/9dZbJvMaf9x4440mq9ZMxHu+eAuQu3fv7l4fSIG3cH3evHkm8+p9u+22c2+zPQ200Dm8Y9UBBxzgXtY7t/COa6+99prJvFrymnp15aYhNIxKj1ejW2+9deHre81E/uEf/sFkTzzxhMneeeedwtspC95RAwAAAIDEMFEDAAAAgMQwUQMAAACAxDBRAwAAAIDEdHozEW+haLVvGfcadUyfPt1kS5cuNZm3UNFbEOk1VpCk2bNnm2zWrFkmmzt3rsm8JiFe05JqC4a9cbLAtly8xevvvvuuyR555BGTVWvo4TUo8RrOeAvfvQY2K1euLHRdyW9aAuTGey16+eWXTbZ48eLCt+k9j7zjOU1HOk/R5mGS9PTTT5usX79+JvOainkNRvr06WOyrvx6z/MgD97j9Oqrr7qX3WWXXUy2/fbbm8x7Lrz99tsmK/vzg3fUAAAAACAxTNQAAAAAIDFM1AAAAAAgMW1O1EIIO4QQHgwhzA4hPBNCOKuSXxBCWBhCeLLy5+jGDxdoGzWL3FCzyPImTC4AAAcaSURBVA01ixxRt8hNkRX/ayR9M8Y4K4SwhaSZIYT7Kv/3nzHGS+s9KK/hgeQ36vAWcd95550m8xYHe40QVqxY4W7ba/7hLWD0Gn94jSJottBQTa/ZRii6QLbaYuv333+/0PW92vaeL15zEtRNKWq2bLzXCK/h1G9+8xuTec2AJP+55b1GVGuqlZDS1qx3TPUal0nShAkTTOadw3hNZGgU1ilKW7edyTtezZgxw73sXnvtZTKvmV7//v1N5p3zV5szlEWbs4UYY4uklsrPrSGEZyUNbfTAgI6iZpEbaha5oWaRI+oWuWnXGrUQwnBJ+0j6+6+WzgwhPBVCuC6EMKDOYwNqRs0iN9QsckPNIkfULXJQeKIWQugnaYqkb8QYl0n6haRdJY3S+t9OXFblehNDCDNCCDOqfU8Y0Aj1qFm+wwXNVI+abdpgAXGcRZ44p0UuCk3UQgibaX1BT44x3iZJMcZFMca1McZ1kq6WtL933RjjVTHG0THG0d7n8IFGqFfNsl4AzVKvmm3eiNHVcZxFjjinRU6KdH0Mkq6V9GyM8ccb5EM2uNhxkp6u//CA9qNmkRtqFrmhZpEj6ha5CW195CCEcLCkhyX9TdLf3+c9T9J4rX+LOEp6WdJXKos0q+rRo0fs169fjUP+IK9rktepy+Pte3s6bZW904zkdyPzePdbvX9Lunz5cq1Zs6bNG61nzXbv3j327du3lmGjC1uxYoXWrl3b1Jrt1q1b7NWrVy3DxiZ4x7oBA+xyllWrVrnX97qsFtWsd55WrVo1s613Z+t9nO3du3etw24o71xD8js4ex+J884XvMvxMdCOWbFiRZs1K9X/nHaLLbaoZdhZ8s4LvVrefvvt3evvt99+Jps/f77JZs+ebTLv+JtBh1xXa2troXPaIl0fp0nybuh/OjIwoNGoWeSGmkVuqFnkiLpFbviALQAAAAAkhokaAAAAACSGiRoAAAAAJKbNNWqpe//99wtl6BhvAbS32LloAxcAyJl3rGttba3pNmlPn75q5xWcb6Cr8RqHeA09Fi1a5F5/2rRpJluxYkWh28y1cUgteEcNAAAAABLDRA0AAAAAEsNEDQAAAAASw0QNAAAAABITvMYQDdtYCEsk/f3rxwdJeqNpG2+sMu2LlO7+7BRj3KaZG6Rms5Hq/lCz9VOmfZHS3p+m1m2Ja1Yq1/6kvC+deaxN+X7piDLtT8r7UqhmmzpR+8CGQ5gRYxzdKRuvszLti1S+/amXMt0vZdoXqXz7Uy9lul/KtC9S+fanXsp2v5Rpf8q0L/VUtvulTPtThn3ho48AAAAAkBgmagAAAACQmM6cqF3ViduutzLti1S+/amXMt0vZdoXqXz7Uy9lul/KtC9S+fanXsp2v5Rpf8q0L/VUtvulTPuT/b502ho1AAAAAICPjz4CAAAAQGKaPlELIRwVQpgTQnghhHBus7dfqxDCdSGExSGEpzfIBoYQ7gshzK38PaAzx1hUCGGHEMKDIYTZIYRnQghnVfIs96dRqNl0ULPFULPpoGaLy7luy1SzEnVbVM41K5Wrbstas02dqIUQukv6maRPShohaXwIYUQzx1AHkyQdtVF2rqT7Y4y7S7q/8u8crJH0zRjjCEljJH218njkuj91R80mh5ptAzWbHGq2gBLU7SSVp2Yl6rZNJahZqVx1W8qabfY7avtLeiHGOC/GuFrSzZLGNXkMNYkxTpX01kbxOEk3VH6+QdKxTR1UB8UYW2KMsyo/t0p6VtJQZbo/DULNJoSaLYSaTQg1W1jWdVummpWo24KyrlmpXHVb1ppt9kRtqKRXN/j3gkqWu8ExxpbKz69LGtyZg+mIEMJwSftImq4S7E8dUbOJomaromYTRc1uUhnrthSPMXVbVRlrVirBY1ymmqWZSJ3F9W00s2qlGULoJ2mKpG/EGJdt+H857g/aJ8fHmJrt2nJ8jKnZri3Xx5i67dpyfIzLVrPNnqgtlLTDBv8eVslytyiEMESSKn8v7uTxFBZC2EzrC3pyjPG2Spzt/jQANZsYarZN1GxiqNlCyli3WT/G1G2bylizUsaPcRlrttkTtccl7R5C2DmEsLmkkyXd1eQxNMJdkiZUfp4g6c5OHEthIYQg6VpJz8YYf7zBf2W5Pw1CzSaEmi2Emk0INVtYGes228eYui2kjDUrZfoYl7ZmY4xN/SPpaEnPS3pR0neavf06jP8mSS2S3tf6zyN/SdLWWt9JZq6kP0oa2NnjLLgvB2v9W8BPSXqy8ufoXPengfcTNZvIH2q28P1EzSbyh5pt132Vbd2WqWYr+0PdFrufsq3ZyvhLU7dlrdlQ2TkAAAAAQCJoJgIAAAAAiWGiBgAAAACJYaIGAAAAAIlhogYAAAAAiWGiBgAAAACJYaIGAAAAAIlhogYAAAAAiWGiBgAAAACJ+f8A/
hEPExCPZKIAAAAASUVORK5CYII=\n", + "text/plain": [ + "
" + ] + }, + "metadata": { + "needs_background": "light" + }, + "output_type": "display_data" + } + ], + "source": [ + "# Browse examples\n", + "fig, axes = plt.subplots(2, 5)\n", + "\n", + "for index in range(5):\n", + " sample = get_sample(index)\n", + " decoded = model(sample[None])[0]\n", + " show_image(axes[0, index], sample)\n", + " show_image(axes[1, index], decoded)" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAA2oAAACzCAYAAAD48u9xAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4zLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvnQurowAAH65JREFUeJzt3W+MXNWZ5/Hf0+3+43bbYLuxcRhnbcAJAgIhsQBlE5LNLAsbJWFGCpsh0oZIIxlFgwgKLwbNvtjRvhqtZjJvdjMRGwgZ5M0oJDNKMqBljWEJCIRsSAIGu92OgzGOwcZg2m2723/67AtXUNvnufStqlvV59z+fiTL7ce36p5b9auqe7rqPGUhBAEAAAAA0tEz1wMAAAAAAJyNiRoAAAAAJIaJGgAAAAAkhokaAAAAACSGiRoAAAAAJIaJGgAAAAAkhokaAAAAACSGiRoAAAAAJKatiZqZ3Wxmo2a2y8zurWpQQKeQWeSI3CI3ZBa5IbNIkYUQWrugWa+knZJulPSGpC2SbgshvPoBlwlm1tL+gBCCQggtB6iVzPb29obe3t5Wd5mcso+/stv19Pi/6/GeV7zbcXp6uuVaDk6fPq3Tp0+39aTXbG57e3tDX19fO7vEPDc1NfV2COGCVi/fbGbNLBQ9lwBlTE9PdzWzjctwTouWlT2nXdDGPq6VtCuEsFuSzOyfJN0i6YNCrcHBwTZ2iflscnKy3atoOrO9vb268MIL291vMrzJkneC5NW8yw4MDLj7OXXqVFRbsmRJVDt+/HhUO3r0aFQruu9Tf5F88803q7iapnLb19enNWvWVLFfzFOjo6N72ryKpjLb09OjoaGhNneJ+WxiYqKrmW1sU/gaCMxmamqq1Hbt/ArrIkl7Z/z7jUYNSBWZRY7ILXJDZpEbMosktfOOWilmtkHShsbPnd4d0LaZma3Txx5RXzMzu2BBx5/WgbZxboAczcwt0A3tvKO2T9LqGf/+o0btLCGE+0II60MI69vYF1CFpjPLugkkYNbczswsv1xAAprKLBM1JKDp8wNyi25o51evWyStM7O1OhPmP5P0tUpGBXTGvM+s925L2fVk3nb9/f2l93348OFS1+mtR5vnL4jzJrdl7+dWm2Cha+ZNZlEbZBZJanmiFkI4ZWZ3SnpMUq+kB0IIr1Q2MqBiZBY5IrfIDZlFbsgsUtXWYoYQwqOSHq1oLEDHkVnkiNwiN2QWuSGzSBELcAAAAAAgMUzUAAAAACAx9HEGEuB1l+zr6yt1Wa8hhySdPn06qk1PT0c1r8HIxMREVPO6CZ44ccLdt1f3xuOZ541Dase7P4uagXjNabx8elnyvnjW23fR48XLvLcfumoCALqFd9QAAAAAIDFM1AAAAAAgMUzUAAAAACAxTNQAAAAAIDFM1AAAAAAgMXR97AKv81gzndA8zWyL1plZ1JGxmdve6+Y4ODgY1VauXBnVpqamoprXme748ePuvr1xel31vC543hi97ntFHfSKukFi/mnm8eJ1Ol23bl1UW7x4cVQbGRmJah/5yEei2q5du9x9v/jii1Ht9ddfj2pFmQc+SNlutt5rhtetV/K7knqX9x6DdNcF8sA7agAAAACQGCZqAAAAAJAYJmoAAAAAkBgmagAAAACQGJqJlOQtvPUW7ZZdoOstDm63QUhvb29Ua3dhMuL7dGBgINpmyZIl7mWXLVsW1W688cao9uUvfzmqPfnkk1Ht+9//flSbnJx09+3lwWvyUTbbzfD2Tb4wm6VLl0a1b3/721Ht0KFDUW3Lli1R7Rvf+EZUe/nll91979ixo9R4Dh486F4e85P3euo1nPGeU73XEm+7oufjoaGhqOY1ffJqXhOqoteSonMG4A/KNrGRyjfBwRncMgAAAACQGCZqAAAAAJAYJmoAAAAAkJi21qiZ2WuSjkg6LelUCGF9FYMCOoncIjdkFrkhs8gNmUWKqmgm8u9CCG9XcD1J8xoueA0TvEW3ZRdZFi3YLbvIsq+vL6qVXdQszbtmD6Vyu2DBAo2MjJxVu+2226LtvvjFL7qXX758eVQbHh6Oat6i8qeeeiqqebnp7+93933s2LGoVnZR+Pj4eFRrpkEIi887ovbPteedd15UO3z4cFTbunVrVNu8eXNU27ZtW1R75JFH3H17jRQuvfRSd1uUllRmy74Wl329l6Spqamodv7550c1r7GUl/dPfOITUe3KK690971q1aqoduDAgajmnRs89NBDUW3nzp3ufo4ePRrVany+kFRm51pR7s/lZcx7HEh+c5s9e/ZEtXYa9tUJH30EAAAAgMS0O1ELkv6vmb1gZhuqGBDQBeQWuSGzyA2ZRW7ILJLT7kcfPx1C2GdmKyRtMrMdIYRfztygEfYNjZ/b3B1QiQ/M7czMFn2sEOiy0pn1PlYCzIHSmeXcAIlo6pwW6Ia23lELIexr/H1A0r9IutbZ5r4QwnoWZSIVs+V2ZmY56UUKmsls2TUFQCc1k1kmakhBs+e05Bbd0PJZqJktktQTQjjS+Pk/SPpvlY0sA2UX0548eTKqec0W2n3QnzhxotR2Rd8WPx80m9sQQnS7eg1a9u3b517+pZdeimpPPPFEVPMmhC+++GJUO3ToUKnLSn7Gyt73XrZrvHg8afPpuXbHjh1R7a677opqXsMEb4L6m9/8Jqp5jREkacWKFWWGiBJSzaz3/Fe24daaNWvc61y9enVUu+qqq6Ka93y8f//+qOY1ZShqzuSN/bLLLotq119/fVTzzku+973vufvxxln0OMpVqpntlHaa3C1cuDCqXXzxxVHtzjvvdPftXf6ee+6Jat75znw8f23n7YKVkv6lMblYIOl/hxD+TyWjAjqH3CI3ZBa5IbPIDZlFklqeqIUQdku6usKxAB1HbpEbMovckFnkhswiVbTnBwAAAIDEMFEDAAAAgMTQ0q4NZRc1ett1oltQ0YJjtO7kyZNR44Jf/OIX0XYbN250L+8tuD5+/HhU874GYHJyMqoNDAxEtaImMvNx0S3y5jVS8Jp8eLXDhw9Htam
pqai2cuXKFkeH3HnNNy655JKoduutt0a16667zr3OxYsXR7Vnn302qm3evDmqvfLKK1HNOzfwGj9I/mu+99j40pe+FNVefvnlqHbs2DF3P6gf77lxaGgoqn34wx+Oat5j5pvf/GZUu/nmm919e02jvHMg73zHOy+qO95RAwAAAIDEMFEDAAAAgMQwUQMAAACAxDBRAwAAAIDE0EykJBozzE9mFjU42LNnT7Sd1yDkD5c/18KFC6OatyjcW1xLwxjUWW9vb1T71Kc+FdVGR0ejmtdUx1swj/nBy9KSJUui2t133x3VvvrVr0a1hx9+2N2PV//Vr34V1bzXCK+5iXeu4W0n+cfoNVt46KGHoprX6KqoMVVRHekralznnYd89KMfjWpXXHFFVBseHo5q4+PjUW3r1q3uvp9++umo9tZbb0U1L/edaMSXOt5RAwAAAIDEMFEDAAAAgMQwUQMAAACAxDBRAwAAAIDEMFEDAAAAgMTQ9bGkOnV9bLdrTp1ui6p4HZSKcPvlpZnHS0/P2b/7mo8dqtqxZs2aUtstXrw4qr377rsVjwY58zrTebn58Y9/HNU2btwY1Xbs2OHup2w3R4/3/ODVvA7ARbyuj173U6+DMK9NefPuv8HBQXfbkZGRqHbppZdGtYmJiaj2/PPPR7Wf/OQnpcYj+c/VJ0+ejGoDAwPu5ecb3lEDAAAAgMQwUQMAAACAxDBRAwAAAIDEzDpRM7MHzOyAmW2bUVtmZpvMbKzx99LODhNoDrlFbsgsckNmkRsyi9zYbItHzewGSROS/jGEcGWj9t8lvRNC+Bszu1fS0hDCX862s56enlC0sBHdk2szkcnJSU1PT5cafFW57e/vDxdeeGG7Q0fGih4v3nPZ0qVnv75v27ZNExMTXc3s4OBgKNuUIzXebfqhD30oqu3ZsyeqnT59uiNjmo9GR0dfCCGsn227qjLb29sbhoaGqhj6+85t7NMYm7fvUjWv2YHk567q10hvPJJ/jN54vMYhdTMxMdHVzEpnzmlTanjh5aEZixYtimpenrzHgpfRotx59bINeOpkamqq1DntrPdqCOGXkt45p3yLpB82fv6hpD9peoRAB5Fb5IbMIjdkFrkhs8hNq9PvlSGE/Y2f35S0sqLxAJ1EbpEbMovckFnkhswiWW1/j1oIIZhZ4fv8ZrZB0obGz+3uDqjEB+V2ZmaLPnICdFvZzC5YwNdjIg1lM8u5AVLRzDkt0A2tvqP2lpmtkqTG3weKNgwh3BdCWF/ms8NAh5XK7czMtvuZb6BNTWeWXy5gjjWdWSZqmGMtndOSW3RDq796/bmk2yX9TePvn1U2IqBzyC3eV7bZwHnnnedeftmyZVHt3MYzY2NjLY7uffMqs5OTk1Ft9+7dczAStCGJzJZtoJFaExrveamoCVu7TR3wviQy2y7vfi66773XuiNHjrS876JmO2hfmfb8P5L0nKSPmtkbZvbnOhPmG81sTNK/b/wbSAa5RW7ILHJDZpEbMovczPqOWgjhtoL/+uOKxwJUhtwiN2QWuSGzyA2ZRW5YgAMAAAAAiWGiBgAAAACJoY9zjXiLQ71aUVc4r+4tEE1t8TV83v05MDAQ1byMeAuQvUYPIRR2MU6ed4xLly6NaiMjI+7lh4eHo9rbb7991r9ZYI0q9fX1RTXv6xiKGgicOHEiquX8GK6jRYsWRbWhoSF321OnTkW19957r/IxIV85d672Xj+956uir6TxzoFyfL7L9x4EAAAAgJpiogYAAAAAiWGiBgAAAACJYaIGAAAAAImhmUiNeIskvYXJF110kXv5cxshSNL4+HhUo5nI3CnbIETyG2Ocd955pfZz8ODBqOYtSvYajEh5ZMRruLB69eqotnbtWvfyY2NjUe13v/vdWf/2mjcArVqyZElU85oBFT3+vOYTOTxW68B77vaaIKxbty6qFTWEOHz4cFQ7fvx4VPPud8xvXh69nPX390e1snnynpsk/7zBeywMDg5GtWXLlkW1Y8eOufvxHgtTU1PutinjHTUAAAAASAwTNQAAAABIDBM1AAAAAEgMEzUAAAAASAzNRDLgLfr0GiF4CzyvvvrqqHbzzTe7+3nwwQej2rvvvltihOgWb8Ht4sWL3W2Hh4ej2nXXXVdqu+3bt0e1559/PqoVNTIpWtybEq+JwooVK6Kat3hZ8hcln7t42XucAq3ymtN4DQCKmtiQx7njPd+cf/75Uc1rJlJ0vz333HNRzWsqBpzLO1+85pprotrHPvaxqPbOO+9EtXMbaUnFjYoOHDgQ1bzGI97r8de//vWo9uijj7r72bFjR1SjmQgAAAAAoG1M1AAAAAAgMUzUAAAAACAxTNQAAAAAIDGzTtTM7AEzO2Bm22bU/trM9pnZrxt/vtDZYQLlkVnkiNwiN2QWuSGzyE2Zro8PSvofkv7xnPrfhxD+tvIRIVK2w6PX6etrX/taVPvtb3/r7mdiYqLUvjPwoGqa2ZMnT0a1os6chw8fjmpjY2NRzevc6HVG8jLndSTNhXfcXlfNvXv3upf3Ole1+Xh5UDXNLarhPUd7j8Gibmsd6Aj4oMhsKX19fVHNu5+8jnp79uxxr9PLQ1HHT7zvQZFZ9ff3R7VVq1ZFNa/r49GjR6Pa5ZdfHtUuuOACd98LFy4sdZ2f/exno5rXCXLTpk3ufiYnJ916bmZ9Ry2E8EtJ8TMHkCgyixyRW+SGzCI3ZBa5aWeN2p1m9lLjbeSlRRuZ2QYz22pmW9vYF1CFpjOb6TuKqJdZczszs0XvpgBd1FRm+e4vJKDp8wNyi25odaL2D5IukfRxSfsl/V3RhiGE+0II60MI61vcF1CFljLrfZks0EWlcjszszl/HBW10HRmvS+7BbqopfMDcotuaOksNITwVgjhdAhhWtL/knRttcMCqkVmkSNyi9yQWeSGzCJlZZqJRMxsVQhhf+Offypp2wdtj/Z4b6977/RceeWVUe26666Lalu2bHH3c+jQoajm/cYox7f765JZ76OYzSweL9uExsuX13yj6GN23jhT++2jN56dO3dGtSNHjriXHx8fj2pVvwNbl9yiGt5zr/eYnktk1ufdT95zy3PPPRfVip5nveZHXka85+OhoaGo5jV5KLpO73XDq6WWT898zOzx48ej2hNPPBHVvPNFr/GHd35w1VVXufv+zGc+E9VuuOGGqOY1MnnkkUeiWlGDPG+cOZp1omZmP5L0OUkjZvaGpP8q6XNm9nFJQdJrku7o4BiBppBZ5IjcIjdkFrkhs8jNrBO1EMJtTvn+DowFqASZRY7ILXJDZpEbMovc0CkBAAAAABLDRA0AAAAAEtNSMxG0r5mmA2W/F+mmm26Kat63xe/bt8+9/MmTJ6Naag0g0B5v8bln0aJFUc1rVrNr1y738u+++25U8xakz2VjGu9xtXv37tKX9x4bPF6Aemr3se0913nPQRMTE23txxvn2rVro9rIyEip8UjSsWPHolrZ5mM5NBOZj7xzgampqVI1z+TkZFQbHR11t/XONb3zi3feib+X/PHHH49qBw8edPfjnWfn+D2jvK
MGAAAAAIlhogYAAAAAiWGiBgAAAACJYaIGAAAAAImhmcgcmZ6eLr1tb29vVPMWeHoLfrdt2xbVvIWckv/N8t62c9kAArGiRe5LliyJat4C8rKLvZtZKN7X1xfVyi5KBoBO8JoLeDXv9dl7fSy6vPe86DVb8F7bh4aGSm0nScPDw1Htsssui2pf+cpXopr32v7ss8+6+/HOI7zzjRwbNaBzvGYgknTxxRdHNe/8Yvv27VHtqaeeimreY6tOeEcNAAAAABLDRA0AAAAAEsNEDQAAAAASw0QNAAAAABJDM5GSvMW83je7e4tpveYb3mLjoiYd3n68Zg1btmyJak8//XRUW7x4cen9eIuqyzafQHd4WZCkW2+9NardfvvtUc3LyMaNG6Paq6++GtUmJibcfTfTLCdXRU1cmt0GQHd4r89ekxCvEdOll17qXqf3erhr166o5p1DLFy4MKpdffXVUW358uXuvlevXh3VvNf3sbGxqPb4449HtZdeesndT9mGZkWvRag/77XuiiuucLe96aabotq+ffui2v333x/VvMeW19BHqk9zG95RAwAAAIDEMFEDAAAAgMQwUQMAAACAxMw6UTOz1Wb2pJm9amavmNm3GvVlZrbJzMYafy/t/HCB2ZFZ5IbMIkfkFrkhs8hNmXfUTkm6J4RwuaTrJf2FmV0u6V5Jm0MI6yRtbvwbSAGZRW7ILHJEbpEbMouszNr1MYSwX9L+xs9HzGy7pIsk3SLpc43Nfijp/0n6y46MMgFeh6OlS1v/hcvk5GSpmlTcDfJcO3bsiGqbNm2Kal5XKUn6/e9/H9W8Tn+pd32cb5n1OodJ0oUXXhjVDh48GNW87l+7d++Oal4nx/nQ3bEb5ltmUQ91yK3XMc57bb/hhhvcy3uv297lDx06FNWuvfbaqHbXXXdFNa8jsyT94Ac/iGoPPfRQqX0fPnw4qhV1yfPqXpe/sucqc6kOmZ1rXh4HBwej2ic/+Un38l72Hn744ai2c+fOqOblruictC5dl5tao2ZmayRdI+l5SSsbgZekNyWtrHRkQAXILHJDZpEjcovckFnkoPT3qJnZsKSfSro7hDA+c6YaQghm5v4qxcw2SNrQ+Lm90QJNqCKz3vfHAJ1SRWaLfvsOdEorueXcAHOpiudaoBtKvaNmZn06E+iNIYR/bpTfMrNVjf9fJemAd9kQwn0hhPUhhPVVDBgoo6rMFn2RIlC1qjLLLxfQTa3mdmZmmaihm6p6riW36IYyXR9N0v2StocQvjPjv34u6fbGz7dL+ln1wwOaR2aRGzKLHJFb5IbMIjc22+JPM/u0pKclvSzpD50D/kpnPtP7Y0kflrRH0n8KIbzzQdfV09MTvAWHOfA+TrRyZfwR5pGRkVLXt3///qh25MgRd9vjx4+Xuk6vscOKFSui2gUXXOBefnx8vNSYvO26YXJyUtPT07P+CqvKzPb39wevKUdKihbSLlq0KKq99tprpS7vZbu/v7/5wdWY93g7t7HL3r17NTk52dXMDg4OhjVr1sx+AECB0dHRF8p8Cqaq3Pb29oahoaG2xz2bvr6+qOadkyxfvjyq3XHHHe51fv7zn49q3mvsxMREVFu7dm1Ue/3116PaPffc4+77mWeeiWre+YL3zo/XICSHZiBFJiYmuppZ6cw57cDAQFvjztHw8HBU8843ihqdeY8F77zSy2hRw5scTU1NlTqnLdP18RlJRVf0x80ODOg0MovckFnkiNwiN2QWuWEBDgAAAAAkhokaAAAAACSGiRoAAAAAJIYv3Cnp5MmTUW3v3r1R7e23345qXot3r4FDUVOIsrz9eN8Af/ToUffynRgTOq/oe7NOnDgR1ZYtW1bqOmkcMjtvgf65t3nOi/PrwLv9aak9f3mv497zpPe6+dhjj7nX6TUoueyyy6Lae++9F9W++93vRrUnn3wyqo2Njbn79niZ53kIVTp27FhUm5qaimpew62iOhktxjtqAAAAAJAYJmoAAAAAkBgmagAAAACQGCZqAAAAAJAYmomU5C1A92regsq5RIOQ9piZent7z6p5i16LFs3OJW+cAwMDczCSevIe/+fe5iyQ7oyyDUG4/TEbL0teg5CiZiJbtmyJal4zksHBwah27muLJI2Pj0e1hQsXuvsm35gL3vlOiudAdcE7agAAAACQGCZqAAAAAJAYJmoAAAAAkBgmagAAAACQGJqJAACy4jVRoLECquI1RliwwD9d8pp/eE3FTpw4Ueo6e3ri35+TbWD+4h01AAAAAEgMEzUAAAAASAwTNQAAAABIzKwTNTNbbWZPmtmrZvaKmX2rUf9rM9tnZr9u/PlC54cLzI7MIjdkFrkhs8gRuUVuyjQTOSXpnhDCi2a2WNILZrap8X9/H0L4284ND2hJpZk9dyH36dOno23MrMWhApJ4nm0bj8Gum1eZ7e3tLb3t8PBwB0eCNs2r3CJ/s07UQgj7Je1v/HzEzLZLuqjTAwNaRWaRGzKL3JBZ5IjcIjdNrVEzszWSrpH0fKN0p5m9ZGYPmNnSiscGtI3MIjdkFrkhs8gRuUUOSk/UzGxY0k8l3R1CGJf0D5IukfRxnfntxN8VXG6DmW01s60VjBcorYrMeh9zBDqFzCI3VWSW7wlDt5Fb5MLKBM3M+iT9q6THQgjfcf5/jaR/DSFc+UHX09PTEwYHB1sbKea9yclJTU9Pl1qIUlVmBwYGwqpVq86qsUYNZb355ps6ceJEVzM7ODgY1qxZ0/xggYbR0dEXQgjrZ9uuqsz29vaGoaGhFkcLSBMTE6UyK1V7TjswMNDCaAFpamqq1Dltma6PJul+SdtnBtrMZp69/qmkba0MFKgamUVuyCxyQ2aRI3KL3JTp+vhvJf1nSS+b2a8btb+SdJuZfVxSkPSapDs6MkKgeZVlNoTgvoM2V5p5526+fiyj7G2U2O3D8yxyQ2aRI3KLrJTp+viMJO/M59HqhwO0j8wiN2QWuSGzyBG5RW6a6voIAAAAAOg8JmoAAAAAkBgmagAAAACQmDLNRIB5LaXW+14DjKKmGCmNu5ump6dLbdfTE/+eKrEGIwAAYB7jHTUAAAAASAwTNQAAAABIDBM1AAAAAEgMEzUAAAAASIx1c/G8mR2UtKfxzxFJb3dt551Vp2OR0j2efxNCuKCbOySz2Uj1eMhsdep0LFLax9PV3NY4s1K9jiflY5nL59qUb5dW1Ol4Uj6WUpnt6kTtrB2bbQ0hrJ+TnVesTsci1e94qlKn26VOxyLV73iqUqfbpU7HItXveKpSt9ulTsdTp2OpUt1ulzodTx2OhY8+AgAAAEBimKgBAAAAQGLmcqJ23xzuu2p1OhapfsdTlTrdLnU6Fql+x1OVOt0udToWqX7HU5W63S51Op46HUuV6na71Ol4sj+WOVujBgAAAADw8dFHAAAAAEhM1ydqZnazmY2a2S4zu7fb+2+XmT1gZgfMbNuM2jIz22RmY42/l87lGMsys9Vm9qSZvWpmr5jZtxr1LI+nU8hsOshsOWQ2HWS2vJxzW6fMSuS2rJwzK9Urt3XNbFcnambWK+l/S
vqPki6XdJuZXd7NMVTgQUk3n1O7V9LmEMI6SZsb/87BKUn3hBAul3S9pL9o3B+5Hk/lyGxyyOwsyGxyyGwJNcjtg6pPZiVyO6saZFaqV25rmdluv6N2raRdIYTdIYQTkv5J0i1dHkNbQgi/lPTOOeVbJP2w8fMPJf1JVwfVohDC/hDCi42fj0jaLukiZXo8HUJmE0JmSyGzCSGzpWWd2zplViK3JWWdWaleua1rZrs9UbtI0t4Z/36jUcvdyhDC/sbPb0paOZeDaYWZrZF0jaTnVYPjqRCZTRSZLURmE0VmP1Adc1uL+5jcFqpjZqUa3Md1yizNRCoWzrTRzKqVppkNS/qppLtDCOMz/y/H40FzcryPyez8luN9TGbnt1zvY3I7v+V4H9cts92eqO2TtHrGv/+oUcvdW2a2SpIafx+Y4/GUZmZ9OhPojSGEf26Usz2eDiCziSGzsyKziSGzpdQxt1nfx+R2VnXMrJTxfVzHzHZ7orZF0jozW2tm/ZL+TNLPuzyGTvi5pNsbP98u6WdzOJbSzMwk3S9pewjhOzP+K8vj6RAymxAyWwqZTQiZLa2Ouc32Pia3pdQxs1Km93FtMxtC6OofSV+QtFPSbyX9l27vv4Lx/0jSfkkndebzyH8uabnOdJIZk/S4pGVzPc6Sx/JpnXkL+CVJv278+UKux9PB24nMJvKHzJa+nchsIn/IbFO3Vba5rVNmG8dDbsvdTtlmtjH+2uS2rpm1xsEBAAAAABJBMxEAAAAASAwTNQAAAABIDBM1AAAAAEgMEzUAAAAASAwTNQAAAABIDBM1AAAAAEgMEzUAAAAASAwTNQAAAABIzP8HfU7ieBOpF80AAAAASUVORK5CYII=\n", + "text/plain": [ + "
" + ] + }, + "metadata": { + "needs_background": "light" + }, + "output_type": "display_data" + } + ], + "source": [ + "samples = torch.randn(5, model.representation_length)\n", + "\n", + "fig, axes = plt.subplots(1, 5)\n", + "\n", + "for index in range(5):\n", + " decoded = model.decoder(samples[index][None])[0].detach()\n", + " show_image(axes[index], decoded)" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "tensor([[-0.9777, -1.3779, -0.6812, 2.4773, 2.3612, 0.1038, 0.8307, -2.4117,\n", + " -1.0913, 1.0372, -2.3588, -0.2581, -1.2573, 0.8061, -1.3952, 2.1415]])" + ] + }, + "execution_count": 11, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + " model.encode(get_sample(0)[None])" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAA2oAAABpCAYAAABLV9A4AAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4zLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvnQurowAAIABJREFUeJztnXmQHVX5/p8zk4RAQJTFiAFFENGwKBgBQQUEFAEBRRDEH6hoFBFxLVHLpUrL4osii1JYsUBR2RdlB9lBQGSRJSSCGEDAIIJCkpnJMrn9+yP3Of305Ezm5t6+9/bMPJ8qis6ZmXu7nz5L93mf856QZRmMMcYYY4wxxlSHnm6fgDHGGGOMMcaYIn5RM8YYY4wxxpiK4Rc1Y4wxxhhjjKkYflEzxhhjjDHGmIrhFzVjjDHGGGOMqRh+UTPGGGOMMcaYiuEXNWOMMcYYY4ypGC29qIUQ9g4hPBpCeDyEcHxZJzWesablY03bg3UtH2taPta0fKxp+VjT8rGm5WNNO09odsPrEEIvgMcA7AXgGQD3ADgsy7I55Z3e+MKalo81bQ/WtXysaflY0/KxpuVjTcvHmpaPNe0OE1r42x0APJ5l2TwACCGcD+AAAMPesBBC1tNjt2WKWq32AoD9YU1Lw5qWT61WeyHLsg2xmu3fmg6PNS2fVjTt3FmOOprStP47WQihM2c5isiyDFmWBfh5qlQ89pePNS0fGadWSSsvatMAPC3/fgbAjkN/KYQwE8DM+jGmTJnSwleOXRYuXPgUmtR07bXX7sg5jjYWLFjQtKZrrrlmR85xtNHX1/dU/XBEXYdqOnny5I6c42ijv7+/aU3dn6ap96fAamoKABMmtDIsjl0GBwcb1hRYWdeJEye29fxGI8uWLeNhU+OU+9Q09T61KU3XWmutjpzjaGPRokVNa+pxKo2MU6uk7SNSlmWzAMwCgN7eXs9WloA1LR9rWj7WtHysafmopo6olYfq2tPTY11LwO2/fKxp+VjTcmklHvksgE3k3xvXy0zzWNPysabtwbqWjzUtH2taPta0fKxp+VjT8rGmXaCVF7V7AGwRQnhDCGESgEMBXF7OaY1brGn5WNP2YF3Lx5qWjzUtH2taPta0fKxp+VjTLtC09THLssEQwhcAXAegF8BZWZY9UtqZdRBd5Lx8+XIAQH9/fyxjZkxdu9CONUxjSVNlcHAQQFFTssYaaySPy2Ksasp6unjx4pV+pmtBJk2a1JbvHyu6attnO1+yZMlKv6dtv7e3ty3nMhY1JbIWJ6ILzNu12HysaJrSh32A0omEHWNFUyCtV7OZsFthLGlaFaxp+VjT7tDSGrUsy64GcHVJ52JgTduBNW0P1rV8rGn5WNPysablY03Lx5qWjzXtPM6ZaYwxxhhjjDEVY9zlIabVQS0Past5xSteAQCFlPe07A0MDMQy2iO8N0yapUuXxmNaGl/5ylfGMlr2aIs0I6P1lFa8ddZZJ5axfqYsUSaNasW2rOmZWT+1nrbL+jhWSFnHUqnZ3fYbRzVlPdV6yJ/XarWVfs+0Tkp/kyalz0hjkvfZWjWqD+tiauxy3WyclKZV7T/dOowxxhhjjDGmYozbiJrORmy4Yb4x+NFHHw0A2HHHfA+/2bNnAwB++ctfxrInnngCQPENfLzOCjG6o1G09ddfPx4feuihAIDtt98+lv35z38GAFx//fWx7LnnngOQnikab1BTjaJp9GyPPfYAAGy33Xax7E9/+hMA4JFH8rW9L774IoBiPR2vpDTVJCFvfvObARTrKdv5nDlzYtnLL78MoDjL3o0EBFWAUR2Njmlde93rXgcA2GijjWIZ9f/HP/4Ry+haGG+apsYMlqmmejxt2jQAKGwiyz5z/vz5K/2N3o/xoCmQ1pVjiWqpY83UqVMBFKO/1OuFF16IZdRzvPWpjWqquqy33noA0i6El156KR6nIhrjgVW5M7Ru6jGfA/RvU0nwUs+644FGNdW6RheN1vGU64uf3WlNx+ebhTHGGGOMMcZUGL+oGWOMMcYYY0zFGNPWR9qa1O5B283WW28dy7761a/G47322gtAMdy5wQYbrPTZp556KgDg+eefX+lnY9leolYxwhDym970plj2yU9+Mh7T+qhsscUWAIq207POOgsA8L///W+l3x9vmpJNNtkkHh9yyCHxeObMmQCK+/ntuuuuAIALLrggll100UUAcrveeCGlKW0Lr3rVq2LZe9/73nj8+c9/HgDwmte8Jpb97W9/AwD84he/iGU333wzgOIedrSZjOV6mrKUUOd11103lu28887xmJpuvPHGsYyWxxNOOCGW3XvvvQDGn+2ZVhu14VBTTb707ne/Ox7Tnq/19O9//zsA4Hvf+14se+ihhwAUdRwPmgL5daqutDRq+3/nO98Zjz/+8Y8DSOv6wx/+MJY99thjAIrtYawlGFpVPdE+jppqXd1hhx3i8UEHHQQAeO1rXxvLaCM/6aSTYtm8efMAFPf9HGuapqyj1FI1TSVfUzv++973PgBFO/ndd98NIB/vAeDJJ58EUNR0rC3PSV0P23xKUx2nttlmm3jMOvvqV786ll111VUAiktJaC3XutmJPnVs3TVjjDH
GGGOMGQOMmYha6s2a6co16vCe97wHAPCNb3wjlm277bbxmBE3nS3nou2ddtoplnEG45ZbbollCxcuBDB2ZtVTM1rUdPLkybFsxowZAICvfe1rsextb3vbSp+j2xtw5oJRIAC46667AOQzwUC+OHasLIhNRXn7+voAFNPCM0mIRns1cQhn23TxMKNv++67byz761//CgCYO3duLGPdHisLt6mp9gGperrVVlsByKM8QF53gTxyvmTJkljGBCMf+9jHYhkjQpyxBPL6OVbqaarts2/UesqZ3k9/+tOx7O1vf3s8ZnRCE7hwVvNzn/tcLPvud78LAHjmmWdW+t6x0p+ynmq7o3NDZ3oZ5VFXgrZ9zqSr64MRiy9/+cuxjGOcuj54X8dK2wfSurLOMJkFkEd61eGhzhqOSVrfNttsMwBFR8L3v/99AEXnRyoyOprh9Wh/xjJNFMa6ysgZUNSUfar2J9OnTwdQfB5gxHLBggWxbKxF1IjWEV6jOov4nHnggQfGMo5dQK6/6vOOd7wDQL69FACceOKJAIBFixbFMkaWxgqpxD6M8qoTbpdddgEAfOhDH4plb3nLW+Ixn6dUU0YuzzzzzFg2a9YsAPmzvn5fO3FEzRhjjDHGGGMqhl/UjDHGGGOMMaZijBnrI+0KarFhGHPvvfeOZV/4whcAAFtuuWUs07Apw8S0owF5aFMtPwyh3n777bGMlilNZDCaF29TF90fjdemmtJKpuF5tY8wTKyWMtooNDz90Y9+FEC+WBvI7SWrSrgxmqCVRLXgYl+1LB577LEAigtetZ5SU63vrGuagOSII44AUFwMz78dK9YSaqpWMGqq9hHa84bTlG0+ldBCE+UcddRRAICTTz45lnGR8Whu7wrbvNpJac85/PDDYxkT3Gh/qvqxP9X+gJ+pdj7em9NPPz2WpRI1jWZS4wN1U+voBz7wAQDApptuGsu0ndOGp/eG9U73/zzssMMAAOeff34s++9//9vaRVQQaqO60n77qU99KpbRUqZJGLTPoO0u1S+qrrvtthuAfN9KYGwka9K+i21YrXJMaKPtn0scNAlDqv2rPYx9riYdYZ+s+1WqrX+0onUpZcflUpyDDz44ltGSpxbT1POU3hveO1r8gLxvYUIcIF/2MJrHKe33WNc0ARATAn7kIx+JZUxepwlaFLZ9fcbn73LPWgD4wx/+AKC4NIr3pp2aOqJmjDHGGGOMMRVjbIQpkM/SaCpSzqZrkgtGG3QmXSM4t912G4DiQmH+DRMLAPlM0vvf//5YduGFFwIoRqBGc9SCsxWajIUz6Mcdd1ws40J2nZ18/PHH4/Gf//xnAMVZiDe+8Y0AgDe84Q2xjIu9NeLx4osvAhg7i7Sp6dprrx3LjjzySAB5FA3IZ311Jv2JJ56Ix/fffz+AouaciXvd614XyzgLqjNO//nPfwAUZ4BGc8IG1g1dSM2ZdI1UMCKkmj311FPxmLO5+nO2eY1Schb0Zz/7WSzjfdL+Z7ShdYDRCU2rfcwxxwAoJmNgqnPt855++ul4zO0NdJadmmo9ZZTitNNOi2WMQKmmo3kmmIltNDrLZEH7779/LGM91v7yX//6VzzWdNGEbX/atGmxjGPTeeedF8t4n0bzuDQUzoK/9a1vjWVf+cpXABQjDPw9TQSgUVt9DiC8V9r+6Sa58847Yxnb/2jWVSMVTKCm+n3pS18CUEwWwqjOSy+9FMs0uqjPAYSaMsoB5A4lJmoCRrfzg/2URhJZ/9TlQXfG61//+ljG61VN1eHFCJlG1Pg8pc4GJng55ZRTYhkTuIzmflTHA0Yd+QwF5E4CjfJybFNNtX+lpqz3QF4/tb7z+feMM86IZauKxJeFI2rGGGOMMcYYUzH8omaMMcYYY4wxFWNE62MI4SwA+wF4Psuyretl6wG4AMCmAJ4EcEiWZf8b7jPKJrXHBy0dDHsCwBe/+EUAxbAybVL33HNPLPv9738fj2nT0xApbWO6BwP3XtMw9mWXXQagGFLluWqoeWBgAMuXL0cIQUOtvSGE61FxTY8++mgAwMYbbxzL+DfcWw4Arr766nj8l7/8BUBxPw8mEthvv/1iGZNpaFKNO+64A0BR0+HC9gMDAxgcHEQIIVoLu1lXeZ5q26T14BOf+EQs435SusidmrI+AsC1114bj1l/1aa3++67A8gX0wL5ovp99tknlj366KMAiglNhtN0yZIlUVNaN6qq6cyZM2MZk6jowm3ak1TTG264IR4/8MADhc8D8oXEH/zgB2MZkzzofmG0+6l9cFWasv3TVtztPnUorFfUEcj7Ot2bjjYntYHddNNN8Xj27NkAihZe1kXtO2mxVDskE7Q0ommqT61CPdXzpTVGNeVePWqRYl26+eabY9mNN94Yj2l9VAsQ9/lTTTfffHMARZv1c8891/A1LF++PGpPK2y36yn11MQhvD5NyMB9pVR/2vBSYxMAzJs3D0DR/kTr9J577hnL+Dyhn92olWxwcBC1Wg0hhHjPq6Kp1kE+8+j+aLTXanukXZzPPloG5FZGtaHRQsl7BORLIJqx5KX6U3T5eYpoPaWVTsdijiU6jj/44IMAinv0cmwCck3VOnr88ccDyK3oQN6nqqW10SUOixcvjppKgo1KaKr1lHWSzz5A/hyle/Ix8Y/qqMd8JuKyBiDfL1ETkPB5IpUkpp00ElH7NYC9h5QdD+DGLMu2AHBj/d+mQSZOnFhY91VnI1jTlpg4cWIha08d19UWmDBhQuHBvI41bQFrWj7D9KnWtAV6enpS6y6saQv09PSkMhhb0xYYpj/181QLTJw40ZpWiBEjalmW3RZC2HRI8QEAdqsfnw3gFgDfKPG8GkJnablwmqnigXzRukYOOIOuCyx1ASsjGal0qEw0AgAHHHAAgOLu5pxp0xklzpToLMCECRNSyTFeiRVaAl3UVCNqjCZoQobNNtsMQFHTP/7xjwCKiQCefPLJeMzInF4zk7Xo4nfOLjNRC5AvmNXZEQ50Qx8ihtG1a3U1Ff2hppo2mhpoJIeaagp4TSaSSl3M1Nua9IazlpoKmXU7tUWAzr4BKzSukqY8Pz0nJqHRJBdTp04FULzG66+/HgBw6qmnxjJNJkL9ta2yj9GZNr4Q8HsB4Morr1zp+4bbUqJqmrId6awu641GEjmbqJFxpiueNWtWLNPEF9RDF76vu+66AIqJmJisQXVmNE4TlQxH1do+NdX+dPr06QCK6Z4ZvdH+8qyzzgJQdHpoggbqoS+mjLJpUhLOBGuaf63vZLgZ4RBC6mddHfupq95rRn80kQK599574/HZZ68YXjVSqc8Q/Ex9QGXiJR1r2FdqvWw0EtTT01M5TXnu2neJIyWWMbGXRiHPPffclcq0/2Sfoo4Y9rN6D/ndmuhl6Fg0HMP0p119nuI91iQg7AsYvQHyBCO6zdM111wDoPhcqlArTZ7F+6T9zQsvvACgqGmj9bSKmhJ99uEzj7qNmCBInR18dv/3v/8dy1Qr3if9bI5Z2i6YdESfRzuRmKXZrI9TsyybXz9+DsDU4X4xhDATwMz6cZNfNy
6YYE3bQkN11ZquFta0fKxp+ay2pmZEmhr7zSrx81T5+HmqfKxpl2g5mUi2YupgWJNmlmWzsiybkWXZDN+wxrCm7WFVulrT5rCm5WNNy6dRTTt8WqMaj1PlY03Lx5qWjzXtLM1G1P4dQtgoy7L5IYSNADw/4l+UCC0HtDcB+R5UakVk6FYtJD/4wQ8A5JYGYOSFgQzl33fffbHs2WefBQDsuuuusYx7LJx44omxjHYBtQMMw2A3NeV166JfJg5RSwmtDKrpCSecAKAYVlbbFz9bQ80MMT/00EMrlenearRg0Qqo59DgvhVdratAMaEF903RhcC0hVxyySWx7Mc//jGAoo0sdb0almc4nnUOyO8DEwsAuWVCE+awrTRoN+mapqxLavv6yEc+AiC35QK5leGKK66IZT/96U8B5G0XKGrKASVlK11nnXVWKtM9xlgnW1hY3HVNVYv3vve9AIrJPWihod0RyDXVeqp1iP2eWoA22GADALkFEsgtaFrGOtnCQN/1tq/94E477QSgmNyKbfXSSy+NZRdccAGA4h5fqinvk2rKPlP7GtZjtfelrPirWWe7qunQ5CZA3peqvZa2MWoJALfeeiuA4h6pqYQgmpCBdlVd+8z+Wq3CLT6Mdr2eAsX2Ty3VKsvrvvjii2MZk1qpHVo/h22YOgL5M5omw+GzQ4n7pXb1eYr1QbVge2ZiCyBPcMO6CQDPPPMMgGK71Wcn1jvunQbkCTS0n+BSCa2nLSa+6KqmRK+RNlK1htNGqkuQtK8kqi/vF5PcAbkFWi3ODz/8MICijlVJJpLicgDcYe5IAJet4ndNY7wEa9oOXFfLx5qWjzUtH2taPta0fKxp+fh5qnysaZdoJD3/eVix0HWDEMIzAL4H4AQAF4YQjgLwFIBD2nWCfNPVt1YudP3Wt74VyzhbqTNbl19+OYBikgtGHfT3Rnoj5myGRi/++c9/Aii+bc+YscI5ozNFqQgFU0lnWYZFixbx9+cD2KsTmpJU6uhPfvKTsYya6qwQUxv/8pe/jGWcaddrTc2M6c+pm0Z1+DmaDpURS52F0qQbSn9/f9S1/lkboIN1FUhroAv8d9ttNwDFBetMcPPb3/42llGLkeqp1jVqqRE1zhrpwmMmbNCUyjrrpixevBi1Wg1ZlnGGvuOaqgY8z3e9612xjEkp9BoZ9eFid2Dktp9Kqb6qSLhGnxklUu1TfReQp5MGYl3uuKYK26JGelhPNZLIdNEaTdeZdKLXy+tUrd70pjcBKEZAiEbUGDXRPna4KPrQPhVd1pRjhV7PW9/6VgDFa+BWEbpNBDXVPjSlgbaBXXbZBUC6T9f+lP2OztAPh6bnr7e7rmoK5FEd7fd4TbrAn8lVNCED+0LVVWfV+ZmMJgPA1ltvvdLvURONsqWi8CmYnh8o3NOuasq6oPWSES5NYMXoj27xwL/Veqd1lc9qmkBn/fXXB1DUjMd6X6n5SNHKof1pvc/u+PNUCh1XGVHTa2RyCtU0VZdUA7ocPvCBD8Qy1kVNHEKXg97XRto9kKfnz7IMfX19XXtGTaGa8nq4lQuQX7eOTeyPVQttv3vvvSKxPRPaAXk/oc4G6jtcorB20UjWx8OG+dEew5SbEUikkcaSJUuWZ1lmTVtgaGr+BQsWvJBl2YtwXW2aoSl6+/r6rGmLDH1B6e/vt6YtMrRPXbhwoTVtkaEvxYODg9a0RYY+4C1btgy1Ws2atkBqwmfp0qV+nmqBRGp+P6N2kZaTiRhjjDHGGGOMKZfOxu+agDYDtSvQ8kF7DpDP/t11112x7Dvf+Q4A4Omnn45lnH1pZvGv2h9oadGZXJbpZze6F0gnSVm8uNeO2j5oJfvb3/4Wy8444wwAaUvJ6kBddNaW+tEuAeT7K2m0TC2oVUPvN2dP3/nOd8ayDTfcEEAxocWFF14IoLiXEu0PI9VT1Y+WAF04y3ajC+S5CFlnIktcxF06qQX/tBkDuRWE+8YAefIZLswG0pbZVFtVawrRBARDvxfIbYNqt2C76MRi41agBrR5AXkd0brEPalUU9qcVDO117COaTIbJr5ILebWREK0naasqlXPJMa6pNZHXpvqQ2tZyk6mY4vuQ8XkTkyiA+R7J2p/wP5H91W87rrrAFS/Tg4Hr0/HA9pqVWtqmEqkwL3rgGJ/Tfuo7u/H39X6zX3b+H+gaHke+n2jBY2isF3rPqfsP9Vizr/RuqoJbfiMRm2BvF3r+MPEcKk6P9p0BNLnzLqodYn1WOshnw10zN5qq63i8b777gsA2H777WMZx2/tU6mfRnEbtT5WGX1WYV1KWRr1OZLXzaQrALD77rvHY+7BqgnCWN81SR5tlSUmEmqI6r1FGGOMMcYYY8w4Z9RE1HT2hYso9e2Yb72nnHJKLOMseCoF90ik0strOlTOjuosPWfVmlm82Q10FmefffYBAGyzzTaxjDMy55xzTizj4led4W0wTX5BU86K6AwG77FqysiSzqIwopbypneLVOSUs4kapaRWmjb+/vvvB1BMMZuakUvVXf09Jg3gYm39PtWPM3r6t1XWVOsXI7866816deONN8YybqWRioTpvdL2ydk5rdvUSGdB+X06+8v+YPbs2bGMs9KdXni8KlLRdLbBvfbaK5axvmgSH9ZTXbDOGVyNUuhsI/to3Y6CM8WqC++JRik4y67fx/NqtM/pJHo9vEZ1fbCuqeuDSak0GsQoZiriAOSRNE0mwnqqfSfLNOU075O2iyq6PhTVle1QE9+wHapzJpUgh2iCHB3TqatGkVi/UzPo3DoGyKPM2odXmVSUWtsU3QJa7xipYOIkANhxxx0BFNu/tnXqq1E4aqr1jp+tn8N2oMlJRkt0LbWNBMdnJhUC8oRtOmazDmkCJn3WZX1X/Th+q1b8Po0+D0kOBKDabhol5aZgP6t93J577gmgGHHkfUjdDyDXSN1a7K91Ky/2P5pkjL/XzrpZ7R7aGGOMMcYYY8YhflEzxhhjjDHGmIpRHU/OCGjol+FiDd8yeQD3pQHykG4z1iP9bFqhNLzKsKmGmh966CEAxQWdVbSVMESrFhCGizV8e+uttwIALrjggliW2jMtRcqmp7pQX+6pBORhZz0HJjJRS08VEwmwrqld6bDDVuxsobamO+64AwDw61//OpYxCcZwljySCq1rqJ6WFNoDgbzualII2ld1v5Yqa6qL3D/zmc8AyBMqAHmSC90XjlZo1TSV3EOvm9+nNh1af9QmwbqoyUvmzZsHoJhMpIptn6iV88Mf/jCAfH89IE9ucffdd8cyaqqWz//+978AinVOF8HTUqL6pfbsYz3V+swy/f0qakrLmOrCMWq//faLZaxrbH9A3i41AQPtempD33jjjeMx+xO1L7Lta5/Oz9bxj+NVFXUcCs9R2z/rme53RNux9nG8TrXS7rDDDgCAbbfdNpapvYxtQutgKk05+xG9J80k1OoGKTs5+7i3v/3tsYy2TrWO0u612WabxTLW+ZQ1D8htfKm9r/R+8W/03tx8880AVm/P226Qaktsj6m9KdUOzWRpqf069blBNeC4o5ZF6
qvWW1ra9XMaTVLWbVIWfY4hqunBBx8MoLinHLVPJbjSvlDbAMcxrV/6bE/Y5vVvueSpncucqt9bG2OMMcYYY8w4wy9qxhhjjDHGGFMxKm99ZCiStgUgt9Zo6JzZydSC0MyeOwwhawiUe1h86lOfimW0VDz44IOxjBZB3VMlFdLuNgyZ77zzzrGM4eQFCxbEsltuuQVAUedmQuYpqxP3FqPtCshD27SyAcBVV1210jlUKYseoaa0PAF5dqeXXnoplt1www0AgOeffz6WUVMNnbNM66H+PGW15L4gmg2OWbRuv/32WEb7pd6XKmrK61WbI/sBtXjw2h599NFYtqrsVlqm+vFvuNcdAOy9994AinYLavXwww/HMloftX1UMTMhNdVsbqw3avOi7UizPtLWmbLbavZW1YD2Ea27bOe6zw3R/Rlp7Rstmqp9kVl0td6w3dFSDuTZArX90WKv1jq1V1EjtaDRJqoZ3qiV9uksGymDbBVIWfRpJdWMbo888giAYnukRV//luO41nO1LzO7m2Y15H3RcZzHWn/Zj2jbYH9Tpax6KU1pG9P9PufPnw+gmKGUbVkzFL7lLW8BULSLa9Zcfo7WS/a/ai3jz6dPnx7L7rnnnsLv63HVNd1jjz0AFDM+89mUmV6B3GKu/Rrrn+4pqXZS2he1jFpqu6aW+vzBZzqt96m9iquCtlWOU6lnRn3mZvZXXYbA32N9BYr6cTlDakzSLKZHHHEEgOLzFJcH0D4J5PejrD7VETVjjDHGGGOMqRjVm0YfAmcadFEw37J1BoCzmjrzmIrkpBZ+6ixYambn29/+NoBiBIozIWeeeWYse+yxxwAUFzFWbZYSyDXQ/TwYpdTZMM4A6WxvSlPeI71WjWxypkmjol//+tdXOgfOvp1//vmxjLMjGvmoysyPzl5RU93fiwuFdUaLkTSdTVxV4pDhrpX73Oj3HXvssQCKiR04e6ea8hx0tqoqmqba56GHHhqPOSuuEUnWEU2OkuoPUnui6Uz5m9/8ZgD5rBmQ762kyTAY0fjDH/4Qyzgjp5pWZQ/FlAYzZ86MZbvuuiuAPLoD5El82CaBPEqhbZFlOkOriQWYHODd7353LGPESD+HCTZ0L7xUMplU/9MNdAac53TkkUfGsoMOOghAUVMmm9IyjiNaJ//6178CKPahOqvLBEw6487xUT+H4xG/F8jru2pf9uxvK2hd5f3/0Ic+FMsOP/xwALluQL53oUYqUn0cI5Q6nmkkiO4DnVXnsd5v3j+t8/wevWccS7vdt6baP50CAHDUUUcBKO4XxcRMzz77bCxj/6p1MZXwJfX8o88VfB7Q8ZP9p9ZL9rna5ums0We2bpAap9iPAsBnP/tZAMWEP9yPVusutdA4IGsMAAAVKklEQVSkajzWCF0qyYXWP/6ujjmpz2ZCKEahgTwSpPeoG2h94LFGzqmptrFLLrkEQDGaxUiZJgqaMWMGgGKU7cknn4zHrH9a19g3qC587mJ0D8ifnW+66aZYxnEzlZCkGRxRM8YYY4wxxpiK4Rc1Y4wxxhhjjKkYlbQ+agiUVkRdqM3wrobqGU5OJRNJ7aWkdgRdtE3bk9qtaDHRsDJDrty/DcitAWqnqsp+FXoe3FeDYVwgD9E+8cQTsWzOnDkAiok8GBpOJWFQy4NaVbn3lS4CnTZtGoDi/briiisAANdcc00sS2laFbResX7qYlVem9oMuDhbF/hTU/08htN1EbbW02OOOQZAvmgZyBd5a6j+8ssvB1C0lDHMr3aeqtTT1P4+an+gVprE57bbbgNQtD8QbedM7KDWPFoigLzN014J5DYKtZkwwY0m2qDmVdyfSjXlteueaezXuHgfAK699loAxT6W15bSVBMRaJ2ktVn3VmNdow1Hv4/JdoD0nj9VrKfs63QvH/ZXuq8nr00XvlNTTcay+eabAwD23XffWKZ2ZvbfaofiZ6rV6tJLLwUAXHnllbGMmqrFpwqWR5LSdf/9949ltHNdffXVsezOO+8EULQ+U1dty7Tc6n5hqcQ3ao+67777ABStdvw+TXpFPVMJn7qN9km0g9HSDeRtU9se+wK9htSykNR+a9pn/Otf/1qpjDqrLWzu3LkAirZAPk/o+Vel/et5sJ7qMyN1ZlImAPjLX/4CoGi9ZUIgrafUVC24DzzwQDzm2Ed7OpCP6Zogg9+jZTxXJr8C8r6q29ZHhWO/LkPg8w+fvYE8MZNab9nOdd9j6qtWXh2/Wf/UJs76pxZKPjOzjwZye/9TTz0Vy6iljnGtUL2nCmOMMcYYY4wZ51QyoqbRE8726e7qjFRopIczBToDxMXBCmdntt5661h23HHHxWMuEtToEGd5dGbyRz/6EYBiZIQzGFWZ9VFUUy7S1RTd1E2vh/rpLCFneTRFL69bZ4D1mBERnS3lYksuWgaAE088EUAx+UYV08YTneljhFA15QyvzhLqzA9h3dYoJNP67rXXXrHsjW98YzzmjI7eGyZkuOiii2LZ6aefDqA4087zrmI9VbgIWtPls24wgQiQ11Nts5z91agxZ5E1mqTRNd4Hzk4CeWKH3/3ud7GMM3o68151LQn7PZ0l5Gy3Lq5mhFijFKyfupCa9XTHHXeMZboInn21fjZnLX/zm9/EMqau1++jplXUVqNQjDBoVIyaauIQ9g36t9tssw2AYsIMJiXQZCFaJ+l60Agoo8oaOWeEWcfEqkR5hkO1YTRWozWMHGpUkj/X8YX18YADDohljF6oS0G3g2DKbUZ3gTwiqhE1nuNwW6ekrqUbpNoNE6IxcRKQ96kaUWFUR8ck/q0mzWC0RiMVGkWi44iRSSDXSjVNOWZYV1XHbmuackzsueeeAIC3ve1tsYzjrf4+E39pMhbWUx3bqYtu48FxHMj7SnUjcezSSDmf1VLJs3Ts6naCJrZb7ZsOPPBAAMW6Rk01QsjkQvo8T021/jP6ePLJJ8cyju1A3p/osyfHSN32hwlGtL7yu/WZRPvrMhgxohZC2CSEcHMIYU4I4ZEQwnH18vVCCNeHEP5e//+rRvoss4JarYb+/n709fWhr69P98Cwpk1Sq9XQ19eHRYsWYdGiRbGzs6bNU6vVMDAwgP7+fvT398eBwZo2T61Ww+LFizEwMICBgQFrWhJD+1QArwasaytkWYbly5djcHCQD3PWtEWyLMOyZcuwdOlSLFu2TPd6taZN4j61fDj29/X1ob+/38+oXaYR6+MggK9mWTYdwE4AjgkhTAdwPIAbsyzbAsCN9X+bBlljjTUwZcoUrLXWWmwEk2FNW2Ly5MlYe+21MWXKFHbW1rRFJk2ahLXWWgtrrrmmNS2BEAImTZqENddcE5MnT+YDsDUtAe1TAbza41Tr9PT0YMKECZz1tqYl0Nvbi0mTJqkDx5q2gPvU9jBp0iRMmTLFY38FGNFXlmXZfADz68cLQwhzAUwDcACA3eq/djaAWwB8o4yTSu2Zpfud0D6m9h1axDQEz1C+Wki4fwjD1UBuWwNyO6WG6rlf0oUXXhjLuH9QMwtdh/5Nb28vBgcHJ6GNmirUVEPiHDRUU1qc
1G7Av9F9kbg4mzYeoLgQlhYcDTUzYQiTXQC5ps0kDxiqaU9PD2q1Wls1VesBQ+Fq1WQ9VusOEw7o3ia0kWoyAv5crZR6H7hnEO06QG4jvfvuu2MZLQ5labp8+fK2aqoWIlp01dJEnVVTtmm9H2zzasmjNU33RGOdA3Ir2cUXXxzLqKlaR1P6NappCKFg6eu0prQ0qqZEE1bQeqJJQJgoRxPmsJ2rpZcJBIDcpvOTn/wkltFuplarVjQFkv3wANowTvGcVFNac9UOQzuRjj2HHXYYgKL1iQvR1TbJz9GF6JqU5OyzzwZQtDnSQaA2JmrSrF0s0We0RVNFdeVYpOMUr1PrKvsCagnkdVRtuLSjasKAn//85/GYer788suxjJEEbS8pq1az7b9OWzRN2VxZz1LtX+sqky9sueWWsYzHag/jc5L2mfqcxOcp7Zt5Xql9SJupq53sU3nu2t+wLqaeBXWc5zMT91cFcmupJgv51a9+BQC47rrrYpkun+Bna7tIJbNimVpMUwmhUrbdTo79/H6tk2y/KaupJrOhZVSX4rD/1KVK3/zmNwEUxyb97NQ+fqy7Wk91aRBJ2aPLtpOu1gKgEMKmALYDcDeAqfWXOAB4DsDUYf5mJoCZ9eNmz3PMUqvVWFEXAdjMmraONS2fWq3Gzt2aloQ1bQ91TddCg+OUamrS1B+gG9YUsK4jIS8lfp4qCfep5WNNu0/DL2ohhLUBXALgS1mWLVDxsyzLQgjJqZAsy2YBmAUAvb29DU2X6Bs+v0cX6nGWS2fLmB5aZ3u5MF0XanKGWN94NX06U6iedtppsYzfnZpBa6USZlmGgYEBrLHGGli8eHFtyM9K1TQ1M6HJWDjjpdsgMOrIhAE8Z6AY0WAUSdOja/p0Rs3OO++8WEYtU2miW9W0v78fkydPxsDAQFs1Tc2g68whyzShBSMVuqCdM0kbbLBBLGMbeOyxx2KZavr73/8eQDGlMmfYVNPU7OXqkmUZlixZgkmTJmHJkiVt1VTPk1rq9fAadVaNdVGTjvDe6D1if6CL3VU/pjPX2cvUIvfUDOTqQk0nTpyIpUuXtlVThX1nStMddtghljE6kWrnmvCDKYmZzAIAzjnnnHg8e/ZsAMXF65y11BnUMto+P6c+O/p0o+OUajqc7o2cp9YLtu9ddtklllFzjahRU022wqRAJ510UizTVPDUX2dw+Tnaz5eVeIFr1bAamtZ/FnXt6elp6GT0nFk/tIxjvkbKec0aZWM0QR0HTMigSRp02w1+n15fqv2XNU6xPrTreSr1jJKKpHHM1/bP+qTOmMcffxwAcOaZZ8Yy9p+amlxT8bMdjJQUrIy62ok+lVrq9aSerTiW65jE39NEOKyL6jhg1Efbt/Yt7D9H0qyscWrx4sVtfUZlndQIIa9Nn6cYYdfnKf5cHQe//e1vAQDnn39+LKMWet9STrGRSEXKOpGMpaH0/CGEiVjxknZOlmWX1ov/HULYqP7zjQA8355THJvwgWLixIk6GFjTFuBLmjUtDw5+EyZM0E7OmraANW0P2qcCoD/LurZAlmWo1Wp8yLSmJcCXtJ6eHn2JsqYt4D61fPiSZk27TyNZHwOAMwHMzbLsp/KjywEcWT8+EsBlQ//WpGED6OnpGfomb02bhJr29vYWfMawpk2TZRmWLl2KEMLQmWVr2iTUtKenx5qWiPvU8uFLGrBS1MCaNgmjk1ybLljTJnGfWj588XV/Wg0asT7uAuD/AXg4hMAVj98CcAKAC0MIRwF4CsAhZZ2U7kFAm4faaWh74v5cQL5ruSYGYVhUKxoXA9LiCAA//vGP4zEXumuSgdS+U63YHpjyuKenR+2H66KNmmoYnTabq666KpYxRK+L2mkZ1WvlvVH7E62haiM799xz4zGtZBqK5wxNWfv6LF++HMuWLUNPT4/arNqqqV4PbSG33HJLLKN1VBOCcCGxPvhwcbbaR1J7oqmNJ7Wolba2sjSt1WoYHBxECEHbZFs11XOnpmprYJtXTWl50nrKRAxz586NZbTg0jYKFO26/G4d7KlpWTYyrp/slqasV6op9/9R6y0tT2qPZv1U6ygt4rpfWCrJTsrWUiaJPnV6CGEftElXvQaOGZrEh3uA6SL3lJ2ZiS1mzZoVy/hzXfieso5pH9Ku/dF4nXV7T1s1HQrrqib24v5fah1n/6kJLWh/0j2QmBBIny9S43g795rjy28IIabnb7emOk7df//9AIC77rorlnEfKP097nPKfdCAfH85tTayf9TngUZtZGXRjT5V+zj2h9p/UlO1fLPPZd0E8jqp+pHhLHWd2EuOY39PT4+eW1s0ZXvTZ9QrrrgCQLHfY4IbrX/c35TLFoDcSqn68XO6vQ9fszSS9fFPAIZ7K9mj3NMZH0yYMKGwvg4AFi5c+HKWZS/CmjbFhAkTCplBAWDBggXWtAV6e3tX2jS+r6/PmrZAb29v4SETAPr7+61piwztUxcuXDgny7Kr6/+0rk0QQig8KA0ODlrTFhkaoVi2bBlqtZo1bQH3qeXT29tbWJ8IAIsWLbKmXSJ08g2zt7c3G/rgNxJ829bdyJkOfr/99otlu+22G4DiGzhnyznjCQC/+c1vABQjajqTlFoY2gkWLlx4X5ZlM1b373p7e7OhDSpFKhqoA8ZOO+0EANhjj7wNciG8zkBytldnj5lqX9P26qxkKiLZCRYsWNC0plrfGoH1Wq9x5513BlBMKMAkDbqgmIkYmMocyGc0dUYutdC+0/T19TWtqaZwHw69Rk0NT7jgnbNrQL64eN68ebGMUTNNDMLZX23v7Y70NEJ/f3/TmjbSn2pbZIRH6+l2220HIHclAHmCAU0SMmfOHADFKBuP9Ty61YcqzfanIYRspMQHQLGuUFONxDLVuerCBBca5eWMus7Qy/5aye/rFoODg01pCqxIJpJKzDEUvU7WQe03Ult20F2gfSrHoiq071VRf1FramBspk9dd911ARQjkuxnU9sS6PhDTRv5zm7TSp869KUvhfZxbOM6UcRJY91iI+VGSrk4qlhPAWDRokVtHadUA7Z9TW5HNPES62cqOdZoyDbZ6DjV/RHVGGOMMcYYY0wBv6gZY4wxxhhjTMWovPWR6Hmm9rJgpj8NgdIeoYsUGXZW60QjVpd2027rYwq12zBcrCF9WgDU/sDwNG08QH5vhtujolt00vpItK7xM9R6Rk3V/sD6nKq7VUDvZSuWktW1zFAX2nCAXNOUzvp71FLrMz+vHXtNtUK7rY9Kqq7xvmh/wLLUIvdUsoUq9KFKu62PCutTal+elF05ZX2qUnsfjk5YH4f8TeH/QK5Tag8+teTS3ry639lpOmF9VKhlah+wlFVMk6qxrnY6WUgztNv6qFpRv1RdS/WVqWen0dD+2219TGmqulDL1H7GZewZ1w1sfTTGGGOMMcaYUUq1pkFXQWq2R2crdTaYpJIHDNlja1yTWsCqmnLmR2c1qHMqYUAVohPdRusXZ3607lLTVKKAqs6qdWtRLnXRespIkGqlkTSyqqjIeK6nqbqW6jtZT1PtvArJQqpEatZ
cI75D0fZU1TZfBVIz6MOlLAeK9dJ1NA21TPWZKaoWKa8KOobw+bLRqI7rZppWNB3ruMYYY4wxxhhjTMXwi5oxxhhjjDHGVIxRHdfWsKhDpOWgNpOUpcesPhrSH8+2O2OMMcYY0ziOqBljjDHGGGNMxfCLmjHGGGOMMcZUDL+oGWOMMcYYY0zF8IuaMcYYY4wxxlSM0MnkBiGE/wDoA/BCx760vWyA8q7l9VmWbbi6f2RNV4k1XYE1LZ+qaPpUyefSTaxp+XRdU2DMtX9r2h66rqs1XSXWdAUd17SjL2oAEEK4N8uyGR390jZRlWupynmUQVWupSrnUQZVuZaqnEcZVOlaqnQurVCl66jSubRCla6jSufSClW6jiqdS6tU5Vqqch5lUJVrqcp5lEE3rsXWR2OMMcYYY4ypGH5RM8YYY4wxxpiK0Y0XtVld+M52UZVrqcp5lEFVrqUq51EGVbmWqpxHGVTpWqp0Lq1Qpeuo0rm0QpWuo0rn0gpVuo4qnUurVOVaqnIeZVCVa6nKeZRBx6+l42vUjDHGGGOMMcasGlsfjTHGGGOMMaZi+EXNGGOMMcYYYypGR1/UQgh7hxAeDSE8HkI4vpPf3QohhE1CCDeHEOaEEB4JIRxXL18vhHB9COHv9f+/qgvnZk3LPzdrWv65jUpNgerqak3bcl7WtPzzsqbln5c1bc+5jUpdrWn5VErTLMs68h+AXgD/ALAZgEkAHgQwvVPf3+K5bwRg+/rxOgAeAzAdwIkAjq+XHw/g/zp8XtbUmlrTcairNbWm1tSaWlPrak3HvqadjKjtAODxLMvmZVm2FMD5AA7o4Pc3TZZl87Msu79+vBDAXADTsOL8z67/2tkADuzwqVnT8rGm5TNqNQUqq6s1LR9rWj7WtHysaXsYtbpa0/KpkqadfFGbBuBp+fcz9bJRRQhhUwDbAbgbwNQsy+bXf/QcgKkdPh1rWj7WtHzGhKZApXS1puVjTcvHmpaPNW0PY0JXa1o+3dbUyURWgxDC2gAuAfClLMsW6M+yFXFQ73WwmljT8rGm7cG6lo81LR9rWj7WtHysaflY0/KpgqadfFF7FsAm8u+N62WjghDCRKy4WedkWXZpvfjfIYSN6j/fCMDzHT4ta1o+1rR8RrWmQCV1tablY03Lx5qWjzVtD6NaV2taPlXRtJMvavcA2CKE8IYQwiQAhwK4vIPf3zQhhADgTABzsyz7qfzocgBH1o+PBHBZh0/NmpaPNS2fUaspUFldrWn5WNPysablY03bw6jV1ZqWT6U0LSsrSSP/AdgHKzKn/APAtzv53S2e97uwIrz5EIAH6v/tA2B9ADcC+DuAGwCs14Vzs6bW1JqOQ12tqTW1ptbUmlpXazq2NQ31EzLGGGOMMcYYUxGcTMQYY4wxxhhjKoZf1IwxxhhjjDGmYvhFzRhjjDHGGGMqhl/UjDHGGGOMMaZi+EXNGGOMMcYYYyqGX9SMMcYYY4wxpmL4Rc0YY4wxxhhjKsb/B/xk5ohU54DTAAAAAElFTkSuQmCC\n", + "text/plain": [ + "
" + ] + }, + "metadata": { + "needs_background": "light" + }, + "output_type": "display_data" + } + ], + "source": [ + "idx1 = 1\n", + "idx2 = 5\n", + "\n", + "N = 10\n", + "\n", + "encoding1 = model.encode(get_sample(idx1)[None])\n", + "encoding2 = model.encoder(get_sample(idx2)[None])\n", + "\n", + "fig, axes = plt.subplots(1, N)\n", + "\n", + "for i in range(10):\n", + " beta = float(i) / float(N - 1)\n", + " alpha = 1.0 - beta\n", + " \n", + " combined = model.decoder(encoding1 * alpha + encoding2 * beta)[0]\n", + " \n", + " show_image(axes[i], combined)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.8" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/examples-notebooks/autoencoders/mnist/mnist-vae.ipynb b/examples-notebooks/autoencoders/mnist/mnist-vae.ipynb new file mode 100644 index 00000000..4a00f5da --- /dev/null +++ b/examples-notebooks/autoencoders/mnist/mnist-vae.ipynb @@ -0,0 +1,310 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "import os\n", + "import torch\n", + "import numpy as np\n", + "import matplotlib.pyplot as plt" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "import vel\n", + "import vel.notebook as nb\n", + "nb.reasonable_notbook_defaults()\n", + "torch.set_grad_enabled(False) # We don't need autograd here\n", + "None" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [], + "source": [ + "config = nb.load_config('examples-configs/autoencoders/mnist/mnist_cnn_vae.yaml', run_number=2, device='cpu')" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING:root:Setting up a new session...\n" + ] + } + ], + "source": [ + "model = config.load_trained_model()" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "MnistCnnVAE(\n", + " (encoder): Sequential(\n", + " (0): Conv2d(1, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n", + " (1): ReLU(inplace)\n", + " (2): Conv2d(8, 16, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1))\n", + " (3): ReLU(inplace)\n", + " (4): Conv2d(16, 16, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1))\n", + " (5): Flatten()\n", + " (6): Linear(in_features=784, out_features=32, bias=True)\n", + " )\n", + " (decoder): Sequential(\n", + " (0): Linear(in_features=16, out_features=784, bias=True)\n", + " (1): ReLU(inplace)\n", + " (2): Reshape()\n", + " (3): ConvTranspose2d(16, 16, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), output_padding=(1, 1))\n", + " (4): ReLU(inplace)\n", + " (5): ConvTranspose2d(16, 8, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), output_padding=(1, 1))\n", + " (6): ReLU(inplace)\n", + " (7): ConvTranspose2d(8, 1, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n", + " (8): Sigmoid()\n", + " )\n", + ")\n", + "----------------------------------------------------------------------------------------------------\n", + "Number of model parameters: 45,569\n", + 
"----------------------------------------------------------------------------------------------------\n" + ] + } + ], + "source": [ + "model.summary()" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [], + "source": [ + "data_source = config.provide('source')\n", + "train_dataset = data_source.train_dataset" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [], + "source": [ + "def get_sample(idx):\n", + " return train_dataset[idx][0]" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [], + "source": [ + "def show_image(axis, sample):\n", + " axis.imshow(train_dataset.denormalize(sample)[:, :, 0], cmap='gray')" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAA2oAAACzCAYAAAD48u9xAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4zLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvnQurowAAHTJJREFUeJzt3XuQVdX55vHnRYOXOKiIQUqjmARNaQrbiEocSkhA4xgTUROVUhFjiRVvJKUUxhCHjMEQRafEaOIlgBdGtIJENOOoI15iFAokJj9FETXRH9jBK3LRyChr/uCkgr6r6d3nutbu76eKovth77PXPv3QfVbv3qsthCAAAAAAQDp6tHoAAAAAAIBPYqIGAAAAAIlhogYAAAAAiWGiBgAAAACJYaIGAAAAAIlhogYAAAAAiWGiBgAAAACJYaIGAAAAAImpaaJmZkeZ2TIze8nMLq7XoIBGobPIEb1FbugsckNnkSILIVS3o9lWkl6UdISkFZIWSRoVQli6hX2qOxhQEUKwavels2iFWjordb23dBZ18FYIYddqd6azaIGmdrayD71FTYq8Pqjlitohkl4KIbwSQtggabakY2t4PKDR6CxyRG/RbK/WuD+dRbPRWZRSLRO13SX952bvr6hkQKroLHJEb5EbOovc0FkkaetGH8DMxkoa2+jjAPVCZ5EbOovc0FnkiN6i2WqZqK2U9PnN3t+jkn1CCOFGSTdK/DwvWo7OIked9pbOIjF0Frnh9QGSVMuPPi6SNMDM9jaznpJOljSvPsMCGoLOIkf0Frmhs8gNnUWSqr6iFkL4yMzOk/SApK0kTQ8hPFe3kQF1RmeRI3qL3NBZ5IbOIlVVL89f1cG4TIwa1brUeVfRWdSKziJDT4cQBjXrYHQWddDUzkr0FrVr9PL8AAAAAIAGYKIGAAAAAIlhogYAAAAAiWGiBgAAAACJYaIGAAAAAIlhogYAAAAAiWGiBgAAAACJYaIGAAAAAIlhogYAAAAAiWGiBgAAAACJYaIGAAAAAIlhogYAAAAAiWGiBgAAAACJ2brVAwBQPgcddJDLzjvvPJeNHj3aZbfeeqvLrr322uhxlixZUsXoAAAA0scVNQAAAABIDBM1AAAAAEgMEzUAAAAASExN96iZ2d8lrZX0saSPQgiD6jEooJHoLXJDZ5EbOovc0FmkyEII1e+8qdSDQghvFdy++oNlbKuttnLZjjvuWNNjxhZm2H777V227777uuzcc8+NPubUqVNdNmrUKJf985//dNmUKVNc9rOf/Sx6nFqEEKzWx+hKb7trZ4tqa2uL5vPnz3dZr169qj7Oe++9F8132WWXqh+zWegsNjd8+HCXzZo1y2VDhw512bJlyxoypoina32RSmfTNnHiRJd19DW7Rw//w1fDhg1z2WOPPVbzuGrQ1M5Wtqe3qEmR1wf86CMAAAAAJKbWiVqQ9KCZPW1mY+sxIKAJ6C1yQ2eRGzqL3NBZJKfW36M2JISw0sw+J+khM3shhPD45htUyk7hkZIt9pbOIkF0Frmhs8gNr2mRnJquqIUQVlb+fkPSXEmHRLa5MYQwiJsykYrOektnkRo6i9zQWeSG17RIUdVX1Mzss5J6hBDWVt4+UtL/qNvIWmTPPfeM5j179nTZYYcd5rIhQ4a4bKeddnLZCSecUMXoum7FihUumzZtWnTb4447zmVr16512V/+8heXtfgm4sLK2ttmOOQQ9zVLc+bMiW4bWywntnBRrF8bNmxwWUeLhgwePNhlS5YsKfSYuUihs4cffrjLYh+TuXPnNmM4WTv44INdtmjRohaMpHFS6Cz+bcyYMS6bMGGCyzZu3Fj4MWtZiC5FdBapquVHH/tKmmtm/3qc/xVC+D91GRXQOPQWuaGzyA2dRW7oLJJU9UQthPCKpAPqOBag4egtckNnkRs6i9zQWaSK5fkBAAAAIDFM1AAAAAAgMbUuz5+1trY2l82fPz+6bWxxhNTEbgSeOHGiy9atWxfdf9asWS5rb2932bvvvuuyZcuWFRkiErT99tu77Ktf/arLbr/9dpf169evpmMvX77cZVdccYXLZs+eHd3/T3/6k8tinf/FL35RxejwL8OGDXPZgAEDXMZiIv/Wo0f8+6B77723y/baay+XVe6VAWoW69e2227bgpGgDA499FCXnXrqqS4bOnRodP/999+/0HEuuugil73++usuiy3iJ8VfsyxcuLDQsVPCFTUAAAAASAwTNQAAAABIDBM1AAAAAEgMEzUAAAAASEy3Xkzktddec9nbb78d3bYZi4l0dJPj6tWrXfb1r3/dZRs2bHDZbbfdVvvAUGo33HCDy0aNGtWUY8cWLdlhhx1c9thjj0X3jy1yMXDgwJrHhU8aPXq0y5566qkWjCQfHS20c9ZZZ7ksdtP7Cy+8UPcxofxGjBjhsvPPP7/Qvh117phjjnHZqlWrujYwZOmkk05y2TXXXOOyPn36uKyjBZEeffRRl+26664uu/LKKwuMsOPjxB7z5JNPLvSYKeGKGgAAAAAkhokaAAAAACSGiRoAAAAAJIaJGgAAAAAkhokaAAAAACSmW6/6+M4777hs/Pjx0W1jqx79+c9/dtm0adMKHfuZZ55x2RFHHBHddv369S7bf//9XTZu3LhCx0b3ddBBB7nsW9/6lss6WkXp0z
pajfHee+912dSpU132+uuvuyz2/+rdd9+NHucb3/iGy4qOHcX16MH39Lrq5ptvLrzt8uXLGzgSlNWQIUNcNmPGDJcVXbW6o1X2Xn311a4NDMnbemv/8n/QoEEuu+mmm1y2/fbbu+zxxx932WWXXRY99hNPPOGybbbZxmV33XWXy4488sjoY8YsXry48LYp46svAAAAACSGiRoAAAAAJIaJGgAAAAAkptOJmplNN7M3zOzZzbLeZvaQmS2v/L1zY4cJdA29RW7oLHJDZ5EbOovcWAhhyxuYHS5pnaRbQwhfqWRXSHonhDDFzC6WtHMIYUKnBzPb8sES1qtXL5etXbvWZTfccIPLzjzzTJedeuqpLrvjjjuqHF33EUIotFJEvXqbc2fb2tpcNn/+fJfFuh1z//33u2zUqFHRbYcOHeqygQMHuiy24MKbb75ZaDyS9PHHH7vs/fffLzSeJUuWFD5OLXLrbOzj9NRTT7ns7rvvdtlpp51Wy6FL5cknn4zmgwcPdtlhhx3msgULFtR9TF3wdAjBryzwKal0truKLfTw/e9/v9C+jz76qMuGDx9e65BaqamdreyXbW/HjBnjsqILID300EMuO+mkk1y2Zs2awuOJvSaeOXNmoX1XrlwZzWOLo3Tl9UUzFHl90OkVtRDC45I+vTzisZJuqbx9i6SRXR4d0ED0Frmhs8gNnUVu6CxyU+09an1DCO2Vt/8hqW+dxgM0Er1FbugsckNnkRs6i2TV/HvUQghhS5d/zWyspLG1Hgeopy31ls4iRXQWuaGzyA2vaZGaaq+orTKzfpJU+fuNjjYMIdwYQhhU5GeHgQYr1Fs6i4TQWeSGziI3vKZFsqq9ojZP0umSplT+vqduI0pU0Zsi33vvvULbnXXWWS678847o9tu3Lix0GOiU6Xs7T777BPNx48f77Idd9zRZW+99ZbL2tvbXXbLLbe4bN26ddFj/+EPfyiUNcJ2223nsgsvvNBlp5xySjOGU6umd/boo492Wew5xb/17et/UmrvvfcuvH9HN8NnqpSfZ1upT58+0Ty2cEjs9cLq1atd9vOf/7z2gZVHaTt72WWXRfNLLrnEZbHFBa+//nqXTZw40WVdWTgk5ic/+UnV+15wwQXRPLWFQ6pVZHn+OyQ9JWlfM1thZmdqU5mPMLPlkkZU3geSQW+RGzqL3NBZ5IbOIjedXlELIcTX35ayXscV5UZvkRs6i9zQWeSGziI31d6jBgAAAABoECZqAAAAAJCYmpfnxydNmjTJZQcddJDLhg4d6rIRI0ZEH/PBBx+seVwoh2222cZlU6dOjW4bWxRi7dq1Lhs9erTLFi9e7LKcF5TYc889Wz2EbOy7776FtnvuuecaPJJ8xP4PxhYYkaQXX3zRZbH/l+ie+vfv77I5c+bU9JjXXnutyx555JGaHhPpufTSS10WWzREkjZs2OCyBx54wGUTJkxw2QcffFBoPNtuu200P/LII10W+xptZi6LLYJzzz2lWfsliitqAAAAAJAYJmoAAAAAkBgmagAAAACQGCZqAAAAAJAYFhOps/Xr17vsrLPOctmSJUtcdtNNN0UfM3bTb2yxh+uuu85lsd80j3wdeOCBLostGtKRY4891mWPPfZYTWNC97Ro0aJWD6GuevXq5bKjjjrKZaeeeqrLYjfHd+Syyy5z2erVqwvvj3KLdW7gwIGF93/44Yddds0119Q0JqRnp512ctk555zjso5eA8YWDhk5cmTV4/nSl77kslmzZkW3jS2wF/O73/3OZVdccUXXBlYCXFEDAAAAgMQwUQMAAACAxDBRAwAAAIDEMFEDAAAAgMSwmEgTvPzyyy4bM2aMy2bMmBHd/7TTTiuUffazn3XZrbfe6rL29vbocZC+q6++2mVmFt02tkhI2RYO6dHDf69p48aNLRhJ99O7d++6P+YBBxzgsli/R4wY4bI99tjDZT179nTZKaecEj12rEsffPCByxYuXOiyDz/80GVbbx3/8vr0009Hc3Q/scUbpkyZUnj/J554wmWnn366y957772uDQzJi31u69OnT+H9L7jgApd97nOfc9kZZ5zhsu985zsu+8pXvuKyHXbYIXrs2AInsez22293WWzBvrLjihoAAAAAJIaJGgAAAAAkhokaAAAAACSGiRoAAAAAJKbTiZqZTTezN8zs2c2ySWa20syeqfw5urHDBIqjs8gRvUVu6CxyQ2eRmyKrPs6U9CtJn14+8H+GEKbWfUTdxNy5c122fPny6Laxlf6GDx/usssvv9xle+21l8smT54cPc7KlSujeYZmqgSdPeaYY1zW1tbmsthqSZI0b968uo8pNbEVHmPPxzPPPNOM4dRqphLobWylw9hz+pvf/MZll1xySU3HHjhwoMtiqz5+9NFHLnv//fddtnTpUpdNnz49euzFixe7LLZK6qpVq1y2YsUKl2233XbR47zwwgvRPFMzlUBnc9C/f3+XzZkzp6bHfOWVV1wW6yc+YaZK0NkNGza47M0333TZrrvuGt3/b3/7m8s6ei1RxOuvv+6yNWvWRLft16+fy9566y2X3XvvvVWPp0w6vaIWQnhc0jtNGAtQF3QWOaK3yA2dRW7oLHJTyz1q55nZXyuXkXfuaCMzG2tmi83Mf7sSaC46ixx12ls6i8TQWeSG1wdIUrUTtV9L+qKkNkntkq7qaMMQwo0hhEEhhEFVHguoBzqLHBXqLZ1FQugscsPrAySrqolaCGFVCOHjEMJGSTdJOqS+wwLqi84iR/QWuaGzyA2dRcqKLCbimFm/EEJ75d3jJD27pe1RzLPPxp/GE0880WXf/va3XTZjxgyXnX322S4bMGBA9DhHHHFEZ0PMVo6djS1G0LNnT5e98cYb0f3vvPPOuo+pGbbZZhuXTZo0qfD+8+fPd9mPf/zjWobUMq3o7TnnnOOyV1991WWHHXZY3Y/92muvuez3v/+9y55//nmXLViwoO7jiRk7dqzLYjfsxxZ66A5y/FzbDBMmTHBZbDGkrpgyZUpN+2OTHDu7evVql40cOdJl9913X3T/3r17u+zll1922T333OOymTNnuuydd/xtf7Nnz44eO7aYSEfbosBEzczukDRMUh8zWyHpv0saZmZtkoKkv0vyswGgRegsckRvkRs6i9zQWeSm04laCGFUJP5tA8YC1AWdRY7oLXJDZ5EbOovc1LLqIwAAAACgAZioAQAAAEBiqlpMBM0Vu2n0tttuc9nNN9/ssq239h/iww8/PHqcYcOGuezRRx/tfIBoqQ8//DCat7e3R/OUxBYOmThxosvGjx8f3X/FihUuu+oqv7LyunXrqhgd/uWXv/xlq4eQjOHDhxfabs6cOQ0eCVLV1tbmsiOPPLLqx4st6CBJy5Ytq/oxUT4LFy50WWyho0aIva4cOnRodNvYIjrddfGlIriiBgAAAACJYaIGAAAAAIlhogYAAAAAiWGiBgAAAACJYTGRhAwcODCaf/e733XZwQcf7LLYwiExS5cujeaPP/54of2Rlnnz5rV6CIXEbrCPLRJy0kknuayjm+lPOOGE2gcGN
MDcuXNbPQS0yIMPPuiynXfeudC+CxYscNmYMWNqHRLQUNttt53LYouGSFIIwWWzZ8+u+5jKgitqAAAAAJAYJmoAAAAAkBgmagAAAACQGCZqAAAAAJAYFhNpgn333ddl5513nsuOP/746P677bZb1cf++OOPXdbe3h7dtqMbP9EaZlYoGzlyZHT/cePG1X1MRf3oRz9y2U9/+lOX7bjjji6bNWuWy0aPHl2fgQFAg+2yyy4uK/r19frrr3fZunXrah4T0EgPPPBAq4dQWlxRAwAAAIDEMFEDAAAAgMQwUQMAAACAxHQ6UTOzz5vZI2a21MyeM7Nxlby3mT1kZssrfxf7bY5Ag9FZ5IbOIkf0Frmhs8hNkStqH0m6MISwn6TBks41s/0kXSzp4RDCAEkPV94HUkBnkRs6ixzRW+SGziIrna76GEJol9ReeXutmT0vaXdJx0oaVtnsFkmPSprQkFEmKrYa46hRo1wWW+Gxf//+dR/P4sWLXTZ58mSXzZs3r+7HTklZOhtCKJR1tCrotGnTXDZ9+nSXvf322y4bPHiwy0477TSXHXDAAdFj77HHHi577bXXXBZbKSq26lnZlaWz3VVsNdZ99tknuu2CBQsaPZymobfSjBkzXNajR/V3lTz55JO1DAedoLON8c1vfrPVQyitLn02MbP+kg6UtFBS30rhJekfkvrWdWRAHdBZ5IbOIkf0Frmhs8hB4d+jZmY7SJoj6YchhDWbfwcxhBDMzH+rf9N+YyWNrXWgQFfRWeSGziJH1fSWzqKV+FyLXBS6omZmn9GmQs8KIdxdiVeZWb/Kv/eT9EZs3xDCjSGEQSGEQfUYMFAEnUVu6CxyVG1v6Sxahc+1yEmRVR9N0m8lPR9CuHqzf5on6fTK26dLuqf+wwO6js4iN3QWOaK3yA2dRW6K/Ojjf5V0mqT/MLNnKtklkqZIusvMzpT0qqQTGzPE5urbN/5jyfvtt5/LfvWrX7nsy1/+ct3HtHDhQpddeeWVLrvnHv95ZePGjXUfTwa6VWe32mqraH7OOee47IQTTnDZmjVrXDZgwICaxhS7If6RRx5x2aWXXlrTcUqkW3W2bGKL/NSyoERGuk1v29raovmIESNcFvu6u2HDBpddd911Llu1alUVo0MXdJvONtMXvvCFVg+htIqs+viEJL+k1SbD6zscoHZ0Frmhs8gRvUVu6Cxy0y2+5QcAAAAAOWGiBgAAAACJYaIGAAAAAIkp/HvUcte7d2+X3XDDDS7r6Ibhet8oGVts4aqrropu+8ADD7jsgw8+qOt4kJ6nnnrKZYsWLXLZwQcfXPgxd9ttN5d1tIDOp7399tsumz17dnTbcePGFR4TUEZf+9rXovnMmTObOxDUxU477RTNY59TY1auXOmyiy66qKYxAan44x//6LKOFlTqpovcVY0ragAAAACQGCZqAAAAAJAYJmoAAAAAkBgmagAAAACQmOwXEzn00ENdNn78eJcdcsghLtt9993rPp7333/fZdOmTXPZ5Zdf7rL169fXfTzI14oVK1x2/PHHu+zss8+O7j9x4sSqj33NNde47Ne//rXLXnrppaqPAZSFWUe/PxcAyu/ZZ5912fLly6Pbxhbn++IXv+iyN998s/aBlQBX1AAAAAAgMUzUAAAAACAxTNQAAAAAIDFM1AAAAAAgMdkvJnLccccVyopaunRpNL/vvvtc9tFHH7nsqquuctnq1aurHg+wufb2dpdNmjQpum1HOYDq3X///S773ve+14KRoJleeOGFaP7kk0+6bMiQIY0eDpC82KJ5knTzzTe7bPLkyS47//zzXdbRa/Qy44oaAAAAACSGiRoAAAAAJIaJGgAAAAAkptOJmpl93sweMbOlZvacmY2r5JPMbKWZPVP5c3Tjhwt0js4iN3QWuaGzyBG9RW4shLDlDcz6SeoXQlhiZv9F0tOSRko6UdK6EMLUwgcz2/LBgE6EEKyzbegsUkJnkaGnQwiDtrQBnUViOu2sRG+bqVevXtH8rrvuctmIESNcdvfdd7vsjDPOcNn69eurGF0airw+6HTVxxBCu6T2yttrzex5SbvXPjygMegsckNnkRs6ixzRW+SmS/eomVl/SQdKWliJzjOzv5rZdDPbuc5jA2pGZ5EbOovc0FnkiN4iB4Unama2g6Q5kn4YQlgj6deSviipTZu+O+F/gdim/caa2WIzW1yH8QKF0Vnkhs4iN3QWOaK3yEWhiZqZfUabCj0rhHC3JIUQVoUQPg4hbJR0k6RDYvuGEG4MIQwq8rPDQL3QWeSGziI3dBY5orfISZFVH03SbyU9H0K4erO832abHSfp2foPD+g6Oovc0Fnkhs4iR/QWuSmy6uMQSX+U9B+SNlbiSySN0qZLxEHS3yWdXblJc0uPxQo5qEnBFfToLJJBZ5GhIqs+0lmkpOiqj/S2xWKrQU6ePNllP/jBD1w2cOBAly1durQ+A2uBeq36+ISk2AP972oGBTQanUVu6CxyQ2eRI3qL3HRp1UcAAAAAQOMxUQMAAACAxDBRAwAAAIDEdLqYSF0Pxo2XqFGRGy/ric6iVnQWGSq0MEO90FnUQVM7K9Fb1K7I6wOuqAEAAABAYpioAQAAAEBimKgBAAAAQGKYqAEAAABAYjr9hdd19pakVytv96m8XwZlOhcp3fPZqwXHpLN5SPV86Gz9lOlcpLTPp9m9LWtnpXKdT8rn0srPtSk/L9Uo0/mkfC6FOtvUVR8/cWCzxc1eoadRynQuUvnOp17K9LyU6Vyk8p1PvZTpeSnTuUjlO596KdvzUqbzKdO51FPZnpcynU8ZzoUffQQAAACAxDBRAwAAAIDEtHKidmMLj11vZToXqXznUy9lel7KdC5S+c6nXsr0vJTpXKTynU+9lO15KdP5lOlc6qlsz0uZzif7c2nZPWoAAAAAgDh+9BEAAAAAEtP0iZqZHWVmy8zsJTO7uNnHr5WZTTezN8zs2c2y3mb2kJktr/y9cyvHWJSZfd7MHjGzpWb2nJmNq+RZnk+j0Nl00Nli6Gw66GxxOfe2TJ2V6G1ROXdWKldvy9rZpk7UzGwrSddJ+m+S9pM0ysz2a+YY6mCmpKM+lV0s6eEQwgBJD1fez8FHki4MIewnabCkcysfj1zPp+7obHLobCfobHLobAEl6O1MlaezEr3tVAk6K5Wrt6XsbLOvqB0i6aUQwishhA2SZks6tsljqEkI4XFJ73wqPlbSLZW3b5E0sqmDqlIIoT2EsKTy9lpJz0vaXZmeT4PQ2YTQ2ULobELobGFZ97ZMnZXobUFZd1YqV2/L2tlmT9R2l/Sfm72/opLlrm8Iob3y9j8k9W3lYKphZv0lHShpoUpwPnVEZxNFZztEZxNFZ7eojL0txceY3naojJ2VSvAxLlNnWUykzsKmZTSzWkrTzHaQNEfSD0MIazb/txzPB12T48eYznZvOX6M6Wz3luvHmN52bzl+jMvW2WZP1FZK+vxm7+9RyXK3ysz6SVLl7zdaPJ7CzOwz2lToWSGEuytxtufTAHQ2MXS2U3Q2MXS2kDL2NuuPMb3tVBk7
K2X8MS5jZ5s9UVskaYCZ7W1mPSWdLGlek8fQCPMknV55+3RJ97RwLIWZmUn6raTnQwhXb/ZPWZ5Pg9DZhNDZQuhsQuhsYWXsbbYfY3pbSBk7K2X6MS5tZ0MITf0j6WhJL0p6WdJPmn38Ooz/Dkntkv6fNv088pmSdtGmlWSWS/q/knq3epwFz2WINl0C/qukZyp/js71fBr4PNHZRP7Q2cLPE51N5A+d7dJzlW1vy9TZyvnQ22LPU7adrYy/NL0ta2etcnIAAAAAgESwmAgAAAAAJIaJGgAAAAAkhokaAAAAACSGiRoAAAAAJIaJGgAAAAAkhokaAAAAACSGiRoAAAAAJIaJGgAAAAAk5v8DVMTDbjI6QLUAAAAASUVORK5CYII=\n", + "text/plain": [ + "
" + ] + }, + "metadata": { + "needs_background": "light" + }, + "output_type": "display_data" + } + ], + "source": [ + "# Browse examples\n", + "fig, axes = plt.subplots(1, 5)\n", + "\n", + "for index in range(5):\n", + " show_image(axes[index], get_sample(index))" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "tensor([[ 0.7010, 0.7096, 0.2029, -0.8527, -0.1471, 0.1670, -0.0375, 1.2047,\n", + " -1.9497, -0.1735, 2.7477, 0.9634, -1.8239, -1.0749, 0.8230, 0.0965]])" + ] + }, + "execution_count": 10, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + " model.encode(get_sample(0)[None])" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAA2oAAAGgCAYAAADbx5TwAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4zLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvnQurowAAIABJREFUeJzt3Xm41WW5//H7BmWWGZGjBDinhqiAZlziSeQYWWqeTE6ilonnlEOlpql1LLXMqZMN5gwiaV6iSXXUzAlNIZAoEUSEBIHNLDPI9Pz+YPU7xH0v9nevaT/Pd79f18UFfFhrfZ+112evvR7W/t5bQwgCAAAAAIhHs8ZeAAAAAADgn7FRAwAAAIDIsFEDAAAAgMiwUQMAAACAyLBRAwAAAIDIsFEDAAAAgMiwUQMAAACAyLBRAwAAAIDIlLVRU9VTVHWWqr6rqldXalFAtdBZpIjeIjV0Fqmhs4iRhhBKu6JqcxF5R0ROFpEFIjJZRIaHEGbs5jqlHQwoCCFoqdels2gM5XRWpOG9pbOogOUhhG6lXpnOohHUtLOF69BblCXL64Ny3lEbKCLvhhDmhhA2i8ijInJaGbcHVBudRYroLWptXpnXp7OoNTqLXCpno7aviLy/098XFDIgVnQWKaK3SA2dRWroLKK0R7UPoKojRWRktY8DVAqdRWroLFJDZ5EieotaK2ejtlBEeu709/0K2T8JIdwjIveI8P28aHR0Fimqt7d0FpGhs0gNrw8QpXK+9XGyiBykqn1UtYWInC0i4yuzLKAq6CxSRG+RGjqL1NBZRKnkd9RCCFtV9WIReVZEmovIAyGEtyq2MqDC6CxSRG+RGjqL1NBZxKrk8fwlHYy3iVGmckedNxSdRbnoLBL0Rgihf60ORmdRATXtrAi9RfmqPZ4fAAAAAFAFbNQAAAAAIDJs1AAAAAAgMmzUAAAAACAybNQAAAAAIDJs1AAAAAAgMmzUAAAAACAybNQAAAAAIDJs1AAAAAAgMmzUAAAAACAybNQAAAAAIDJs1AAAAAAgMmzUAAAAACAyezT2AgDkzzHHHGOyiy++2GTnnnuuyR566CGT/fSnP3WPM3Xq1BJWBwAAED/eUQMAAACAyLBRAwAAAIDIsFEDAAAAgMiUdY6aqr4nImtFZJuIbA0h9K/EooBqordIDZ1FaugsUkNnESMNIZR+5R2l7h9CWJ7x8qUfLGHNmzc3WYcOHcq6TW8wQ5s2bUx2yCGHmOxrX/uae5u33XabyYYPH26yTZs2mezmm2822fe+9z33OOUIIWi5t9GQ3jbVzmbVr18/N3/hhRdM1r59+5KPs3r1ajfv0qVLybdZK3QWOzvppJNMNnbsWJMNHjzYZLNmzarKmhxvlPsilc7G7brrrjNZsa/ZzZrZb7468cQTTfbyyy+Xva4y1LSzhcvTW5Qly+sDvvURAAAAACJT7kYtiMgfVPUNVR1ZiQUBNUBvkRo6i9TQWaSGziI65f4ctUEhhIWqureIPKeqb4cQJux8gULZKTxistve0llEiM4iNXQWqeE1LaJT1jtqIYSFhd+XisiTIjLQucw9IYT+nJSJWNTXWzqL2NBZpIbOIjW8pkWMSn5HTVXbikizEMLawp+Hisj3K7ayRvKRj3zEzVu0aGGy448/3mSDBg0yWceOHU125plnlrC6hluwYIHJ7rzzTveyZ5xxhsnWrl1rsr/+9a8ma+STiDPLa29rYeBA8zVLxo0b517WG5bjDS7y+rV582aTFRsactxxx5ls6tSpmW4zFTF09oQTTjCZ95g8+eSTtVhO0gYMGGCyyZMnN8JKqieGzuL/nH/++Sa76qqrTLZ9+/bMt1nOILoY0VnEqpxvfewuIk+q6j9u51chhGcqsiqgeugtUkNnkRo6i9TQWUSp5I1aCGGuiBxZwbUAVUdvkRo6i9TQWaSGziJWjOcHAAAAgMiwUQMAAACAyJQ7nj9p/fr1M9kLL7zgXtYbjhAb70Tg6667zmTr1q1zrz927FiT1dXVmeyDDz4w2axZs7IsERFq06aNyY4++miTPfzwwybr0aNHWceePXu2yW655RaTPfroo+71//SnP5nM6/wPf/jDElaHfzjxxBNNdtBBB5mMYSL/p1kz//9B+/TpY7JevXqZrHCuDFA2r1+tWrVqhJUgD4499liTnXPOOSYbPHiwe/3DDz8803GuuOIKky1atMhk3hA/Ef81y6RJkzIdOya8owYAAAAAkWGjBgAAAACRYaMGAAAAAJFhowYAAAAAkWnSw0Tmz59vshUrVriXrcUwkWInOa5atcpk//qv/2qyzZs3m2zMmDHlLwy5dvfdd5ts+PDhNTm2N7SkXbt2Jnv55Zfd63tDLvr27Vv2uvDPzj33XJO9/vrrjbCSdBQbtHPhhReazDvp/e233674mpB/Q4YMMdkll1yS6brFOnfqqaeabMmSJQ1bGJL0hS98wWQ/+clPTNa1a1eTFRuI9NJLL5msW7duJrv11lszrLD4cbzbPPvsszPdZkx4Rw0AAAAAIsNGDQAAAAAiw0YNAAAAACLDRg0AAAAAIsNGDQAAAAAi06SnPq5cudJkV155pXtZb+rRX/7yF5PdeeedmY49bdo0k5188snuZdevX2+yww8/3GSXXXZZpmOj6TrmmGNM9ulPf9pkxaYo7arYNMbf/va3JrvttttMtmjRIpN5n1cffPCBe5xPfvKT
Jsu6dmTXrBn/p9dQ9913X+bLzp49u4orQV4NGjTIZA8++KDJsk6tLjZlb968eQ1bGKK3xx725X///v1Ndu+995qsTZs2JpswYYLJbrjhBvfYr776qslatmxpsscee8xkQ4cOdW/TM2XKlMyXjRlffQEAAAAgMmzUAAAAACAybNQAAAAAIDL1btRU9QFVXaqq03fKOqvqc6o6u/B7p+ouE2gYeovU0Fmkhs4iNXQWqdEQwu4voHqCiKwTkYdCCEcUsltEZGUI4WZVvVpEOoUQrqr3YKq7P1jE2rdvb7K1a9ea7O677zbZBRdcYLJzzjnHZI888kiJq2s6QgiZJkVUqrcpd7Zfv34me+GFF0zmddvz9NNPm2z48OHuZQcPHmyyvn37mswbuLBs2bJM6xER2bZtm8k2bNiQaT1Tp07NfJxypNZZ73F6/fXXTfbEE0+YbMSIEeUcOldee+01Nz/uuONMdvzxx5ts4sSJFV9TA7wRQrCTBXYRS2ebKm/Qw5e//OVM133ppZdMdtJJJ5W7pMZU084Wrpdsb88//3yTZR2A9Nxzz5nsC1/4gsnWrFmTeT3ea+JRo0Zluu7ChQvd3BuO0pDXF7WQ5fVBve+ohRAmiMiu4xFPE5HRhT+PFpHTG7w6oIroLVJDZ5EaOovU0FmkptTx/N1DCHWFPy8Wke7FLqiqI0VkZInHASopU2/pLCJCZ5EaOovU8JoW0Sr756iFEMLu3v4NIdwjIveIpP02MfJld72ls4gRnUVq6CxSw2taxKbUqY9LVLWHiEjh96WVWxJQNfQWqaGzSA2dRWroLKJV6jtq40XkPBG5ufD7UxVbUaSynhS5evXqTJe78MILTfbrX//avez27dsz3SbqlcveHnzwwW5+5ZVXmqxDhw4mW758ucnq6upMNnr0aJOtW7fOPfbvf//7TFk1tG7d2mSXX365yb74xS/WYjnlqnlnhw0bZjLvY4r/0727/U6pPn36ZL5+sZPhE5XL59nG1LVrVzf3Bod4rxdWrVplshtvvLH8heVHbjt7ww03uPk111xjMm+44C9+8QuTXXfddSZryOAQz7XXXlvydS+99FI3j21wSKmyjOd/REReF5FDVHWBql4gO8p8sqrOFpEhhb8D0aC3SA2dRWroLFJDZ5Gaet9RCyH487dFkp7jinyjt0gNnUVq6CxSQ2eRmlLPUQMAAAAAVAkbNQAAAACITNnj+fHPrr/+epMdc8wxJhs8eLDJhgwZ4t7mH/7wh7LXhXxo2bKlyW677Tb3st5QiLVr15rs3HPPNdmUKVNMlvJAiY985CONvYRkHHLIIZku99Zbb1V5JenwPge9ASMiIu+8847JvM9LNE29e/c22bhx48q6zZ/+9Kcme/HFF8u6TcTnu9/9rsm8oSEiIps3bzbZs88+a7KrrrrKZBs3bsy0nlatWrn50KFDTeZ9jVZVk3lDcJ56KjezX1y8owYAAAAAkWGjBgAAAACRYaMGAAAAAJFhowYAAAAAkWGYSIWtX7/eZBdeeKHJpk6darJ7773XvU3vpF9v2MPPf/5zk3k/aR7pOuqoo0zmDQ0p5rTTTjPZyy+/XNaa0DRNnjy5sZdQUe3btzfZKaecYrJzzjnHZN7J8cXccMMNJlu1alXm6yPfvM717ds38/Wff/55k/3kJz8pa02IT8eOHU321a9+1WTFXgN6g0NOP/30ktdz4IEHmmzs2LHuZb0Be57HH3/cZLfcckvDFpYDvKMGAAAAAJFhowYAAAAAkWGjBgAAAACRYaMGAAAAAJFhmEgNzJkzx2Tnn3++yR588EH3+iNGjMiUtW3b1mQPPfSQyerq6tzjIH533HGHyVTVvaw3JCRvg0OaNbP/17R9+/ZGWEnT07lz54rf5pFHHmkyr99Dhgwx2X777WeyFi1amOyLX/yie2yvSxs3bjTZpEmTTPbhhx+abI89/C+vb7zxhpuj6fGGN9x8882Zr//qq6+a7LzzzjPZ6tWrG7YwRM97buvatWvm61966aUm23vvvU32pS99yWSf/exnTXbEEUeYrF27du6xvQEnXvbwww+bzBvYl3e8owYAAAAAkWGjBgAAAACRYaMGAAAAAJFhowYAAAAAkal3o6aqD6jqUlWdvlN2vaouVNVphV/DqrtMIDs6ixTRW6SGziI1dBapyTL1cZSI/ExEdh0f+OMQwm0VX1ET8eSTT5ps9uzZ7mW9SX8nnXSSyX7wgx+YrFevXia76aab3OMsXLjQzRM0SnLQ2VNPPdVk/fr1M5k3LUlEZPz48RVfU2y8CY/ex2PatGm1WE65RkkEvfUmHXof01/+8pcmu+aaa8o6dt++fU3mTX3cunWryTZs2GCyGTNmmOyBBx5wjz1lyhSTeVNSlyxZYrIFCxaYrHXr1u5x3n77bTdP1CiJoLMp6N27t8nGjRtX1m3OnTvXZF4/8U9GSQ46u3nzZpMtW7bMZN26dXOv//e//91kxV5LZLFo0SKTrVmzxr1sjx49TLZ8+XKT/fa3vy15PXlS7ztqIYQJIrKyBmsBKoLOIkX0Fqmhs0gNnUVqyjlH7WJV/VvhbeROxS6kqiNVdYqq2v+uBGqLziJF9faWziIydBap4fUBolTqRu0uETlARPqJSJ2I3F7sgiGEe0II/UMI/Us8FlAJdBYpytRbOouI0FmkhtcHiFZJG7UQwpIQwrYQwnYRuVdEBlZ2WUBl0VmkiN4iNXQWqaGziFmWYSKGqvYIIdQV/nqGiEzf3eWRzfTp/ofxrLPOMtlnPvMZkz344IMmu+iii0x20EEHucc5+eST61tislLsrDeMoEWLFiZbunSpe/1f//rXFV9TLbRs2dJk119/febrv/DCCyb79re/Xc6SGk1j9ParX/2qyebNm2ey448/vuLHnj9/vsl+85vfmGzmzJkmmzhxYsXX4xk5cqTJvBP2vUEPTUGKz7W1cNVVV5nMG4bUEDfffHNZ18cOKXZ21apVJjv99NNN9rvf/c69fufOnU02Z84ckz311FMmGzVqlMlWrrSn/T366KPusb1hIsUuiwwbNVV9REROFJGuqrpARP5bRE5U1X4iEkTkPRGxuwGgkdBZpIjeIjV0Fqmhs0hNvRu1EMJwJ76/CmsBKoLOIkX0Fqmhs0gNnUVqypn6CAAAAACoAjZqAAAAABCZkoaJoLa8k0bHjBljsvvuu89ke+xhH+ITTjjBPc6JJ55ospdeeqn+BaJRffjhh25eV1fn5jHxBodcd911Jrvyyivd6y9YsMBkt99uJyuvW7euhNXhH370ox819hKicdJJJ2W63Lhx46q8EsSqX79+Jhs6dGjJt+cNdBARmTVrVsm3ifyZNGmSybxBR9Xgva4cPHiwe1lviE5THb6UBe+oAQAAAEBk2KgBAAAAQGTYqAEAAABAZNioAQAAAEBkGCYSkb59+7r5v//7v5tswIABJvMGh3hmzJjh5hMmTMh0fcRl/Pjxjb2ETLwT7L0hIV/4whdMVuxk+jPPPLP8hQFV8OSTTzb2EtBI/vCHP5isU6dOma47ceJEk51//vnlLgm
oqtatW5vMGxoiIhJCMNmjjz5a8TXlBe+oAQAAAEBk2KgBAAAAQGTYqAEAAABAZNioAQAAAEBkGCZSA4cccojJLr74YpN97nOfc6+/zz77lHzsbdu2mayurs69bLETP9E4VDVTdvrpp7vXv+yyyyq+pqy+8Y1vmOw73/mOyTp06GCysWPHmuzcc8+tzMIAoMq6dOlisqxfX3/xi1+YbN26dWWvCaimZ599trGXkFu8owYAAAAAkWGjBgAAAACRYaMGAAAAAJGpd6Omqj1V9UVVnaGqb6nqZYW8s6o+p6qzC79n+2mOQJXRWaSGziJF9BapobNITZZ31LaKyOUhhMNE5DgR+ZqqHiYiV4vI8yGEg0Tk+cLfgRjQWaSGziJF9BapobNISr1TH0MIdSJSV/jzWlWdKSL7ishpInJi4WKjReQlEbmqKquMlDeNcfjw4SbzJjz27t274uuZMmWKyW666SaTjR8/vuLHjkleOhtCyJQVmwp65513muyBBx4w2YoVK0x23HHHmWzEiBEmO/LII91j77fffiabP3++ybxJUd7Us7zLS2ebKm8a68EHH+xeduLEidVeTs3QW5EHH3zQZM2alX5WyWuvvVbOclAPOlsd//Zv/9bYS8itBj2bqGpvETlKRCaJSPdC4UVEFotI94quDKgAOovU0FmkiN4iNXQWKcj8c9RUtZ2IjBORr4cQ1uz8P4ghhKCq9r/6d1xvpIiMLHehQEPRWaSGziJFpfSWzqIx8VyLVGR6R01V95QdhR4bQniiEC9R1R6Ff+8hIku964YQ7gkh9A8h9K/EgoEs6CxSQ2eRolJ7S2fRWHiuRUqyTH1UEblfRGaGEO7Y6Z/Gi8h5hT+fJyJPVX55QMPRWaSGziJF9BapobNITZZvffyEiIwQkTdVdVohu0ZEbhaRx1T1AhGZJyJnVWeJtdW9u/9tyYcddpjJfvazn5ns0EMPrfiaJk2aZLJbb73VZE89ZZ9Xtm/fXvH1JKBJdbZ58+Zu/tWvftVkZ555psnWrFljsoMOOqisNXknxL/44osm++53v1vWcXKkSXU2b7whP+UMlEhIk+ltv3793HzIkCEm877ubt682WQ///nPTbZkyZISVocGaDKdraX999+/sZeQW1mmPr4qInak1Q4nVXY5QPnoLFJDZ5EieovU0Fmkpkn8lx8AAAAApISNGgAAAABEho0aAAAAAEQm889RS13nzp1Ndvfdd5us2AnDlT5R0hu2cPvtt7uXffbZZ022cePGiq4H8Xn99ddNNnnyZJMNGDAg823us88+Jis2QGdXK1asMNmjjz7qXvayyy7LvCYgjz7+8Y+7+ahRo2q7EFREx44d3dx7TvUsXLjQZFdccUVZawJi8corr5is2EClJjrkrmS8owYAAAAAkWGjBgAAAACRYaMGAAAAAJFhowYAAAAAkUl+mMixxx5rsiuvvNJkAwcONNm+++5b8fVs2LDBZHfeeafJfvCDH5hs/fr1FV8P0rVgwQKTfe5znzPZRRdd5F7/uuuuK/nYP/nJT0x21113mezdd98t+RhAXqgW+/m5AJB/06dPN9ns2bPdy3rD+Q444ACTLVu2rPyF5QDvqAEAAABAZNioAQAAAEBk2KgBAAAAQGTYqAEAAABAZJIfJnLGGWdkyrKaMWOGm//ud78z2datW012++23m2zVqlUlrwfYWV1dncmuv/5697LFcgCle/rpp032+c9/vhFWglp6++233fy1114z2aBBg6q9HCB63tA8EZH77rvPZDfddJPJLrnkEpMVe42eZ7yjBgAAAACRYaMGAAAAAJFhowYAAAAAkal3o6aqPVX1RVWdoapvqeplhfx6VV2oqtMKv4ZVf7lA/egsUkNnkRo6ixTRW6RGQwi7v4BqDxHpEUKYqqp7icgbInK6iJwlIutCCLdlPpjq7g8G1COEoPVdhs4iJnQWCXojhNB/dxegs4hMvZ0Vobe11L59ezd/7LHHTDZkyBCTPfHEEyb70pe+ZLL169eXsLo4ZHl9UO/UxxBCnYjUFf68VlVnisi+5S8PqA46i9TQWaSGziJF9BapadA5aqraW0SOEpFJhehiVf2bqj6gqp0qvDagbHQWqaGzSA2dRYroLVKQeaOmqu1EZJyIfD2EsEZE7hKRA0Skn+z43wn7A8R2XG+kqk5R1SkVWC+QGZ1FaugsUkNnkSJ6i1Rk2qip6p6yo9BjQwhPiIiEEJaEELaFELaLyL0iMtC7bgjhnhBC/yzfOwxUCp1FaugsUkNnkSJ6i5RkmfqoInK/iMwMIdyxU95jp4udISLTK788oOHoLFJDZ5EaOosU0VukJsvUx0Ei8oqIvCki2wvxNSIyXHa8RRxE5D0RuahwkububosJOShLxgl6dBbRoLNIUJapj3QWMck69ZHeNjJvGuRNN91ksv/6r/8yWd++fU02Y8aMyiysEVRq6uOrIuLd0P+Wsiig2ugsUkNnkRo6ixTRW6SmQVMfAQAAAADVx0YNAAAAACLDRg0AAAAAIlPvMJGKHowTL1GmLCdeVhKdRbnoLBKUaTBDpdBZVEBNOytCb1G+LK8PeEcNAAAAACLDRg0AAAAAIsNGDQAAAAAiw0YNAAAAACJT7w+8rrDlIjKv8Oeuhb/nQZ7ui0i896dXIxyTzqYh1vtDZysnT/dFJO77U+ve5rWzIvm6PzHfl8Z8ro3541KKPN2fmO9Lps7WdOrjPx1YdUqtJ/RUS57ui0j+7k+l5Onjkqf7IpK/+1Mpefq45Om+iOTv/lRK3j4uebo/ebovlZS3j0ue7k8e7gvf+ggAAAAAkWGjBgAAAACRacyN2j2NeOxKy9N9Ecnf/amUPH1c8nRfRPJ3fyolTx+XPN0Xkfzdn0rJ28clT/cnT/elkvL2ccnT/Un+vjTaOWoAAAAAAB/f+ggAAAAAkan5Rk1VT1HVWar6rqpeXevjl0tVH1DVpao6faess6o+p6qzC793asw1ZqWqPVX1RVWdoapvqeplhTzJ+1MtdDYedDYbOhsPOptdyr3NU2dF6G1WKXdWJF+9zWtna7pRU9XmIvJzEfmUiBwmIsNV9bBarqECRonIKbtkV4vI8yGEg0Tk+cLfU7BVRC4PIRwmIseJyNcKj0eq96fi6Gx06Gw96Gx06GwGOejtKMlPZ0Xobb1y0FmRfPU2l52t9TtqA0Xk3RDC3BDCZhF5VEROq/EayhJCmCAiK3eJTxOR0YU/jxaR02u6qBKFEOpCCFMLf14rIjNFZF9J9P5UCZ2NCJ3NhM5GhM5mlnRv89RZEXqbUdKdFclXb/Pa2Vpv1PYVkfd3+vuCQpa67iGEusKfF4tI98ZcTClUtbeIHCUikyQH96eC6Gyk6GxRdDZSdHa38tjbXDzG9LaoPHZWJAePcZ46yzCRCgs7xmgmNUpTVduJyDgR+XoIYc3O/5bi/UHDpPgY09mmLcXHmM42bak+xvS2aUvxMc5bZ2u9UVsoIj13+vt+hSx1S1S1h4hI4feljbyezFR1T9lR6LEhhCcKcbL3pwrobGTobL3obGTobCZ57G3Sjz
G9rVceOyuS8GOcx87WeqM2WUQOUtU+qtpCRM4WkfE1XkM1jBeR8wp/Pk9EnmrEtWSmqioi94vIzBDCHTv9U5L3p0robETobCZ0NiJ0NrM89jbZx5jeZpLHzook+hjntrMhhJr+EpFhIvKOiMwRkWtrffwKrP8REakTkS2y4/uRLxCRLrJjksxsEfmjiHRu7HVmvC+DZMdbwH8TkWmFX8NSvT9V/DjR2Uh+0dnMHyc6G8kvOtugj1Wyvc1TZwv3h95m+zgl29nC+nPT27x2Vgt3DgAAAAAQCYaJAAAAAEBk2KgBAAAAQGTYqAEAAABAZNioAQAAAEBk2KgBAAAAQGTYqAEAAABAZNioAQAAAEBk2KgBAAAAQGTYqAEAAABAZNioAQAAAEBk2KgBAAAAQGTYqAEAAABAZNioAQAAAEBk2KgBAAAAQGTYqAEAAABAZNioAQAAAEBk2KgBAAAAQGTYqAEAAABAZNioAQAAAEBk2KgBAAAAQGTYqAEAAABAZNioAQAAAEBk2KgBAAAAQGTYqAEAAABAZNioAQAAAEBk2KgBAAAAQGTYqAEAAABAZNioAQAAAEBk2KgBAAAAQGTYqAEAAABAZNioAQAAAEBk2KgBAAAAQGTYqAEAAABAZNioAQAAAEBk2KgBAAAAQGTYqAEAAABAZNioAQAAAEBk2KgBAAAAQGTYqAEAAABAZNioAQAAAEBk2KgBAAAAQGTYqAEAAABAZNioAQAAAEBk2KgBAAAAQGTYqAEAAABAZNioAQAAAEBk2KgBAAAAQGTYqAEAAABAZNioAQAAAEBk2KgBAAAAQGTYqAEAAABAZNioAQAAAEBk2KgBAAAAQGTYqAEAAABAZNioAQAAAEBk2KgBAAAAQGTYqAEAAABAZNioAQAAAEBk2KgBAAAAQGTYqAEAAABAZNioAQAAAEBk2KgBAAAAQGTYqAEAAABAZNioAQAAAEBk2KgBAAAAQGTYqAEAAABAZNioAQAAAEBk2KgBAAAAQGTYqAEAAABAZNioAQAAAEBk2KgBAAAAQGTYqAEAAABAZNioAQAAAEBk2KgBAAAAQGTYqAEAAABAZMraqKnqKao6S1XfVdWrK7UooFroLFJEb5EaOovU0FnESEMIpV1RtbmIvCMiJ4vIAhGZLCLDQwgzdnOd0g4GFIQQtNTr0lk0hnI6K9Lw3tJZVMDyEEK3Uq9MZ9EIatrZwnXoLcqS5fVBOe+oDRSRd0MIc0MIm0XkURE5rYzbA6qNziJF9Ba1Nq/M69NZ1BqdRS6ssoaKAAAgAElEQVSVs1HbV0Te3+nvCwoZECs6ixTRW6SGziI1dBZR2qPaB1DVkSIystrHASqFziI1dBapobNIEb1FrZWzUVsoIj13+vt+heyfhBDuEZF7RPh+XjQ6OosU1dtbOovI0FmkhtcHiFI53/o4WUQOUtU+qtpCRM4WkfGVWRZQFXQWKaK3SA2dRWroLKJU8jtqIYStqnqxiDwrIs1F5IEQwlsVWxlQYXQWKaK3SA2dRWroLGJV8nj+kg7G28QoU7mjzhuKzqJcdBYJeiOE0L9WB6OzqICadlaE3qJ81R7PDwAAAACoAjZqAAAAABAZNmoAAAAAEBk2agAAAAAQGTZqAAAAABAZNmoAAAAAEBk2agAAAAAQGTZqAAAAABCZPRp7AQDyp3nz5pkyVfuzHrdu3Wqy7du3u8cJgZ83CgAA8ol31AAAAAAgMmzUAAAAACAybNQAAAAAIDJs1AAAAAAgMgwTaYKaNfP3523atMl02S1btphs8+bNJvMGQDD8IV3e4I/OnTu7l/3EJz5hstNOO81kBx54oMmmTZtmsrFjx7rH+etf/2qyDz/80L0sADRF3nN3x44dTfaf//mf7vUPPfRQk/34xz822d/+9jeTFRsEBSAb3lEDAAAAgMiwUQMAAACAyLBRAwAAAIDIlHWOmqq+JyJrRWSbiGwNIfSvxKKAaqK3SA2dRWroLFJDZxGjSgwT+dcQwvIK3E4uNG/e3GQtWrQwWcuWLU1W7KTbtm3bmswb/LH//vub7JhjjjHZJz/5Sfc4AwYMMNnq1atNNnHiRJN5JxZ7gx42bdrkHrsR0Nvd8E4+79atm8nOOuss9/rnnnuuybp3726yPfawT0F77bWXyYoNCFm5cqXJ5s2bZzJvAE6CGq2zXh8YDLR73tcCEZEOHTqYzOun91y5detWk0X+OPA8W2NZhz6df/75JvvmN7/p3qb3emPu3LkmmzFjhsm8QWORo7OICt/6CAAAAACRKXejFkTkD6r6hqqOrMSCgBqgt0gNnUVq6CxSQ2cRnXK/9XFQCGGhqu4tIs+p6tshhAk7X6BQdgqPmOy2t3QWEaKzSA2dRWp4TYvolPWOWghhYeH3pSLypIgMdC5zTwihPydlIhb19ZbOIjZ0Fqmhs0gNr2kRo5LfUVPVtiLSLISwtvDnoSLy/YqtrJF4J+KK+EMPunTpYrLDDz/cZCeccILJ+vTpY7KuXbu6x/ZOPveGkXjDHrxBJF4m4t/HZs3sXn7hwoUmW7x4scm8E98bW157W2leRy6//HKT/cd//Id7/fbt25ss63AE7/PglFNOcY/jndD+8MMPZzpO5EMY/r9ad9b7nD/44INN9sEHH5jMGz7kDYJJ5WPv8Z5727VrZ7Jiz7PXXnutybzBI2PGjDHZa6+9ZrIYB+XwPNt4vH727dvXZN4wkU6dOrm3uWTJEpO9+uqrJtu2bVuGFcaJzpbHe/249957u5ft2bOnybyBN95tzpkzx2QLFixwj7N27VqTpfi1p5xvfewuIk8WNjZ7iMivQgjPVGRVQPXQW6SGziI1dBapobOIUskbtRDCXBE5soJrAaqO3iI1dBapobNIDZ1FrBjPDwAAAACRYaMGAAAAAJEpdzx/MrwhId6Jit5J4SL+ybhf/vKXTXbEEUeYzDtJskWLFpnWI+KfoOudtL9582aTeSe0e4M/RERmz55tsj/+8Y8me+SRR0y2aNEik6V8YnFT4p18/o1vfMNk3snnxT5f1q1bZ7Lp06ebzOtI9+7dTdaxY0f3OCNGjDDZtGnTTPbnP//ZZCmeVFwL++yzj8m+8pWvmGzy5MkmmzRpksm85wbvuSpG3nOy93zuDXI68cQT3ds89dRTTeZ9HkyYMMFkr7/+unubaJq8ITStW7c2mfc5ve+++5qs2ACw3/3udyabMmWKyfianz/ea2dv6My3vvUtk5122mnubXoDw7xhY97XaO/rifc1X0TkxhtvzHT97du3u9ePBe+oAQAAAEBk2KgBAAAAQGTYqAEAAABAZNioAQAAAEBkmswwkawaMkzEOyHyvffeM9k777xjMu+k3ZUrV7rHXrp0qcm8kx+9IQzeCcPeicEiIs8++2zJx0YavJPPL7zwQpNdccUVJmvVqpXJli9f7h7nT3/6k8m8QQjeUIn+/ftnykRE+vTpY7JPfepTJvNONvZOXm5KvJPERfwTwL3BGG+++abJvME0e+65p8mKDR3wnlsac+iLd+z169ebbP/99zfZxz72Mfc227dvb7ING
zaYzHvuZVhD01Tsc7VZM/t/7R06dDDZgAEDTOZ9Xs6ZM8c9zp133mmyNWvWuJdFurzhSYceeqjJ7rrrLpN5z3fe0DwRkQ8//NBkGzduNJnXUe81rTfMSURk7ty5Jrv//vtN5r32jmnYGO+oAQAAAEBk2KgBAAAAQGTYqAEAAABAZNioAQAAAEBk2KgBAAAAQGSazNRHb4KLN2Gs2CQ4b3rMww8/bLK1a9eazJtcM3/+fJNt2bLFPbY3EWy//fYz2V577WWyFStWmMyb1ibirx3p8iaFeZOZfvCDH5isTZs2JvP6MWbMGPfYzzzzjMm8CZHe1Edv2t2BBx7oHqdXr14mGzx4sMl++tOfmqypT30sNpHr1FNPNZk3DcybMtulSxeTeY+7N/UrRt7XCG9irzeN8ZhjjnFv05uMuWrVKpO9//77mY6D/GvIBLoDDjjAZJ/5zGdM5nX7gQcecG9z1qxZZa0J8WndurXJPvvZz5rse9/7nsm85/nVq1ebbObMme6xFy1aZDLvNfFJJ51kskMOOcRk3mtsEX9CZNu2bU3mPf/G9FzLO2oAAAAAEBk2agAAAAAQGTZqAAAAABCZejdqqvqAqi5V1ek7ZZ1V9TlVnV34vVN1lwk0DL1FaugsUkNnkRo6i9RkGSYySkR+JiIP7ZRdLSLPhxBuVtWrC3+/qvLLqy7vZNqNGze6l50zZ47JvAEH3gma3kn73sn0xYYbeLl3gv7s2bNN5g0TaSJDFEZJTnub1T777GOyp556ymTeEBqvI+PGjTPZfffd5x57yZIlWZboDqnwhih4Jx+LiBx++OEm+5d/+ReTde3a1WTe51AjGyUV6uyug2S8E/979uzpXtc7Abt9+/Yma9euncm8ITTe81+xQQSpDijo3bu3ybweivhfd/785z+bzPs8iNAoaeLPs7XQrJn/f+rec/xNN91kMm/okjfoYfTo0e5xvAE6CRslTaizrVq1cvNzzz3XZN/+9rdN1rFjR5N5A+nuvvtuk02bNs09docOHUw2YMAAk3mDcbzrLliwwD3Oa6+9ZrKVK1eazHtOjkm976iFECaIyK737DQR+cdn9GgROb3C6wLKQm+RGjqL1NBZpIbOIjWlnqPWPYRQV/jzYhGxc5qB+NBbpIbOIjV0Fqmhs4hW2T9HLYQQVLXo96uo6kgRGVnucYBK2l1v6SxiRGeRGjqL1PCaFrEp9R21JaraQ0Sk8Lv9CbUFIYR7Qgj9Qwj9SzwWUCmZektnERE6i9TQWaSG17SIVqnvqI0XkfNE5ObC73ZCQQK8E9c3bNjgXtYbZuCdYDt48GCTffzjHzeZ99PVvZ/sLiIydepUk/397383mTccwRta0oTlore7KnaysDf84yMf+YjJvB6PGTPGZP/93/9tsqVL/a9n3sm53gnxzZs3N5k3uKfYScmf/OQnTdapkx3Y9dGPftRk77zzjskiPKm4pM7u+rH27pf3sRfxT7Zes2aNyerq6kzmPQcVe0717DoERSS+ASPeAJyBAweazBssJSKybt06kz3++OMmW79+fQmri0Iun2cbU7Hn+Msuu8xk3lCGzZs3m+zWW281mfe530TkorMtW7Y02QUXXOBe9tprrzWZNwzqT3/6k8muusrOWXn33XezLFFE/EF8Q4cONVnnzp1N5n0tmzJlinscb5iI9/Uotq8xu8oynv8REXldRA5R1QWqeoHsKPPJqjpbRIYU/g5Eg94iNXQWqaGzSA2dRWrqfUcthDC8yD+dVOG1ABVDb5EaOovU0Fmkhs4iNaWeowYAAAAAqBI2agAAAAAQmbLH8+dNsWEC3gngW7ZsMdmsWbNM9pWvfMVkZ511lsmKnTz+xz/+0WS33367yYr9dHbkx5577mmyG2+80b3sscceazKv3/fdd5/JvvWtb5nM+xxoyEm427Zty3Q57zjTp093L7tx40aTtWvXzmSHHnqoycaPH59pPalRVTMoxHvcvY+diH/yuDf8yLtNb2iB97iXe/K2N3Qk6+WKXdcbEuJlBx98sMmGDRtmMu9zVcQfwvKXv/zFZBEOtkENeJ0bMmSIe9lzzjnHZN7QpokTJ5rMG2BD59LhPb/0728HUX7zm990r+8NqJk0aZLJrrnmGpN5A7+85/RiA5VOPvlkkx199NEm8z4XvEF6P/zhD93jeK+JYx8c4uEdNQAAAACIDBs1AAAAAIgMGzUAAAAAiAwbNQAAAACIDMNEMsp6Mr53Urj309GPPPJIk3Xs2NE99gknnGCy+fPnm2zp0qUm805cT/FkyqbIG3rgDQi5+OKLM1/fG0xzxRVXmKzYYJtK8wZNeAMpvL6LiGzatMlknTp1MlmxwQ55FEKQrVu3mmxXS5Ysca+/evVqk/Xq1ctkQ4cONdkHH3xgssWLF5vMG8RUbJ3ecJiBAwearHv37ibr2bOnyYp1e+HChSZbsWKFyfr162eyHj16mGzXgS7/8NZbb5nMeyx4ns4/b/DHAQccYLIbbrjBvX7nzp1Ntnz5cpNdcsklJvOeOxEn72u593px5MiRJuvatat7m15PHn300UyXa9mypcm859/TTz/dPfaFF15oMu/rtvf15Pvf/77Jig0by8tzKO+oAQAAAEBk2KgBAAAAQGTYqAEAAABAZNioAQAAAEBkGCZSYd6J6t6JwIceeqjJ9ttvP/c2vZNGzzzzTJMNGTLEZF/72tdMNnXqVPc43sAUNB5vAIY3OKTYoAxviMOll15qsloNDsnKOwG42H1s3bq1ybwhDmvXrs10nLwo53O5W7duJvM+/gcddJDJRowYYTLvY//++++7x+7SpYvJvGE3ffv2NVmrVq1M5j3GixYtco89bdo0k82bN89khx9+uMnatm1rsl0HuvzDsmXLTPbhhx+6l0V+eIND9tlnH5PdcsstJjviiCPc2/QGgtx4440mmzVrVpYlIlLe86/3PHT00UebrEWLFu5ten30nle9gTXekKZjjjnGZB/96EfdY3sDorzXIXfccYfJHnvsMZMVe67NC95RAwAAAIDIsFEDAAAAgMiwUQMAAACAyLBRAwAAAIDI1LtRU9UHVHWpqk7fKbteVReq6rTCr2HVXSaQHZ1FiugtUkNnkRo6i9Rkmfo4SkR+JiIP7ZL/OIRwW8VXlEOrVq0y2SWXXGKyrl27utf3JkR+/vOfN9mAAQNM9vjjj5vse9/7nnuc0aNHmyzRSZCjJAed9aYtHXfccSbbsmWLe/277rrLZHPnzi1/YVXmTaPq06ePe9n27dubzJsA9d5775W9rhoYJTXsbbFJmt27dzeZ1zFvuqY3uXbQoEEmK/a8csABB5jssMMOM5k3JfHtt9822VtvvWWyYp8D3tQxb5pj7969TeZ1dtu2be5xnn32WZNt3rzZvWwCRkkOnmsrzeuD93n1P//zPyY7+eSTTVasS88884zJ7r//fpMV+xrRRI2SiDurqibbYw/7Ur1Tp04m86YpFpuI6H3t9KaJt2zZ0mTFJknuynv+FPE/P959912TjRkzxmRNcUJuve+ohRAmiMjKGqwFqAg6ixTRW6SG
ziI1dBapKecctYtV9W+Ft5Ht1r5AVUeq6hRVnVLGsYBKoLNIUb29pbOIDJ1Fanh9gCiVulG7S0QOEJF+IlInIrcXu2AI4Z4QQv8QQv8SjwVUAp1FijL1ls4iInQWqeH1AaJV0kYthLAkhLAthLBdRO4VkYGVXRZQWXQWKaK3SA2dRWroLGKWZZiIoao9Qgh1hb+eISLTd3d5WO+//77J5s2b51522rRpJnv++edNNnLkSJNddNFFJvv617/uHufFF180WSJDGOoVe2e9k2sHDrRfK7wTiDdu3Oje5tSpU00W23AY7357Q3UuvfRS9/qtWrUymTcsYtKkSSaL7WPhqWZvvRPPRUT23ntvk3mDNlautKd5eMNE9t9/f5OtXr3aPXYIwWTe4+kNPnrttddMtmjRIpMVG8zgnbC/7777mmzEiBEm8wYAeB8zEZGFCxdmXlOKYn+urTRvqI43COr888832SmnnJLp9rznLxGRq666ymQbNmxwL4viYuqs9xzoDQSpq6sz2eTJk032sY99zD2O9/zfunVrk3nPTXPmzDHZ0qVLTea9hhER6dChg8m8599Nmza5129q6t2oqeojInKiiHRV1QUi8t8icqKq9hORICLviYjdDQCNhM4iRfQWqaGzSA2dRWrq3aiFEIY7sZ3/CkSCziJF9BapobNIDZ1FasqZ+ggAAAAAqAI2agAAAAAQmZKGiaB83smh3kmkIv7QgxUrVphsyhT7Yz0uvvhikxX7qfLeIID58+dnWg/K4w3VOP74403mnXA7e/Zs9za9k42LdawWvPvoDUe5/377XSj9+vVzb9Mb2HDnnXeabNmyZVmW2KQU+5j86le/Mpn33NC2bVuTbdmyxWQzZ8402Zo1a9xjeyfDe0OWFi9ebDLvpPeG9N1buzeMZPny5Zluz3uOFxFZt25d5jUhft5ghKOPPtpkZ599tsnatGljslWrVpnsRz/6kXtsb9gXX5/zx3tu+utf/2qyb33rWybznrtF/K+93nOW93XCew70XpvcfPPN7rE/+9nPmswbbsJgnB14Rw0AAAAAIsNGDQAAAAAiw0YNAAAAACLDRg0AAAAAIsMwkYioqps3b97cZAcccIDJRo4caTLvZOViJ7N7J+g35vCJpqRVq1Ym69+/v8m8gQnvv/++e5ve41zpx7NYZ1u2bGmy3r17m+zWW2812dChQ01WbDDDQw89ZLLHH3888/WbMq9LIiLPPfecybwBBV6XvMd48+bNJlu5cqV77CVLlpjMW2etBiZ4a586darJPvWpT5nMG/gk4g/A4Xk2fsWe67yhDL169cp0uY0bN5rslVdeMdkbb7zhHrvY5zDyxXt+8LrjDV7yMhF/uJeXZX1u8obUFRt05r2m9V4DeetpivgoAAAAAEBk2KgBAAAAQGTYqAEAAABAZNioAQAAAEBkaj5MZNeTCBtyUnieTrj2foq7dzKliMjHPvYxk1177bUmGzRokMm8j++bb77pHsc7OdU7gTpPj0MsunTpkinbtGmTyebPn+/epvfYeUM+tmzZYjLvMfZOFu7evbt77BEjRpjsggsuMFmPHj0yHfvpp592j/Od73zHZB988EGm24Rv2bJlJvM+fl7mDdDwelirYSDl8tbpneDuXW7t2rXubRYb5oS4FRsmctRRR5nsM5/5TKbre8/dzzzzjMlWr17tHpvnNeysIX3wBtGUM5zG6/ehhx7qXjbrcyj93oF31AAAAAAgMmzUAAAAACAybNQAAAAAIDJs1AAAAAAgMvVu1FS1p6q+qKozVPUtVb2skHdW1edUdXbh907VXy5QPzqL1NBZpIjeIjV0FqnJMvVxq4hcHkKYqqp7icgbqvqciJwvIs+HEG5W1atF5GoRuWp3N9SsWTMzdW7z5s3u5TzeBJisk2JqNT3GW/s+++xjsgEDBpjsyiuvdG/Tm/rYunVrk3kfiwkTJpjspptuco+zePHiTLeZgIp1thq86Uht27Y1mfcYe5MXvaljIiKf//znTTZx4kSTrVq1ymT77befyYYNG2ayoUOHusfu1q2bybz7vXLlSpPdeOONJrvvvvvc43z44YdunqBoOlvO53xjPvfWive50ZDpuHvuuWfF19SIoulttXXs2NHNv/zlL5vs6KOPNpk3sXfhwoUme/vtt022devWLEtENk2ms7XkPQfuvffe7mW958b169ebLGfPlSWr9x21EEJdCGFq4c9rRWSmiOwrIqeJyOjCxUaLyOnVWiTQEHQWqaGzSBG9RWroLFLToJ+jpqq9ReQoEZkkIt1DCHWFf1osIu4PVFLVkSIysvDnUtcJlKTczgK1RmeRoob2ls6isfFcixRkHiaiqu1EZJyIfD2EsGbnfws73sd0v88jhHBPCKF/CKE/GzXUUiU6W4NlAv8fnUWKSuktnUVj4rkWqci0UVPVPWVHoceGEJ4oxEtUtUfh33uIyNLqLBFoODqL1NBZpIjeIjV0Fimp91sfdcfbYPeLyMwQwh07/dN4ETlPRG4u/P5Ufbe1fft2c/K/d+J6sWEie+21l8l2HU4i4g9m2GMPe1c3btxoslatWrnH9m7z2GOPNdnHP/5xk3mDQ7wBI8VOVvYGrniDP2677TaTjR492mSrV692j5OXk/4r2dlaWb58ucnmzp1rsv33399kRx55pHubBx54oMkuuugik3mPu9f3Nm3amKzYu+Rex15++WWTXXfddSabM2eOybZt2+YeJy9S7GxT4H1ueM/HXj+bN2/u3maevrMkr731XoMUe5497LDDTOY99mvWrDHZW2+9ZbLZs2ebjGEilZPXzjY277X8unXr3Mt6z6t1dXUmS3SYXcVlOUftEyIyQkTeVNVphewa2VHmx1T1AhGZJyJnVWeJQIPRWaSGziJF9BapobNISr0btRDCqyJS7L8AT6rscoDy0Vmkhs4iRfQWqaGzSE3mYSIAAAAAgNpgowYAAAAAkWnQz1GrhF1PDvROKvQGhIj4gzqOP/54k3nDO3r16mUy7+TeYgM9OnfubDJvuIJ3ovmmTZtMtnbtWpNNnDjRPfaNN95osqlTp2Y6Tl4GhOSJ95isWrXKZA899JDJjj76aJN5gz9ERDp06GAyb5CBl3knr3snBr/yyivuse+9916TvfTSSybzBvoAMfOG/DRkKJbH+xzkubvxeMNAhg0b5l62U6dOJsv62E2YMMFkK1euNBlDFRA77/muffv27mW9Pu86aFDEHwLYFPGOGgAAAABEho0aAAAAAESGjRoAAAAARIaNGgAAAABEpuZn6mU5ybbYSdg9e/Y02amnnmqyvffe22Tt2rXLdLliJy96JxfPnz/fZC+88ILJfvOb35jMGxziDTcR4aTypsAbQvP73//eZN5Aj09/+tPubfbp08dk3uCRJUuWmOzXv/61yV599VWTLVu2zD32tm3bTEaPkRqvs2+//bbJvJPju3Xr5t5m9+7dTbZ48eISVodaWrhwoZt7X7e9wQgzZ87MlHlfC4DYea+dV69e7V52w4YNJvNe23iv273Ba3kftsM7agAAAAAQGTZqAAA
AABAZNmoAAAAAEBk2agAAAAAQmZoPE1HVf/q7d7K2d1KhiMjTTz9tsjlz5pisTZs2JuvVq5fJ5s2bZ7KtW7e6x37zzTdNtnTpUpN5JxEzRAGlWLt2rcm8ASP/+7//615/1881Eb+LWTMAIosWLTLZihUrTFbs61iLFi1MlvVzFbXhDUMaM2aMe9kpU6aYzHsd8fe//91ky5cvN1neByMgn7xhIt7gJRGRQw45xGS9e/fOlHmDl/I+gId31AAAAAAgMmzUAAAAACAybNQAAAAAIDL1btRUtaeqvqiqM1T1LVW9rJBfr6oLVXVa4dew6i8XqB+dRWroLFJDZ5EieovUZBkmslVELg8hTFXVvUTkDVV9rvBvPw4h3NaQA2Y5Qdo7kVfEH/7hZWjyKtrZmDD4I7dy29m8effdd0127733mmzlypXu9d9//32TJfo5nNvOegM9vIExIiKvvvpqtZeDysptbxvT+vXrTfbLX/7SvWzLli1Ntueee5rM2wt4l9uyZYt7nESfV416N2ohhDoRqSv8ea2qzhSRfau9MKBUdBapobNIDZ1FiugtUtOgc9RUtbeIHCUikwrRxar6N1V9QFU7VXhtQNnoLFJDZ5EaOosU0VukIPNGTVXbicg4Efl6CGGNiNwlIgeISD/Z8b8Ttxe53khVnaKq9oeNAFVEZ5EaOovU0FmkiN4iFZk2aqq6p+wo9NgQwhMiIiGEJSGEbSGE7SJyr4gM9K4bQrgnhNA/hNC/UosG6kNnkRo6i9TQWaSI3iIlWaY+qojcLyIzQwh37JT32OliZ4jI9MovD2g4OovU0Fmkhs4iRfQWqdH6pqKo6iAReUVE3hSRf4xCukZEhsuOt4iDiLwnIhcVTtLc3W3lYwQLGk0IQeu7DJ1FTOhsvjRv3txkLVq0MNmO14PWxo0bTRbhdLI36nvHgM4iMvV2VoTe1pL3XCki0q1bN5O1a9fOZGvWrDGZN01369atJawuDpleH9TyCwSlRrmylLqS6CzKRWfzhY1a5dFZVEBNOytCb+vDRq1+WV4fNGjqIwAAAACg+tioAQAAAEBk2KgBAAAAQGT2aOwFAACQiu3bt5ts06ZNJit2jlps56M1a2b/v9a7jwDQEMWeR1asWGEy73y0rM+V3nPY7o6fGt5RAwAAAIDIsFEDAAAAgMiwUQMAAACAyLBRAwAAAIDI1HqYyHIRmVf4c9fC3/MgT/dFJN7706sRjkln0xDr/aGzlRPFfcl6gnuGy0Vxf4qccF/r3ua1syL5uj8x35fGfK6N+eNSiorcn2LPgVu2bMmUVUjMj02mzmpjTaBS1Sm1/iny1ZKn+yKSv/tTKXn6uOTpvojk7/5USp4+Lnm6LyL5uz+VkrePS57uT57uSyXl7eOSp/uTh/vCtz4CAAAAQGTYqAEAAABAZBpzo3ZPIx670vJ0X0Tyd38qJU8flzzdF5H83Z9KydPHJU/3RSR/96dS8vZxydP9ydN9qaS8fVzydH+Svy+Ndo4aAAAAAMDHtz4CAAAAQGRqvlFT1VNUdZaqvquqV9f6+OVS1d8J7k4AAALeSURBVAdUdamqTt8p66yqz6nq7MLvnRpzjVmpak9VfVFVZ6jqW6p6WSFP8v5UC52NB53Nhs7Gg85ml3Jv89RZEXqbVcqdFclXb/Pa2Zpu1FS1uYj8XEQ+JSKHichwVT2slmuogFEicsou2dUi8nwI4SAReb7w9xRsFZHLQwiHichxIvK1wuOR6v2pODobHTpbDzobHTqbQQ56O0ry01kReluvHHRWJF+9zWVna/2O2kAReTeEMDeEsFlEHhWR02q8hrKEECaIyMpd4tNEZHThz6NF5PSaLqpEIYS6EMLUwp/XishMEdlXEr0/VUJnI0JnM6GzEaGzmSXd2zx1VoTeZpR0Z0Xy1du8drbWG7V9ReT9nf6+oJClrnsIoa7w58Ui0r0xF1MKVe0tIkeJyCTJwf2pIDobKTpbFJ2NFJ3drTz2NhePMb0tKo+dFcnBY5ynzjJMpMLCjjGaSY3SVNV2IjJORL4eQliz87+leH/QMCk+xnS2aUvxMaazTVuqjzG9bdpSfIzz1tlab9QWikjPnf6+XyFL3RJV7SEiUvh9aSOvJzNV3VN2FHpsCOGJQpzs/akCOhsZOlsvOhsZOptJHnub9GNMb+uVx86KJPwY57Gztd6oTRaRg1S1j6q2EJGzRWR8jddQDeNF5LzCn88TkacacS2ZqaqKyP0iMjOEcMdO/5Tk/akSOhsROpsJnY0Inc0sj71N9jGmt5nksbMiiT7Gue1sCKGmv0RkmIi8IyJzROTaWh+/Aut/RETqRGSL7Ph+5AtEpIvsmCQzW0T+KCKdG3udGe/LINnxFvDfRGRa4dewVO9PFT9OdDaSX3Q288eJzkbyi8426GOVbG/z1NnC/aG32T5OyXa2sP7c9DavndXCnQMAAAAARIJhIgAAAAAQGTZqAAAAABAZNmoAAAAAEBk2agAAAAAQGTZqAAAAABAZNmoAAAAAEBk2agAAAAAQGTZqAAAAABCZ/wdbflFs14JW3AAAAABJRU5ErkJggg==\n", + "text/plain": [ + "
" + ] + }, + "metadata": { + "needs_background": "light" + }, + "output_type": "display_data" + } + ], + "source": [ + "# Browse examples\n", + "fig, axes = plt.subplots(2, 5)\n", + "\n", + "for index in range(5):\n", + " sample = get_sample(index)\n", + " decoded = model(sample[None])['decoded'][0].detach()\n", + " show_image(axes[0, index], sample)\n", + " show_image(axes[1, index], decoded)" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAA2oAAACzCAYAAAD48u9xAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4zLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvnQurowAAIABJREFUeJzt3XuwlWXd//HPJSc5Cew4CshBxcQTFCokQyY9xg91MEuLsrHRkcfxybGmmXSsqWdspnHGMqesx8wUdUoztTTU0pCf+nMQObQFgQwUkJNyFjbHDVy/P1jNQ3y/y32z19p7X9fi/ZphgA9rrfu+1/qstdfN2td3hxijAAAAAADpOK6tdwAAAAAA8O84UQMAAACAxHCiBgAAAACJ4UQNAAAAABLDiRoAAAAAJIYTNQAAAABIDCdqAAAAAJAYTtQAAAAAIDEVnaiFECaFEN4OISwPIdxarZ0CWgqdRY7oLXJDZ5EbOosUhRhj864YQjtJ/5T0H5LWSJoraWqMcclHXKd5GwNKYoyhudels2gLlXRWOvre0llUwaYYY5/mXpnOog20amdL16G3VRKC/TJ53HH2s6SDBw+arLnnMSko8v6gfQW3f56k5THGdyUphPCYpCmSypYaaGN0Fjmit2htqyq8Pp09gvdG1OO9OS33RvRoLlvkcuWum8kbYTqbCe+50L69PR3p0qWLyfbu3Vsok7LpbZMq+dbHgZJWH/b3NaUMSBWdRY7oLXJDZ5EbOoskVfKJWiEhhGmSprX0doBqobPIDZ1FbugsckRv0doqOVFbK2nwYX8fVMr+TYzxPkn3SXw/L9ocnUWOmuwtnUVi6Cxyw/sDJKmSE7W5kk4NIQzToTJ/WdJXqrJXQMugs8gRvUVu6OwRiq6X8YYleGvRJKlz584m89b67Nmzx2Teup5aWdPTTHS2lXg989at7du3z2SNjY2Fbq+WNPtELca4P4TwDUl/ldRO0gMxxsVV2zOgyugsckRvkRs6i9zQWaSq2eP5m7UxPiZGhSoddX606CwqRWeRofkxxjGttTE6+7+KjimX/Kl4lXyi5n2al5FW7axEb6upY8eOJmvXrp3JvE/ZDhw40CL71BqKvD+o6AdeAwAAAACqjxM1AAAAAEhMi4/nR9vq2rWrybyPmCVp69atLb07AABkp+gPrC6nkmUmvXr1cvPJkyebzPs2sFmzZplsw4YNJsv8Wx+RCe+55HXvaH4oey3jEzUAAAAASAwnagAAAACQGE7UAAAAACAxnKgBAAAAQGI4UQMAAACAxDD1sYbU1dWZ7Hvf+57JVq5c6V7/F7/4hcly/kGCAAB8lEqnOXoqmUznXbdbt27uZa+55hqTrVu3zmSLFi0ymTf1EWgN3g9wL/o89KaWNzY2upetlfevfKIGAAAAAInhRA0AAAAAEsOJGgAAAAAkhhM1AAAAAEgMw0Qy1b69fej+9Kc/meyTn/ykyZYsWeLe5v3332+yXbt2NWPvkDtvsa+XdenSxb3+/v37TbZ3716T1cpiX+TPW8xeyVAI5KstH3evh0OGDHEvO3To0ELXL7odoDUcPHjQZN77iz59+phs0KBBJnv77bfd7Xz44YfN2Lv08IkaAAAAACSGEzUAAAAASAwnagAAAACQmIrWqIUQVkraIemApP0xxjHV2CmgJdFb5IbOIjd0Frmhs0hRNYaJfCbGuKkKt4OjMGLECJONGWNfU9q1a2eyZ555xr3Nffv2Vb5j+Ui+t95ib2+IjPcYS1LXrl1Nduqpp5rsqquuMtnYsWNN5i1c79mzp7vtPXv2mOydd94x2e23326yZ5991mTe4uNjUPKdLcpbOC5JHTt2NFmHDh1M5vXOG7hw5plnmuy0005zt93Q0GCy9957z2Rz5swxmbdofceOHYW34w3V8QZaFM0SknxnU7v/vL5fcskl7mVPOOEEkzU2NprMe/1M7bgTknxnc+d1z3tvc/XVV5ts+PDhJrvjjjvc7TBMBAAAAADQIio9UYuSXgghzA8hTKvGDgGtgN4iN3QWuaGzyA2dRXIq/dbH8THGtSGEvpJeDCH8I8b4yuEXKJWdwiMlH9lbOosE0Vnkhs4iN7ynRXIq+kQtxri29PsGSX+UdJ5zmftijGNYlIlUNNVbOovU0Fnkhs4iN7ynRYqa/YlaCKGrpONijDtKf75Ykp0MgBZxzz33mKxz584m27Jli8kef/xx9zb3799f+Y4lLtXeeoNDjj/+eJMNHDjQZOeff757m5MmTTLZxIkTTdajRw+TeQt7vUXu3n5LfhdHjx5tsh/96Ecme/nll022fft2dzvHglQ76/H64A0IOeOMM9zre0MTvN506tTJZH369DFZr169TOY9ryS/896AJe95sHPnzkKZJNXX15ts165dJps5c6bJZs2aZTJvwbw3nKQ15dTZtuQNgvIGPk2dOtW9vtflDz74wGRbt241GQOa/l1unS33tfdIuQyN8QZEecNEvNfpcsNEakUl3/rYT9IfS2VpL+l3Mca/VGWvgJZDb5EbOovc0Fnkhs4iSc0+UYsxvivpnCruC9Di6C1yQ2eRGzqL3NBZpIrx/AAAAACQGE7UAAAAACAxlY7nRyvwFgxfcMEFJvMWjf7tb38z2cqVK6uyX6geb1H5gAEDTPaDH/zAZN6AEEnq3r27ybwhDHv37jWZN4TG61fXrl3dbXvb8bKhQ4eabPDgwSZbvHixux20HW8xuzdE5uKLLzbZd77zHfc2+/XrZ7INGzaYbN68eSabPXu2yd5//32TeYvWJX+ozoknnmgyr/N9+/Y12SmnnOJu59JLLy10m1dccYXJHnjgAZPdddddJtu8ebO77VwGC9QibwiN91o3bZqd/F6us97jOXfuXJN5fWCYSD6819rjjiv2OUtbDxYqatiwYSYbMmSIyf7xj3+YzBvGVEv4RA0AAAAAEsOJGgAAAAAkhhM1AAAAAEgMJ2oAAAAAkBiGiSSk3E+af/PNN03WsWNHk+3YscNkt912m8m
84RFoW95C80984hMmmzBhgsnq6urc2/QWi3vDFZ5//nmTvf766yZbv369yXr16uVu++tf/7rJLrroIpN5PT799NNNxjCR9HhDjsaNG2eyW265xWTeoBzJH37029/+1mReH/bs2WMyb5F5uUX4Xu710xuY0q1bN5ONHz/e3Y43LOK8884zWZ8+fUx24YUXmuzuu+92t4PW4X3dbt/evrXyhiVMmjTJZGPHjjWZ9/VB8oc+vfDCCybznhvImzeAzOvd/v373esXHTLivY+odCiR97XjS1/6ksm8IUu7d+82WWNjY0X7kzo+UQMAAACAxHCiBgAAAACJ4UQNAAAAABLDiRoAAAAAJIYTNQAAAABIDFMf24g3KepnP/uZe9lTTz3VZN7UHW862qpVq5qxd2ht3mQlbyrT0Uw88qY0/vKXvzTZk08+abLt27ebzJsc5k1lkqTRo0ebbOLEiSbzplR50+7KTUStdPoUivEmjPXr189kl19+ucn69u1rsnnz5rnbufPOO022YsUKk3mdb4kueBNyd+7caTJv+t78+fPd2/TuS6/f3mvC7NmzTbZt2zaT8bxoPd5rWP/+/U125ZVXmuzLX/6yyU466SSTlZtUunr1apN5zxfkzXvN8CbNeq+1I0aMcG9z3bp1JmtoaCh0OW+abtHpp5I/7fSrX/2qybzj9jpfbrJlreATNQAAAABIDCdqAAAAAJAYTtQAAAAAIDFNnqiFEB4IIWwIIbx1WFYXQngxhLCs9Huvlt1N4OjQW+SGziI3dBa5obPITZFhItMl3SPp4cOyWyXNjDHeEUK4tfT3W6q/e7Xr9NNPN9kNN9zgXtZbpLljxw6T/eQnPzFZrS+y/AjTlVFvvcEB3qLw+vp6k5VbaD537lyTvfHGGybzhhF4QxS84SblhnwMGDCg0H56t7l582aTHSPDEaYr0c56j503TKRnz54mW7JkickeeeQRdztr1641mfca1lp98LbjPVe9RfOTJ092b/OMM84wmbdo3lvE/+CDD5qs3DChVjJdiXa2tXjPjSFDhpjsC1/4gsm8QWEdOnQwmTfARpLuvfdek7VxH3IwXQl31uuTN7Rr3LhxJpsyZYrJzjzzTHc73rAx77X6j3/8o8lWrlxpsu7du5tswoQJ7ra//e1vm6yurs697JEGDRpUaNuSPxwlx/cSTX6iFmN8RdKRI62mSHqo9OeHJNlRX0AborfIDZ1FbugsckNnkZvmrlHrF2P81+n4+5Lsf60C6aG3yA2dRW7oLHJDZ5Gsin+OWowxhhDKfpYYQpgmaVql2wGq6aN6S2eRIjqL3NBZ5Ib3tEhNcz9R+yCEMECSSr9vKHfBGON9McYxMcYxzdwWUC2FektnkRA6i9zQWeSG97RIVnM/UXtG0jWS7ij9/nTV9qgGeQMXbr/9dpOV+ynu3uLHv/zlLybzFnji3yTbW29gwrJly0z26KOPmqzc4nFvMMPgwYNN9sEHH5hsy5Yjv4Xf7+HFF1/sbvvTn/60ybznwe7du022fPly9zaPUUl01nvsvQEHr7/+eqHbe/PNN9183759R7djbcDr8dChQ0120003udfv1q2byfbs2WOyH//4xybznhsJLo5PorPVVm5oU8eOHU3mDYzxBod06tTJZN7rebnhO96ghwT7UHXec7DC4066s96Amf79+5vsM5/5jMn69u3r3qb3muW9B503b57JvNfpgQMHmuzCCy90t+0NR/EG5HnPrZNOOslk3nFL0rPPPmuyDz/80GSpP2eKjOd/VNJsSaeFENaEEK7ToTL/RwhhmaTPlv4OJIPeIjd0Frmhs8gNnUVumvxELcY4tcw/TazyvgBVQ2+RGzqL3NBZ5IbOIjfNXaMGAAAAAGghnKgBAAAAQGIqHs+Ppnk/cX3UqFEmK7eg0Vtoftddd5nMG0iBPBQd1jB79myTlVvk7i2wnTjRfneHt6jYGzDiLWi+/vrr3W2XW8B8pG3btpnMG2SCtuX101uU7Q3A8YYjlHuMDxw4UGjbbalHjx4m++lPf2qyQYMGudf3jufll1822WOPPWayvXv3FtlFtIByPWzXrp3JTj/9dJN5g0O8oQzeAIQ777zT3fb27dvdPEfegBDJv99bYJhI0ryhW977A++9gDeQQ/JfS7w+NTQ0mMzrrfe1fMaMGe62va8J559/vsl69+5tshNOOMFkY8eOdbfz9ttvm2zhwoUmS32IFZ+oAQAAAEBiOFEDAAAAgMRwogYAAAAAieFEDQAAAAASwzCRKvMWbk6ePNlk3qJPb2iI5P9k+AULFjRj75C7jRs3mmzmzJnuZdu3t09vb5jIhAkTTOb101vEe8opp7jb9hZ2e4MivAXI3sJppMdb0O8NCTl48KDJyg0+8i7blrwBEDfccIPJLr744sK36Q1cueWWW0y2YcMGk9XywITUlRt24Q1j+vznP28yr/Ovvvqqyb71rW+ZbM2aNe62U3u+FOW9T/K+Xknl3xfVKu857g3+WLRokcm8AU/lBntt3rzZZK+99prJ3nnnHZN5Q0c2bdpkstWrVxfetjf447LLLjOZN6hnwIAB7nbOOOMMk61YscJk3tetlF5r+UQNAAAAABLDiRoAAAAAJIYTNQAAAABIDCdqAAAAAJAYholUoF27dibr37+/ya677jqT9evXz2TlFi/edtttJkv9J6mjZXgdaWhocC87e/Zsk3kDQUaNGmWywYMHm8xb7O0NW5D8fnqL6b0BI507dzZZuYX8KS34rWXe/ew9Jt5Cb28QgvfaWe42i267qHLX7dKli8luvPFGk33/+983mXc85QZAXHnllSZbvHixybznBlqHN0xp5MiR7mV//vOfm8z7+r5y5UqT3X333SZ7//33TZbi0BDveeRl3tcI73XCG54jSevXr2/G3uWr6CAu7+v+qlWrTFZu0MZbb71lsiVLlpjMG1DiDfzyHvtyQ6O8wSPekCVvH7332CeeeKK7nY9//OMmmzNnjsm2bt1qspTeW/CJGgAAAAAkhhM1AAAAAEgMJ2oAAAAAkBhO1AAAAAAgMU2eqIUQHgghbAghvHVY9t8hhLUhhPrSr8ktu5tAcXQWOaK3yA2dRW7oLHJTZOrjdEn3SHr4iPynMcYfV32PEuVNtOnbt6/JbrjhBpOdf/75JvMmIf397393t+1N78NHmq5jqLPlJoJt3rzZZN50uS1btphs+PDhJuvQoUPhbe/atctkxx9/vMm6d+9usquvvtpkv/rVr9ztbNy40c0zNV2J9tabgOX1xptENnDgwELXlfxJiXv27Cm0P95rtDeNsWfPnu62r7jiCpN997vfNZn32u1N6psyZYq7nUWLFpkspQljR2m6Eu1sJbyOeNOXJemss84ymdfZp556ymQLFy40mfeaWm5Kqtcb7/qV9svbfteuXU3mTcv0vpaceeaZJps3b5677RaY+jhdmXXWe/y8yYkrVqwwmTdhU/LfC3iTNxsbGwvtj/f+oE+fPu62P/vZz5psyJAhJjvppJNM5r1n6NWrl7sd7z4q2tuUJq02+YlajPEVSf5XVS
BBdBY5orfIDZ1FbugsclPJGrVvhBAWlj5G9k9nJYUQpoUQ5oUQ/P8uAVoPnUWOmuwtnUVi6Cxyw/sDJKm5J2r/I+lkSaMkrZf0k3IXjDHeF2McE2Mc08xtAdVAZ5GjQr2ls0gInUVueH+AZDXrRC3G+EGM8UCM8aCkX0s6r7q7BVQXnUWO6C1yQ2eRGzqLlBUZJmKEEAbEGP+1wvPzkt76qMvXgv79+5vMW1zsDULwhih4nnjiCTc/cOBAoeujvGOxs/v37zfZtm3bTLZ8+XKTnXLKKSbzFth7C40laceOHSbzFiD369fPZDfddJPJRowY4W7nlltuMZk32CHXYQ2p9Na7/7zXpY4dO5rs3HPPNdnZZ5/tbsdbDO8NjPG67XXO25/zzvPfg02dOtVknTt3Npk3CMXrbH19vbudXLtYVCqdLcobJOANuxg1apR7fW+4jNdF73LDhg0zmfc66w0nkfznwbp160zmPVe9HpYbWuLt02mnnWayoUOHmmzs2LEm845nyZIl7rY91X4O5dZZSdq7d6/Jli5dajJvmIvkD+XwXhu94Rvetk888USTXXbZZe62P/e5z5nM66g3oORo3od4Q82890ApDQ7xNHmiFkJ4VNKFknqHENZI+oGkC0MIoyRFSSsl/WcL7iNwVOgsckRvkRs6i9zQWeSmyRO1GKP9b0bpNy2wL0BV0FnkiN4iN3QWuaGzyE0lUx8BAAAAAC2AEzUAAAAASEyzhonUMm9hseQvvPQWyXqXK2rePH4sB6rHWyDb0NBgslWrVpnMG+rgDVHYsGGDu+333nvPZN5C8ylTppisR48eJps0aZK7nddee81kDz30kMnKLcZH83kLvT3eIJhu3bq5l/WG2HjDbrxhDatXrzbZyJEjTTZu3Dh3294idW/R/NNPP22yP//5zyZLfYE6DvG+5nuDQ7x+SP7j7PXbe6275JJLTOYN9Ni8ebO77VdeecVkzz//vMm81/O+ffuazBtuIvkDgbzrn3zyySZ79913Tfa73/3OZMuWLXO3zfPI590vL7zwgsnWr19vMsl/rLzHecKECSbzhix5Xw+8oTyS/1zyhomEEApd7q23/Nkvjz32mMk2bdrkXjZlfKIGAAAAAInhRA0AAAAAEsOJGgAAAAAkhhM1AAAAAEgMw0SOUG7hqrcA8fjjj2/2dvbt22eyjRs3Nvv2gCN5i9K9Re7eAJyFCxea7A9/+IPJvKEhktTY2GiyTp06mcxbaH7ttdeabMCAAe52Jk6caLKnnnrKZAwTqYy3qNvjLTIfPny4ybwuSP5wmq1bt5ps9+7dJvOG0Hj7XW4Iivfa7217xowZJvP6jjyUGyB2JK8LklRXV2cyr9/e8Aavc97+eM8hyR+Wc8UVV5gsxmiy3r17m8x7XyJJa9euNdmCBQtM9vDDD5tszpw5JvMGh+zatcvdtrfv8LvjfT1es2aNe33vtdob5jR1qv2xc96AqI4dO5rMew9STteuXU3mvX6vW7fOZPfcc497m6+++qrJvEFUqQ+s4RM1AAAAAEgMJ2oAAAAAkBhO1AAAAAAgMZyoAQAAAEBiGCZSkLfQddu2bSbzFpXv37/fZL///e9NtmLFimbuHWB5i3vHjRtnsnPOOcdkb7zxhsm8QQ/ec0DyO+8tLH7yySdNNnToUJN98YtfdLdz1llnmWz06NEme+mllwrtI4rzFvl7j7E3WKHccBLv9XP79u0m27lzp8m8gTFHs23veLzOe4MQkC/vdeC5554zWZ8+fdzrX3/99Sbr169foW17g0MOHDhgsr1797rX37Jli8m8QTve88UbjjJ//nx3O0888YTJ6uvrTeY9V73nlXeMDA2pXNH7WpIaGhpM5g0RW716tcm8oSPewJvLL7/c3fb48eNN5g3l8Xr717/+1WQvvviiux3v/UmOPeMTNQAAAABIDCdqAAAAAJAYTtQAAAAAIDFNnqiFEAaHEGaFEJaEEBaHEG4u5XUhhBdDCMtKv/dq+d0FmkZnkRs6ixzRW+SGziI3RT5R2y/p2zHGkZLGSvqvEMJISbdKmhljPFXSzNLfgRTQWeSGziJH9Ba5obPISpNTH2OM6yWtL/15RwhhqaSBkqZIurB0sYck/V9Jt7TIXiZg3759Jps1a5bJvIl1Hm+KkjfxDEfvWOtsuSl23bt3N9lFF11ksrPPPttk3gQ9b7KSNxVP8qeZebwJTOvWrTNZualnXbp0Mdm5555rstmzZ5tsx44dRXaxVeTYWW9a3tq1a022adMmk/Xo0cO9Ta+z3muvN4XXu81BgwaZ7GMf+5i7be955E08Y1ro/8qxt0c6ePCgyZYvX26yH/7wh+71vcm1N998s8nGjh1rMu81aMaMGSYr93q6YMECk3nPQW9Sqfd+w3uuSf59lOP0PKk2OttSvMd58+bNJvOmjXpTRL2pzJL0qU99qtC2vfcX3tRHb4KllG9Hj3RUa9RCCEMljZY0R1K/UuEl6X1JxebRAq2IziI3dBY5orfIDZ1FDgr/HLUQQjdJT0r6Zoxx++H/+xhjjCEE99Q1hDBN0rRKdxQ4WnQWuaGzyFFzektn0ZZ4rUUuCn2iFkLooEOF/m2M8alS/EEIYUDp3wdIcr8HKsZ4X4xxTIxxTDV2GCiCziI3dBY5am5v6SzaCq+1yEmRqY9B0m8kLY0x3nXYPz0j6ZrSn6+R9HT1dw84enQWuaGzyBG9RW7oLHJT5FsfL5D0NUmLQgj1pew2SXdIejyEcJ2kVZKuapldTIO30PHxxx832TnnnGOyrl27msxbpF5uKASO2jHV2XILZr3cG8JwwgknmGzChAkmW7p0qcm8oSPlcm9B/KhRo0x26aWXmsx7Dkn+4ndvwEiHDh1M5j3f2nDxcXad9R7j119/3WSPPPKIybzXSUmqq6szWd++fQtdbvjw4SYbOXKkybyBJZK0cuVKk3kL172F9Mfwa3d2vW2ucoM26uvrTXbttdearF27diZriSEdtTJAoQUdM51tKV7HvPe0b775pnv9K6+80mTe15OFCxeabPHixSar9QFPRaY+/j9J5b4KTazu7gCVo7PIDZ1FjugtckNnkZujmvoIAAAAAGh5nKgBAAAAQGI4UQMAAACAxBT+OWqw1q5da7L33nvPZN7CS29hsZcBzeUNDvE66/Wzd+/eJrvxxhtNdtFFF7nb9rrsLfg9+eSTTdanTx+TlRvWsHHjRpM1NDSYjAX2lfHuPy/btm2byX7961+b7Oyzz3a387Wvfc1k3mCbnj17mswbOtKpU6dC+yhJr732msmWLFlist27d7vXB/7Fe27U+sADHNu8YSAvvPCCe9lJkyaZbMSIESabO3euybzX31r/+s4nagAAAACQGE7UAAAAACAxnKgBAAAAQGI4UQMAAACAxDBMpAIffvihyR588EGT7du3z2T//Oc/TdbY2FidHQPkD9W4//77TTZ+/PhCWV1dncnGjRtXeH8OHjxosvbt7UuQt+jee65J/mLjBQsWmGzv3r1FdhEtwBtqM3/+fPey3kLxq666ymQXXHCBybwF5Zs2b
TLZSy+95G77ueeeM9ny5ctNduDAAZMdd5z9P0+v7+X2EwByVnS4lCTde++9JvOGRtXX15vMe19T6/hEDQAAAAASw4kaAAAAACSGEzUAAAAASAwnagAAAACQmNCaC5tDCDW/ijqEYDIWj1dPjNHewS2o1jrr9bNPnz4m+8pXvmKySy65pNDtSVL37t1N5g0E2bp1q8kWL15ssqVLl7rb8fIVK1aYbM+ePSbzhkK0BDpbnDeUo3PnziYbNmyYyQYPHmyynTt3mswbECL5C9+9AU/e67k3OKTc634mXw/mxxjHtNbGcu4sktGqnZXobXN16NDBZN26dTOZN1zKGwyWyWuqq8j7Az5RAwAAAIDEcKIGAAAAAInhRA0AAAAAEtPkiVoIYXAIYVYIYUkIYXEI4eZS/t8hhLUhhPrSr8ktv7tA0+gsckNnkRs6ixzRW+SmyWEiIYQBkgbEGBeEELpLmi/pcklXSWqIMf648MZYeIkKFVl4SWdbRrt27UxW7vWjkqE63uVqfbEwnW0ZDHdqtiYHM9BZJKbQMBF6i5QUeX/QvsCNrJe0vvTnHSGEpZIGVr57QMugs8gNnUVu6CxyRG+Rm6NaoxZCGCpptKQ5pegbIYSFIYQHQgi9qrxvQMXoLHJDZ5EbOosc0VvkoPCJWgihm6QnJX0zxrhd0v9IOlnSKB3634mflLnetBDCvBDCvCrsL1AYnUVu6CxyQ2eRI3qLXBT6gdchhA6SZkj6a4zxLuffh0qaEWM8s4nb4ft5UZGiPzyYzlYfa9Sah862HdaoNVvR9T50Fqko/AOv6S1SUZUfeB0OfaX7jaSlhxe6tCDzXz4v6a3m7CRQbXQWuaGzyA2dRY7oLXJTZOrjeEmvSlok6WApvk3SVB36iDhKWinpP0uLND/qtvjfB1Sk4AQ9Ootk0FlkqMjURzqLlBT9FJjeIhmF3h+05reBUGpUqui3kVULnUWl6CwyVPiakt3UAAAEJ0lEQVTbyKqBzqIKWrWzEr1F5aryrY8AAAAAgNbFiRoAAAAAJIYTNQAAAABIDCdqAAAAAJAYTtQAAAAAIDGcqAEAAABAYjhRAwAAAIDEcKIGAAAAAIlp38rb2yRpVenPvUt/rwW1dCxSusczpA22SWfzkOrx0NnqqaVjkdI+ntbuba12Vqqt40n5WNrytTbl+6U5aul4Uj6WQp0NMbbND1YPIcxr7Z8i31Jq6Vik2jueaqml+6WWjkWqveOpllq6X2rpWKTaO55qqbX7pZaOp5aOpZpq7X6ppeOphWPhWx8BAAAAIDGcqAEAAABAYtryRO2+Ntx2tdXSsUi1dzzVUkv3Sy0di1R7x1MttXS/1NKxSLV3PNVSa/dLLR1PLR1LNdXa/VJLx5P9sbTZGjUAAAAAgI9vfQQAAACAxLT6iVoIYVII4e0QwvIQwq2tvf1KhRAeCCFsCCG8dVhWF0J4MYSwrPR7r7bcx6JCCINDCLNCCEtCCItDCDeX8iyPp6XQ2XTQ2WLobDrobHE597aWOivR26Jy7qxUW72t1c626olaCKGdpF9I+j+SRkqaGkIY2Zr7UAXTJU06IrtV0swY46mSZpb+noP9kr4dYxwpaayk/yo9HrkeT9XR2eTQ2SbQ2eTQ2QJqoLfTVTudlehtk2qgs1Jt9bYmO9van6idJ2l5jPHdGOM+SY9JmtLK+1CRGOMrkrYcEU+R9FDpzw9JurxVd6qZYozrY4wLSn/eIWmppIHK9HhaCJ1NCJ0thM4mhM4WlnVva6mzEr0tKOvOSrXV21rtbGufqA2UtPqwv68pZbnrF2NcX/rz+5L6teXONEcIYaik0ZLmqAaOp4robKLobFl0NlF09iPVYm9r4jGmt2XVYmelGniMa6mzDBOpsnhojGZWozRDCN0kPSnpmzHG7Yf/W47Hg6OT42NMZ49tOT7GdPbYlutjTG+PbTk+xrXW2dY+UVsrafBhfx9UynL3QQhhgCSVft/QxvtTWAihgw4V+rcxxqdKcbbH0wLobGLobJPobGLobCG12NusH2N626Ra7KyU8WNci51t7RO1uZJODSEMCyF0lPRlSc+08j60hGckXVP68zWSnm7DfSkshBAk/UbS0hjjXYf9U5bH00LobELobCF0NiF0trBa7G22jzG9LaQWOytl+hjXbGdjjK36S9JkSf+U9I6k77b29quw/49KWi+pUYe+H/k6SR/ToUkyyyT9TVJdW+9nwWMZr0MfAS+UVF/6NTnX42nB+4nOJvKLzha+n+hsIr/o7FHdV9n2tpY6Wzoeelvsfsq2s6X9r5ne1mpnQ+ngAAAAAACJYJgIAAAAACSGEzUAAAAASAwnagAAAACQGE7UAAAAACAxnKgBAAAAQGI4UQMAAACAxHCiBgAAAACJ4UQNAAAAABLz/wEh+Ywm1FUU8AAAAABJRU5ErkJggg==\n", + "text/plain": [ + "
" + ] + }, + "metadata": { + "needs_background": "light" + }, + "output_type": "display_data" + } + ], + "source": [ + "samples = torch.randn(5, model.representation_length)\n", + "\n", + "fig, axes = plt.subplots(1, 5)\n", + "\n", + "for index in range(5):\n", + " decoded = model.decoder(samples[index][None])[0].detach()\n", + " show_image(axes[index], decoded)" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "tensor([[ 0.8606, 0.9047, 0.1575, -0.7448, -0.3117, 0.0745, -0.3145, 1.4116,\n", + " -1.5365, -0.6043, 2.6963, 0.4136, -1.0794, -0.8664, 0.7766, -0.4429]])" + ] + }, + "execution_count": 14, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + " model.encode(get_sample(0)[None])" + ] + }, + { + "cell_type": "code", + "execution_count": 19, + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAA2oAAABpCAYAAABLV9A4AAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4zLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvnQurowAAIABJREFUeJztnXmULlV5vZ8KisZoVFAIYUYwSsAIMioqKiCjoCLBiWv8RYKiCCErEsiSRMUgGoyzIUrESBAQFMSBSQaJigwyhEFA5SIEQQRBjcZo6vfH7X1qF7fu7am+/qq797MWi7pvT1X7O3VO1Xn3eU9V1zUhhBBCCCGEEIbD74z7BEIIIYQQQgghtMmLWgghhBBCCCEMjLyohRBCCCGEEMLAyItaCCGEEEIIIQyMvKiFEEIIIYQQwsDIi1oIIYQQQgghDIy8qIUQQgghhBDCwJjVi1pVVbtWVfXdqqpuq6rqiL5OajETTfsnmo6G6No/0bR/omn/RNP+iab9E037J5rOPdVMN7yuqmoV4BZgZ+BO4ArglXVd39jf6S0uomn/RNPREF37J5r2TzTtn2jaP9G0f6Jp/0TT8fCIWfzsNsBtdV1/H6Cqqs8CewMr/MCqqprZW+Hi4D7gJUTTPomm/XNfXddPZpr3fzRdKdG0f6Jp/8xI04nvia4roK7rimjaNxn7+yea9o/61JUyG+vj2sAP7d93TsRaVFV1YFVVV1ZVdeUs/tZiYCnRtG+iaf8snfj/pLpG0ykTTfsnmvbPlDWF6DpNomm/ZOzvn2jaP0sn/5bZZdSmRF3XJwAnQN6s+yKa9k807Z9o2j/RtH+i6WiIrv0TTfsnmvZPNO2X2WTU7gLWtX+vMxELMyea9k80HQ3RtX+iaf9E0/6Jpv0TTfsnmvZPNB0Ds3lRuwLYpKqqDauqWhXYHzi7n9NatETT/ommoyG69k807Z9o2j/RtH+iaf9E0/6JpmNgxtbHuq5/U1XVm4FzgVWAE+u6vqG3M1uERNP+iaajIbr2TzTtn2jaP9G0f6Jp/0TT/omm42HG5fln9MfiVV0ZV9V1vdV0fyiarpRo2j/RtH+iaf9E0/6ZkaYQXVfGRNXHaRNNV0ru//6Jpv0zJU1nteF1CCGEEEIIIYT+GXnVx/lGVS2b3FpllVWW+5pnH3/729/O2TnNd37nd5bNB3Rp+n//93/lOJpOnS5N1T5dUz8Oy6P73Y+lLXRrOpcuhPlIl6Yek5bRceq4fl1Ey9kjjaNlCGFIJKMWQgghhBBCCANj0WbUfIbyd3/3d8vxXnvtBcCSJUtKTDPsp556aomdccYZAPzsZz8rscU+E+eaPuYxjynHu+yyCwCvfe1rS0yz6ieddFKJfe1rXwPgv//7v0ssmjaa/t7v/V45fuELXwjAy1/+8hL7+c9/DsCnP/3pErvmmmsA+J//+Z+Rnud8wjNmrumzn/1sAHbaaacSu+uuZZWHv/CFL5TYnXfeCcBvfvObkZ7nfGJFmm6xxRat/wPccsstAFx++eUl9tOf/hRIBthZkaZPfepTAfjDP/zDErv11lsBWLq02T/1V7/6FZA+9OG4rj72r7XWWgA86lGPKrH/+q//AuChhx4qsTg/lsfHKdfv93//94F2G3zwwQcB+N///d8SSxtdHtf0kY98ZDmWvj7+aHxP/zl13I2kY9dvSON7MmohhBBCCCGEMDDyohZCCCGEEEIIA2PRWR9le3j84x9fYq973evK8etf/3qgbStRWtktDzfddBMAV199dYn9+te/7v+E5wFKG8vmAPCqV72qHEvT9dZbr8Rky/H08h133AHAjTfeWGJuj1hMdGn6ile8ohz/2Z/9GQAbbrhhicky6jaJ973vfQB8//vfL7EhpfTnki5N99lnn3KsfmCjjTYqMVmeZIsC+PCHPww0tihYvHYoaer9qezj0NidXdMf/ehHQGMfh8YCff/995fYYrXxPOIRy4Zlb6e77757OVbfuv7665eYrI+nn356iZ199rJ9aGWJhsVtMVO/+IQnPKHEdt1113L8spe9DIAnPelJJXbFFVcAcOaZZy4Xi50cVl11VQBWX331EnvRi15UjnfbbTegbYc877zzgGapA8Dtt98OLN6xyW2O0nS11VYrsec+97nlWMse9AwFcM455wBw7bXXlthPfvITYPH2o13W0cc97nEl5nb85z3veQA88MADJXb++ecDTdsE+MUvfgHMfT+ajFoIIYQQQgghDIxFkVHzN+snP/nJAPz5n/95iR100EHlWDND/sasGYk//uM/LjHNFHn2ZzFl1FzTNdZYA2hnJl1TzVD6zI5+frvttisxzRp3ZX8Ww0ywL3JXBscLsBx44IHl2Gd9hWaNdt555xJT5vdf/uVfSmxcs0LjwBcMr7POOkB3thdgzTXXBLqL4uy7774lpmIYp5xySol5AZyFjjI+ABtssAHQzvYecMAB5fgP/uAPlvsZZYq8D9Y9/+Uvf7nEFlPGwrPgT3nKU4B2oaD999+/HMvt4ZqqP/Asm4refOMb3yixxeJQ0D3smZxnPvOZQJM5A9hjjz3KscYx/yzUvp/1rGeV2CGHHALADTfcUGKLIROk8cmzEsr
0vPSlLy0xFWWC5nnL+9TNNtsMaGfe3va2twHwwx/+sMQWg0tB97BnzzR+77nnniXm2Z+usX+HHXYA4JJLLimxY489Fmgya7A4smvKSOp+Bnjxi18MtJ+NnvGMZ5Rjfa/ro37Cs+l6jtIzFMzNc1QyaiGEEEIIIYQwMPKiFkIIIYQQQggDY0FbH5Wql6UJ4OijjwbaViZP5cvC5Pum/PKXvwTae67IpucLYi+77DJgYafsZSXzIhZvf/vbgXaq/rGPfWw5lqaeLpatya0p+kzcqrMYNJXVxlPxRxxxBNBO1fteStJSbRMa+42301e/+tVAe88qHS9kTR/96EcDsP3225fYX/3VXwGNTQTamqroglvuZIXwwg6y+F511VUldt1117W+fyGie1o2EoCDDz4YgK222qrEfA9FaertVBq5hUdWabeTqUDGQrPoug1MhS3222+/ElP7kkUMmvYM3e1Uv1P2Xmgsej/4wQ9KTNayhaYptK3j6667LgBvfetbS0xjthe1cvuodPUlDNL96U9/eokdeuihABx55JElpgI5C01Xt4Fuu+22ABx11FElJjvpE5/4xM6f19jvY42+1wtkSNNjjjmmxO677z5g4Wnq97Is43/5l39ZYnq28u9z1Je6prrv3YKuPX4/8IEPlJj2q1womqrf8+fNww47DIDXvOY1Jbb22msD7fbsGqgwi4/fm2yyCQBvfvObS0zaf+YznymxuVhKkoxaCCGEEEIIIQyMBZdR8+IBeiP+6Ec/WmJbb7010J6t8EIAWtT+3e9+t8S0AHvjjTcuMS0y/tM//dMSU9nehVZYwGchtKj6Ix/5SIltuummQLOIE9oa3HbbbQAsXbq0xDT74IvfNQu69957l9i3v/1toD0jvxDw9veCF7wAgH/6p38qMbUv19TLbKughS++1ufkM8aaSdpll11KTFtKLJSMmmbVPDumhcDvfOc7S6yrCIO3UxVe8bL7yg55pkKfjWfmVFRooRQUkqZdmURle6FZhO3ZDG+nKhd99913l5hK+Xs7fepTnwq0izaoL14ohRqkqRcO0Gytsl/QZBw88+aa6v71dqrPwTVV8SvP1N91113Awrn3oWl76uugue+9IIvuZZ81f/DBB8uxxpp77rmnxFTUSWMTwJZbbgk04x7AvffeCywcXfUc5QXUtNWLrh+aMcf7PWXCoHkmkj7Q9KU+9qsAyR/90R+VmIpgLJTsj8ZyL6Lyrne9C2iPL2rP/szj+klTLxKi+15jEzRunK985SvL/ex81tT7RbmHPHum7KSPXfoZ70eVBQe48sorgXZ/oMym3/t6rvjmN79ZYtdffz2QjFoIIYQQQgghLCryohZCCCGEEEIIA2NS62NVVScCewL31nW92URsNeBUYAPgdmC/uq4fWNHvmAtkZ3J7ohb8ufVDaWVPG1944YXlWMVBrrnmmhKTpUqFGaBZXCzbGjTWKln9pskqVVWdz4A0VareLR7aR+JpT3taiUlTtzx4kZXzzjsPaKxl0Nh7lixZUmKyP73whS8sMe3Dcscdd8zoGobWVlU8xYsvfPCDHwSa/ZOgSdW7Deeiiy4qx2effTbQtpPKpuP72el3ut3i4x//ONBO/U+HoWkqG6nv3/OOd7wDaFvBhNvwXNPPf/7zQFsXWUlcU1l2vNDLv//7vwNw//33z+gahqapbGLev6kYi/ZGg8bu4e3Q+9PTTz8daOui+9z3UVNRAm+nX/rSl4B2YafpMDRNteBdRS0A3vjGNwJtO6Ssnr6fpPpQaPb1cV3UR/teixqj/L5QvzxTe/7QNIWmGJgvQ9hrr72AtsVceqnwD8C5555bji+44AKgbTmTrvqcoHnG8L2tZlP0aiiauqVMhX7e9KY3lZjbIIWs996PfvWrXy3H3/nOd4C2LUyaut1XNjN/rlDRqxkWaBrE89RkBW5U6M4LA33ve98D2ntKur5a9uC/e/PNNweaQhrQPA+4xVQFsBaKphpL3vCGN5SYxq4uu7jGeIBvfetb5Vh7Tnp/IYuvF3qRpnrWh6YA1igLiU0lo/YpYNeHxY4ALqzrehPgwol/h9mxFtF0FKSt9k807Z9o2j/RtH+iaf9E0/7J81T/RNMxMWlGra7rS6uq2uBh4b2BHSeOTwIuBt7W43lNCZ8B0kzMJz7xiRLTLK2jDIWyCgCnnXZaOdbMr884KqOmhZgAO+20E9De/XzXXZe9z3rxkmm8ZT+BZVrCGDX12QppesIJJ5SYZ9eEFrX7dZ911lnlWJk2n6nUDLJnLjXz69spPOc5zwGaGQ+Y9szF2Nuqa6pZbi8coqyXzzpqpvJDH/pQifmiYC0uVqEbaLLEKmwBTebOF8RqJthnk+ebpl40SDO97373u0tMmTSf4VbW5/3vf3+JeeZXmroWmoX3LPmf/MmfAO3svWb2VJDg4b9nCgxKU83QausNaGYRvXDAzTffDMCxxx5bYr4VhPpT76v1814IR64H71/091RmGqa9YHtQmuoaDz/88BJT5sJnf9WG3vOe95SYFqz79/rvVjv3bLG0VFEtaLYD8L54vmkK7YJAaqvuzlCRAXdinHHGGUDjCoF29lx9qRfPEu5s0L3umWX9Pc+MTINBaNpViG2bbbYpMbUx3z5Hpd8vvvjiEvNnJ7UtL4qlz06l4v1ve0EofZ+PcdNgcM9Tcgd51ubHP/4x0B7b9RzVVdDOcU31O/371I59HPJ+eAYMQlNH16ssIzSOhH/9138tMT2PSm9oPxtIF9dUmXrva+SImuvidjOt+rhmXdcaEX4ErLmib6yq6kDgwBV9PRQeEU1HwpTaajSdFtG0f6Jp/0TT/snY3z/RtH/yPNU/0XRMzLo8f13XdVVVK5ySq+v6BOAEgJV9X2iIpqNhZbpG05kRTfsnmvZPNO2fjFP9E037J5r2TzSdW2b6onZPVVVr1XV9d1VVawH3TvoTPaI0pdsOZWfwVL3S7r54WDvfu+2rK1XvdhDZGdz6KCuEFhcC7LjjjgCceOKJnb97En4zBE1lxYGmyIUXvpAuWpwJcNxxxwHtBe/ard1/xjWV/cltUloM7pYyaepWymkuhB9bW5WmKpwCjT3P96KRNcHb1z/+4z8CcP7555dYl6ZuZVAq39u7bGNuJ5XF1Bco/+pXv5rGlY1fU98jRft6+eJ+aeH7ncjy6MUuutqS21V0n7v1VnYLv1e0/5cWz8O0bVCD0vQtb3kL0C4gIK283Rx//PFA2/LZdd1uJ1ObdAulNHc7mfZU0uJ6mLYNalCayprnRYNkDVXRFWisT7feemuJ+V5yXTYdfV0WPGjsZH7vy37lVr5pFsAYxNivggHQFGfx+1Havfe97y0xFWd44IGm/sFktjDFZBmFpq36Zyvrmf+OaVhKx6qp8HtUVnm3HWsM1tgEjT3P72W/bunh9jEV1fFnpy68H54BY32eEt4e1O6++MUvlpjuQ7c+qk9Y0X2p3+ltV+3TNVU/7HuDzZJBaNq1RMT3S9V1+5it2GT3pH9dWnr/KWu5W6bnYk+6md4JZwMyhC8BzlrJ94ap8VOi6S
hIW+2faNo/0bR/omn/RNP+iab9k+ep/ommY2Iq5flPYdlC1ydVVXUncDRwLHBaVVX/D1gK7DfKk3w4WnDqC9233nrr5b5PGQovWaxCCz5bMdkbsb73rrvuKjEVF9BMOsBmm23WOj+YVvbnbmDncWmqGUovx6tCHj4rpAzYoYceWmIqCLKiWTXhv0czwL74XTNJPlOswg0+UzwNTZ/EGNuqZln33XffEtPWA66FshF/8zd/U2Iqo9uV7XW6NPVF2ppJ8tlSaaqFsTCtjNpYNdU5exn3XXbZBWhrodK7KtMPcOWVVwLtzGRXwY+uWXFv28I1VfbHY9PIqI1VU91v2267bYm5vkKFV3z2UjPq3n66ZtR9dlwz6r6oXtkfn3nXrL4XOZhGRm2smuo61C4Att9+e6C9EP1zn/sc0C5wo3FmRdcqTV0/leD2rJLuFb/PNfM+w6ICY9UUmrbg5bFVNMVn0D/72c8C7RLnyiysqMiP2qhnz5SlX2eddUpMuncVc5hhRm2smuq6vZ3o2F0yGvs9w60xp+ueh+bz8kyPtjXxmPpNzxzrZ2eo6Vifp4Sfr8Ydz56pL/Cs12RjUpf7YLfddgPa7h09T3lWdJYMTlONsb6VidrNZM+jTle/st9+yy7P+1kVJ/JnrLnIqE2l6uMrV/Cl5UfyMBt+W9d1NO2X++q6/glpq30STfsnmvZPNO2faDoCoulIyPNU/0TTMTErE3AIIYQQQgghhP6ZddXHucJTv1rg/upXv3q5r/u+XPr6D37wgxKbye7hSm36XjdKY/t5KUU6y0Wwc4afu6wzBx10UIkpHeyaagf4m266qcSmuhjdU8T6HDyFLPuU6ydL5iz3/xgLWrjvO9vLLuMFPw477DCgXYhCtqfppNVlG/G9QpT+d0uZrLnzUVPtbXLIIYeUmGyx//mf/1lif//3fw+09/xR+1pRH9C1SFuaegECafr4xz++xHQ8HzWVzem1r31tiamN+L2vPf18n77JFmlLD9dFx26XVHt3i7M+6/moqe437bcJTRuRLRcai572ooRGiy7NoLGJyUIKjWWnywrt7XQ+96fQnLf2MoPG7nXppZeW2GWXXQa0bc7CxxfvF9X2fM9JFX1yW6DGu9VXX73E9PX5qiu0i6NIU+2TCI3ty/vHLnuiW8X0e7zI2/Oe9zygbRNXX6B7HtqfzXzFbdtde51pLPE22bX/mWuq+1lWamiKrvnnoH1V/XfP5/bZha7Nde6y0UtLH6fcuqx26v31dtttB7SXMOi5d4b7Jc6Y+fFGEUIIIYQQQgiLiHkzZeFvx/vvvz/QLtqhGcnDDz+8xG6//XZgZlm0LnxW42lPexrQfpO/7777gHbmbcj4TMuee+4JtBdSqwSpshPQlD2eraaa2fFZ4a7iAcpkzPVO8DPFz/35z38+0F6geu+9yyraausDgBtuuAFoL6SeyQJVaeqzkqutttpy56X2Oc1S52PD2+kznvEMoL2Fg9qpb4uhTFDXNa5oVrEr+yP83tfMu/dJ+uymWep8bPg1rrfeekC78IXKHivjA03hEG+balcrmrVVvCvT41rp6z7L7jOe8wG/RhX1UKELaApQeTEBjVt+rX6vCtdF2Rv/PhUd6iq05D87HzNqfq7q21zXpUuXAk37hCaT5sUVNGZ5W/UsksY+L8et7/XMnDIaXX3CfNJV6DrUt0JzjcrKQDOWONLH+0IvEqIxfcMNNywxae4ZdX02nv3V850X2pgv/av0WHvttUvs6U9/OtAucqFjb3P+HCBcU/XXKl4HTX/jz0ldzg/1M/43+no+HjVqa/7cL9eS959qp66zrtevu+uzee5zn1ti0s0dSirM4s4PxXxc7LvASDJqIYQQQgghhDAw8qIWQgghhBBCCANj3lgf3aKgBX+exjz55JOBZq8k6C9NrpSrUs4Am2++OdBOG1999dXA/LGUue1wr732AtqaaY8fLcyG/q5N1gDt6QWwwQYbLHcOsrB12QGGiKfEX/aylwHtNPhXv/pVAC688MIS08LULsvYZEUaHNmanv3sZ5fYGmusAbT1k9VyvthIfCG/2qnbvlQwxAuHyPbgti8dr+i6palrLhuF75co+5VbK2Rrmy+4ptrbzy0lWjTteybpZ9ymI9vUiqw0sox5v6H9f9x6ov7ArTuyks+X4kxuv9H+W66VLLpuT5Qt2q1hsjG5tc5tYrL6PfTQQyWm8dHbuz4T/3uKddkrh4pf00YbbQS0rc+6b739anx225famz9LONqLyQsH6W/7va7PwvVX+50vbdU11R5x2jcVYJNNNgHaNnpp7zrrXvbv8/tAtjFfDqK+wPVTn+sW0y6Lb1dRiKHg5yldfD9KjcvelvS84Nej9ul9tD9XqACb97n6nb5nmvrPrkJiXgxDf3uImvr9pOI9z3zmM0tMVkU9+0D3Ehpp6Vr486+eCVxn3ef33HPPcjH/e+qnfezqu53Oj14lhBBCCCGEEBYReVELIYQQQgghhIExeOujUp+qsgiNXcTtNKpG6Gnl2eBpU6VSP/zhD5eYUv1eEUaV/IZufezSVFYST6fLzjVZxcUuy1iXjc/TzrIGHXXUUSWmtLMsQgCf/vSngf4+11Gha9Qef9Bco1vtZK9xi4K06qqc17VnDTRauj3ixS9+MdDeX1Bf932azjnnHGD47VTX6xXeZCVxLXRtXqGsay8VxVxTtwDJwuCayr6qvX/8Z9wSccUVVwDzR1O/9/fdd1+gbROTBcnbqfRxS55bnoRbn9R3eNuWfdXvFf2MayqLX1d1yCHZdNTGVDkMmj3pZOeGpn369ciKJLsjNLapFdl0ZI3yPlHt0/cA0897e9fPdO3RNiRNobnPvK2+5jWvAWCrrbYqMfWp3ieoXXrVR7Vvty2papz/PR8DtV+b75mm9u9VEPX3uqynQ6qqp/us6/5/wQteUGKy101WyU7X6F/ztty1F5qqa3qbVv/gSyG+/vWvA02lZGi09Hto3O1W44XsotCMU7vtttty36+KutBUtOwah7xv9aqPatPe3tUneNtVFd9ddtmlxGTr9T1H1dd3VZ32fmIudZambgnXvmZuJ9Uzo2ye0LRdH8fVDv0+9r5S1+afg/T1JU+77rrrcr/7ggsuANr7NWvc8+eB2WiajFoIIYQQQgghDIzBZ9T09ul7fGjGwbMEmlXrekOdbG8Tn+3VjJP/Pe0l5LMamoX4h3/4hxJTVm/cMzyToZkC39les1t33nlniamgQNeMYNeMrOOz6ppB86zE+9//fqA946nZpeOPP77Ebr75ZmD4mup6VZgBmuu+4447SkyZF58N75pp0WfkOrpW66+/PgA777xziR188MFAe7H3/fffD8A///M/l5g0HdJMbxea+VYGBppsg7ItABdddBHQ3m9Hi6W7iom4Pj5jp31pvJ0qS+kzw8qMnHrqqSXW1/6Co8Dbldrky1/+8hJT1sfb6de+9jWgPUuoPs9nEzVj6EUzPAOqjITv+bPlllsC7X5XM72+x5jOZ4j3vp+72qRnsnW93p+qnapAEjRFFjzLozYp7aCdA
VG/7eORPkPXSvfD5ZdfXmIq4ODnr+MhFBfyLMFTnvIUAA444IAS06z63XffXWLS9aqrrioxFVDxrI2KEKj9QXe79UyGjl0btVXt0wrNM4kXI9C90TWrPpe4prpG17SrrV588cUAXHfddSUmDVwffR7eFn0vVhUq8XGsq0CLNPWiOXJO6dkOmqyPf9+4NVW7eulLX1piysL4OPXtb38baO/3p77Ux3Y9Q3iGzgvgeFbo4bguOkf/PPT5e5ZSx+6eGMc45poqY+5ZXmWwNdYCXHvttUC7P5CWvn+tMpveTv05QNfr96qeIXzsl/bunpDm3g7lDHGXz2wK4iWjFkIIIYQQQggDIy9qIYQQQgghhDAwBm99VGrY94ZSmtKLTsjS4SnbLkueYp7O1F5T0CwWfNe73lViskf4XiCy7v3bv/1biQ294IWQHWTHHXdc7mtu51BKvCtl69p2Wco8NSybldstlMb21PA73/lOAD7zmc+U2GSFTMaJa6BF6W5FFL5wVwuJu9qK25FkhfJUvX9esgP6faHP1e0W0vSLX/xiieleGaKlzO9LWZ/22GOPEpNGbnOSFcL3i1I/4J+RLBFaZA2w5557lmPtyaL9gvx8ZBcFeO973ws0i4ihKSo0ROujWxVlm9tnn31KTNf4zW9+s8S+853vAI11Fpr24jYm3ccqnAPtxesqGOIFBvR7rr/++hL70Ic+BDQFBGDl/c+4cUudtHzJS15SYmp3spBCY0F0y76+zwu0yErphRW22WabcixLj9sl1fZ9H1EVYtJn6X973Ha8hyMdvJ0sWbIEaN+jKhTg+1BK17vuuqvE1OalJTT3vS9rkDUPGtui38Oy3fm9IXuuf46yDXZZ2seFNPX9Cv/iL/4CaNu7tVei9viEps34+Kx73a3NapduzXVrpOySbk2T9ddtleoLfLzX59ml6bi07dL0TW96E9AujqQlDj5GaJzyveJkkXRruGx/Pvb7uKjCGdq3Fxqrqtv/1X96fy1LXlfhEGcu9ZWmXtzjkEMOAdrWZFmcfYzQPejWZGnqS3tkT/V+262eep4499xzS0zt1PtZ9ZtehEi/x/twPRN3FQ6ZbClWF8mohRBCCCGEEMLAGHxGTVkaz9Zo9tBntDSD5m/MWuTXNav+/Oc/v8QOOuigcqzZDH+L1iyPZtIB3ve+97XOZeh4tkYzNT4roGv0WUlp7gtZNRPjsweaTfMCBbvvvns57lpQrNmev/7rvy6x0047DRj/QuGp4oUqlFHwmTbNBPpsorT0Uuhqn/55POc5zwHaC5Sf9axnLfd7vG1rYa1reumllwLNwlgYtqZ+n2sBsM96d2kqLb1ctmYgfUG2MnN+73v2TG3atdIM27HHHltimm3uaqeIb6M4AAAOJ0lEQVRD0rarXb3yla8E2rO1yrB23fuuqfpWFRCA5jPyGWHPYuge8fLJp59+OgCf/OQnS0yzzZNpOm59teDdi9BIU9dZRVi8QIP6P88a6dgL5ijb4e2+qw/2jMRJJ50EwJe+9KUS0+y5t2dli4akKTS6ejtSptKvXe3E26ruW5+RVyEWba8BTcbDxy5vbyrycP7555eYMqK33HJLiWnM98ybZvRdyy6t5xLdez5uqDiSo3biW2PoXvc2LceBZ8y7tkq68cYby/F5550HwJe//OUS03PbZMUruorbdOk8l2hc8WyNin+oH4Xm+cYdCXr29MybssW+3YS094IfnplXESsvqqE26bromc+LdHQ5TfTZjcsNIk19XNax95/S0jOsan+bb755iek51Md2XZsKugCccMIJ5fg//uM/gPZnqLbmz846V+9DunTW59HVhmfSdifNqFVVtW5VVRdVVXVjVVU3VFX11on4alVVnV9V1a0T/3/iZL8rrJxo2j/RtH+iaf9E095ZA6Jrz0TTERFN+yea9k80HQ9TsT7+Bji8rutNge2Ag6uq2hQ4AriwrutNgAsn/h1mzqOJpn0TTfsnmvZPNO2fNTJO9U40HQHRdCSkT+2faDomJrU+1nV9N3D3xPHPqqq6CVgb2BvYceLbTgIuBt42krOknUJUitHTna9//euBJoUJzcJjTzXvvffeQLuggNscZSu57bbbSux1r3sd0F5QPII08aqMUFNPdSt966la2XK23nrrEpOd9IYbbigx2Sl8cfu2224LtPfr8N+txbP+2WhRsxYyw0jsDCPV1K9RbcgLN2hxuhcBkTXSC37I/uTaq5CGp9gd2UfOOeecEnvHO94BtIvszBdN1T5dP1kaPaa2K8sdNAuFfeG7LBF+78uS51YGt+xo8fCnPvWpEpMdt6uoRo+MVFO3jmmhv9t2FVPfCI2dx61hspa57U/3vC9Od/vImWeeCbRtjrLoenGmEWj6S0Y4TnkBKt37vuBfltF99923xKSvf5/sOb7nj9q76+P72X384x8H2nYytf0R28Znpana48r2OnXruHTytiqd9ttvvxKTXj7+aJ9Jb/uyTLllzDVUESu3W2mcH3FBm5Fqqj32PObjimyivg+gbHpuY5YV18c9WdDPPvvsEjvllFPKseyk3i7FiO2LM+5TV6apxg5/fhReREV79XlfqbarYhfQ6Ov25G9961sAfOQjHykxL6ChZSozeQZdWSGLKXweM9ZUunVZL3Wfaxz3c3Hr/Q477AC0rby6z93mqOcpH7NVWOkTn/hEiamwm59XF/7uoecFb8+z1HRKTGuNWlVVGwBbAJcDa068xAH8CFhzBT9zIHDgzE9x0fBzYKNo2ivRtH+iaf9E0/55DFMcp6LplJmyphBdp0E07Z/0qf0TTcdENdU3vqqqHgtcAhxT1/WZVVX9tK7rJ9jXH6jreqV+1aqqpv16qZmfN7zhDSX27ne/G2jPjOsN12fIH/47/Niv2xcmazb9ox/9aIk98MADy/3MCLgK2HhUmnYVVPm7v/u7ElOGy2cPNAPpsw2a/fBZYX0OPsvgpbePO+44oF1SeY6KsIxUU5/p1YzOBz/4wRLbaaedgHab7GqfmqHsyvT4rJDrd/zxxwPtRe5ztD3ESDX1YkCaQVP7gSaLPlnJZs2A++yvMj2+XYIyZgCf//zngbbmXYuBR8BINO0qef6qV70KgLe85S0lprbrs7pqi65p16y4ZiV9+4cvfOEL5VgFL8ZQzOZ7dV1vPN1xajJNpYvPih922GFAu/BP10y5+gu/frVJb8/f+MY3gPY2JSpBD032bI7apjMjTSe+Z6W66j71DPjf/u3fAo1jA5oZdu9HlVHzcUrFazw7duKJJwJNgQtoF2zo6pvngrquq1FoqjHaC1VozPftc5QR9qyhNPU2puzZJZdcUmIf+9jHgLYzZiBbFIykT9U97IUvjj76aKBdzEbt2a9fP+s6azskdxxoHHJnwhAK/jAiTeXm8m14jjzySKA9dun+9Ock6ezt9KabbgLgmGOOaU58ovz+uO7xlXBVXddbTfZNUyrPX1XVI4EzgJPruj5zInxPVVVrTXx9LeDeFf18mDLRtH+iaf9E0/6Jpv3y04n/R9f+iKajI5r2TzTtn2g6BqZS9bEC
PgncVNf18fals4ElE8dLgLP6P71FRzTtn2jaP9G0f6LpaIiu/RNN+yea9k807Z9oOgYmtT5WVbUD8HXgekDegiNZ5qs+DVgPWArsV9f1/Z2/pPld087fKs2pBa8AJ598MtBeQKj0vadFZf3x3e5lG3GrjvaggWah+xhSpLexrKrmyDWVJcILgsjy6fumdGkqe4lbRlSEwa062r8Lmv1ZxrBPx0g1dTup0vde5OIDH/gA0F5k7IVrhIoGaOE1NPaxr3zlKyXmBQW69k2ZI0aqqbc1aeX787397W8H2pqqaIvbH+644w6gvQhbC4q9UJAKDj385+eYkWrqNmUVaJEFEuCNb3wj0LadSnvf6+iKK64A4KyzmrFZe049+OCDJTYQ69MvgX2Z5jg1VU19T0gVFnFNX/GKVwDtfQGlrxf7UREGt4uq2IWPQePa4+hhzEhTmLpNV/cyNPZSL8iiPeb8+9RWvUiI9pq6+OKLS0xtdCBaOnswQk29r9RzlNvMZIP0fkKWMreJf+5znwMaaxnMmd1+JoykT5WmWj4CjbVUe59C27InNGZ7cTbZcL1PGGD7FCPVdM01m+VuWvbgVmh9n/eLep73e1/jlFtHB8yUrI9Tqfp4GbCisiYvmu5ZhRXyYF3XPyGa9kk07Z9o2j/RtH9urOtaJf2iaz9E0xEQTUdC+tT+iaZjYsrFRHr5YzPI/ggv0b3FFlsAcMABB5SYSnt6xuLHP/4x0MwEQZOh8BkMf0Mf5+LXqbxZP5zZaOpabb/99gDsvvvuJbbxxhsD7Rmem2++GWhmLKEpaOHFBgYyKzRnmmq2p6sYhped1UycCtRAs9D16quvLjF9fYxZnhUxVk1V9tiLOSgL5/e02qnHNPs7kIXZzpxr2jXL7jEVwdA2ENAUaPBCQAO5z7sYqaaeTe9qpyrF7RliaapxCRq3xwAXuXcxI01hZrpKO89Kqv/0TI608xl0FWwYcPss1HW94vreK2EmLgVlyjwjqQJrXZr6mD7AsWhljPT+9yJVOvaiddLcNVWbHMjz5kwYqaZenE2aekz3suunNjnP2qbTXzGREEIIIYQQQghzR17UQgghhBBCCGFgzBvr48N+D9Cdfna67A8DTjXPufXRUaq+y9LjmknLAevojFVT6de1c73rN0+0FIPQ1Jln+nUxVk0XKNG0f0ZufVyMjNr6uEjJ/d8/0bR/Yn0MIYQQQgghhPnIpFUfh4hm0H2Hdz8O02c+LLqeb6idLoCMz2CIliGEEEJYLCSjFkIIIYQQQggDIy9qIYQQQgghhDAw8qIWQgghhBBCCAMjL2ohhBBCCCGEMDDyohZCCCGEEEIIAyMvaiGEEEIIIYQwMPKiFkIIIYQQQggDY673UbsP+MXE/xcCT6K/a1l/hj8XTVdMNF1GNO2foWi6tOdzGSfRtH+GoCksrPs/mo6GIegaTVdMNF3GnGtazfUGslVVXVnX9VZz+kdHxFCuZSjn0QdDuZahnEcfDOVahnIefTCkaxnSucyGIV3HkM5lNgzpOoZ0LrNhSNcxpHOZLUO5lqGcRx8M5VqGch59MI5rifUxhBBCCCGEEAZGXtRCCCGEEEIIYWCM40XthDH8zVExlGsZynn0wVCuZSjn0QdDuZahnEcfDOlahnQus2FI1zGkc5kNQ7qOIZ3LbBjSdQzpXGbLUK5lKOfRB0O5lqGcRx/M+bXM+Rq1EEIIIYQQQggrJ9bHEEIIIYQQQhgYeVELIYQQQgghhIExpy9qVVXtWlXVd6uquq2qqiPm8m/Phqqq1q2q6qKqqm6squqGqqreOhFfraqq86uqunXi/08cw7lF0/7PLZr2f27zUlMYrq7RdCTnFU37P69o2v95RdPRnNu81DWa9s+gNK3rek7+A1YBvgdsBKwKXAtsOld/f5bnvhaw5cTx44BbgE2B44AjJuJHAO+Z4/OKptE0mi5CXaNpNI2m0TSaRtdouvA1ncuM2jbAbXVdf7+u618DnwX2nsO/P2Pqur67ruurJ45/BtwErM2y8z9p4ttOAvaZ41OLpv0TTftn3moKg9U1mvZPNO2faNo/0XQ0zFtdo2n/DEnTuXxRWxv4of37zonYvKKqqg2ALYDLgTXrur574ks/Atac49OJpv0TTftnQWgKg9I1mvZPNO2faNo/0XQ0LAhdo2n/jFvTFBOZBlVVPRY4Azi0ruuH/Gv1sjxo9jqYJtG0f6LpaIiu/RNN+yea9k807Z9o2j/RtH+GoOlcvqjdBaxr/15nIjYvqKrqkSz7sE6u6/rMifA9VVWtNfH1tYB75/i0omn/RNP+mdeawiB1jab9E037J5r2TzQdDfNa12jaP0PRdC5f1K4ANqmqasOqqlYF9gfOnsO/P2OqqqqATwI31XV9vH3pbGDJxPES4Kw5PrVo2j/RtH/mraYwWF2jaf9E0/6Jpv0TTUfDvNU1mvbPoDTtqyrJVP4DdmdZ5ZTvAUfN5d+e5XnvwLL05nXANRP/7Q6sDlwI3ApcAKw2hnOLptE0mi5CXaNpNI2m0TSaRtdourA1rSZOKIQQQgghhBDCQEgxkRBCCCGEEEIYGHlRCyGEEEIIIYSBkRe1EEIIIYQQQhgYeVELIYQQQgghhIGRF7UQQgghhBBCGBh5UQshhBBCCCGEgZEXtRBCCCGEEEIYGP8fXaTHKTFPSgIAAAAASUVORK5CYII=\n", + "text/plain": [ + "
" + ] + }, + "metadata": { + "needs_background": "light" + }, + "output_type": "display_data" + } + ], + "source": [ + "idx1 = 1\n", + "idx2 = 5\n", + "\n", + "N = 10\n", + "\n", + "encoding1 = model.encode(get_sample(idx1)[None])\n", + "encoding2 = model.encode(get_sample(idx2)[None])\n", + "\n", + "fig, axes = plt.subplots(1, N)\n", + "\n", + "for i in range(10):\n", + " beta = float(i) / float(N - 1)\n", + " alpha = 1.0 - beta\n", + " \n", + " combined = model.decoder(encoding1 * alpha + encoding2 * beta)[0]\n", + " \n", + " show_image(axes[i], combined)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.8" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} From eebb84cdb34b13952796f3a2f6856ac0f32773b9 Mon Sep 17 00:00:00 2001 From: Million Integrals Date: Tue, 9 Apr 2019 08:30:05 -0700 Subject: [PATCH 022/162] Small README updates. --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 597aa4e8..f81728f4 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,4 @@ -# Vel 0.3 +# Vel 0.4 [![Build Status](https://travis-ci.org/MillionIntegrals/vel.svg?branch=master)](https://travis-ci.org/MillionIntegrals/vel) [![PyPI version](https://badge.fury.io/py/vel.svg)](https://badge.fury.io/py/vel) @@ -121,7 +121,7 @@ that are ready to run and easy to modify for other similar usecases: # Implemented models - Unsupervised learning -- Autoencoders and Variational autoencoders with an examples on MNIST dataset. +- Autoencoders and Variational autoencoders with examples on MNIST dataset. # Examples From 9c26921ff261319c453b2affca48bf444705be11 Mon Sep 17 00:00:00 2001 From: Million Integrals Date: Wed, 10 Apr 2019 07:59:30 -0700 Subject: [PATCH 023/162] Small autoencoder changes. 
--- .../autoencoders/mnist/mnist_cnn_autoencoder.yaml | 2 +- examples-configs/autoencoders/mnist/mnist_cnn_vae.yaml | 2 +- vel/models/autoencoder/mnist_cnn_autoencoder.py | 4 +++- 3 files changed, 5 insertions(+), 3 deletions(-) diff --git a/examples-configs/autoencoders/mnist/mnist_cnn_autoencoder.yaml b/examples-configs/autoencoders/mnist/mnist_cnn_autoencoder.yaml index 00501f4c..ea1782b4 100644 --- a/examples-configs/autoencoders/mnist/mnist_cnn_autoencoder.yaml +++ b/examples-configs/autoencoders/mnist/mnist_cnn_autoencoder.yaml @@ -13,8 +13,8 @@ model: source: name: vel.sources.vision.mnist batch_size: 128 - normalize: False num_workers: 4 + normalize: False unsupervised: true diff --git a/examples-configs/autoencoders/mnist/mnist_cnn_vae.yaml b/examples-configs/autoencoders/mnist/mnist_cnn_vae.yaml index 14224ef0..74c499c9 100644 --- a/examples-configs/autoencoders/mnist/mnist_cnn_vae.yaml +++ b/examples-configs/autoencoders/mnist/mnist_cnn_vae.yaml @@ -20,7 +20,7 @@ source: optimizer: name: vel.optimizers.adam - lr: 1.0e-4 + lr: 1.0e-3 commands: diff --git a/vel/models/autoencoder/mnist_cnn_autoencoder.py b/vel/models/autoencoder/mnist_cnn_autoencoder.py index 1ad5536f..fa90e4d5 100644 --- a/vel/models/autoencoder/mnist_cnn_autoencoder.py +++ b/vel/models/autoencoder/mnist_cnn_autoencoder.py @@ -56,6 +56,7 @@ def __init__(self, img_rows, img_cols, img_channels, channels=None, representati ), nn.ReLU(True), nn.ConvTranspose2d(in_channels=channels[0], out_channels=img_channels, kernel_size=3, padding=1), + nn.Sigmoid() ) @staticmethod @@ -85,7 +86,8 @@ def decode(self, sample): def loss_value(self, x_data, y_true, y_pred): """ Calculate a value of loss function """ - return F.mse_loss(y_pred, y_true) + # return F.mse_loss(y_pred, y_true) + return F.binary_cross_entropy(y_pred, y_true) def metrics(self): """ Set of metrics for this model """ From 053bc3c1a7137d0407ee107fcfbc834a07038e55 Mon Sep 17 00:00:00 2001 From: Million Integrals Date: Sun, 14 Apr 2019 21:42:33 -0700 Subject: [PATCH 024/162] Download data. 
--- .../cats_vs_dogs_resnet34.yaml | 3 ++- vel/sources/img_dir_source.py | 25 ++++++++++++++++++- 2 files changed, 26 insertions(+), 2 deletions(-) diff --git a/examples-configs/classification/imagenet_transfer/cats_vs_dogs_resnet34.yaml b/examples-configs/classification/imagenet_transfer/cats_vs_dogs_resnet34.yaml index c10bcd23..41860c8b 100644 --- a/examples-configs/classification/imagenet_transfer/cats_vs_dogs_resnet34.yaml +++ b/examples-configs/classification/imagenet_transfer/cats_vs_dogs_resnet34.yaml @@ -10,7 +10,8 @@ model: source: name: vel.sources.img_dir_source - # Dataset downloaded from http://files.fast.ai/data/dogscats.zip + url: http://files.fast.ai/data/dogscats.zip + extract_parent: true path: data/dogscats num_workers: 8 batch_size: 64 diff --git a/vel/sources/img_dir_source.py b/vel/sources/img_dir_source.py index 1e0c138e..bbca4ad6 100644 --- a/vel/sources/img_dir_source.py +++ b/vel/sources/img_dir_source.py @@ -1,15 +1,19 @@ import os.path +import zipfile import torchvision.datasets as ds +import torchvision.datasets.utils as ds_util from vel.api import SupervisedTrainingData class ImageDirSource(ds.ImageFolder): + """ Source where images are grouped by class in folders """ pass -def create(model_config, path, num_workers, batch_size, augmentations=None, tta=None): +def create(model_config, path, num_workers, batch_size, augmentations=None, tta=None, url=None, + extract_parent=False): """ Create an ImageDirSource with supplied arguments """ if not os.path.isabs(path): path = model_config.project_top_dir(path) @@ -17,6 +21,25 @@ def create(model_config, path, num_workers, batch_size, augmentations=None, tta= train_path = os.path.join(path, 'train') valid_path = os.path.join(path, 'valid') + if not os.path.exists(train_path) or not os.path.exists(valid_path): + filename = url.rpartition('/')[2] + ds_util.download_url(url, root=path, filename=filename) + + full_archive_path = os.path.join(path, filename) + + # Unpack zip archive + if full_archive_path.endswith(".zip"): + zip_ref = zipfile.ZipFile(full_archive_path, 'r') + + if extract_parent: + zip_ref.extractall(os.path.dirname(path)) + else: + zip_ref.extractall(path) + + zip_ref.close() + + os.remove(full_archive_path) + train_ds = ImageDirSource(train_path) val_ds = ImageDirSource(valid_path) From 457724cebb2515ed495e60d935b4015885e90645 Mon Sep 17 00:00:00 2001 From: Million Integrals Date: Thu, 18 Apr 2019 10:35:16 -0700 Subject: [PATCH 025/162] Additional parameter to the IMDB data set. --- vel/sources/nlp/imdb.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/vel/sources/nlp/imdb.py b/vel/sources/nlp/imdb.py index c6ac6fe9..92eb2384 100644 --- a/vel/sources/nlp/imdb.py +++ b/vel/sources/nlp/imdb.py @@ -45,9 +45,9 @@ def __init__(self, path, text_field, label_field, **kwargs): data.Dataset.__init__(self, examples, fields, **kwargs) -def create(model_config, batch_size, vectors=None): +def create(model_config, batch_size, data_dir='imdb', vectors=None): """ Create an IMDB dataset """ - path = model_config.data_dir('imdb') + path = model_config.data_dir(data_dir) text_field = data.Field(lower=True, tokenize='spacy', batch_first=True) label_field = data.LabelField(is_target=True) From d9619db3ebb6eb85f4f5fb6e17516ea7949ed59b Mon Sep 17 00:00:00 2001 From: Million Integrals Date: Thu, 18 Apr 2019 10:35:37 -0700 Subject: [PATCH 026/162] New "script" model config. 
--- vel/notebook/__init__.py | 2 +- vel/notebook/loader.py | 9 +++++++++ 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/vel/notebook/__init__.py b/vel/notebook/__init__.py index 3b31f630..68058abb 100644 --- a/vel/notebook/__init__.py +++ b/vel/notebook/__init__.py @@ -1,2 +1,2 @@ -from .loader import load_config +from .loader import load_config, script from .defaults import reasonable_notbook_defaults diff --git a/vel/notebook/loader.py b/vel/notebook/loader.py index 55bd13fc..d28c048d 100644 --- a/vel/notebook/loader.py +++ b/vel/notebook/loader.py @@ -8,3 +8,12 @@ def load_config(config_path, run_number=0, device='cuda:0'): run_number=run_number, device=device ) + + +def script(model_name: str = 'script', run_number=0, device='cuda:0'): + """ Create an ad-hoc script model config """ + return ModelConfig.script( + model_name=model_name, + run_number=run_number, + device=device + ) From 77347483d828ebba91edb996a6e16dc3dce8b311 Mon Sep 17 00:00:00 2001 From: Million Integrals Date: Sat, 4 May 2019 07:45:26 -0700 Subject: [PATCH 027/162] Small formatting change. --- .../reinforcers/buffered_off_policy_iteration_reinforcer.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/vel/rl/reinforcers/buffered_off_policy_iteration_reinforcer.py b/vel/rl/reinforcers/buffered_off_policy_iteration_reinforcer.py index f8fc81e9..d9f873b3 100644 --- a/vel/rl/reinforcers/buffered_off_policy_iteration_reinforcer.py +++ b/vel/rl/reinforcers/buffered_off_policy_iteration_reinforcer.py @@ -111,7 +111,8 @@ def roll_out_and_store(self, batch_info): self.model.train() if self.env_roller.is_ready_for_sampling(): - rollout = self.env_roller.rollout(batch_info, self.model, self.settings.rollout_steps).to_device(self.device) + rollout = self.env_roller.rollout(batch_info, self.model, self.settings.rollout_steps) + rollout = rollout.to_device(self.device) # Store some information about the rollout, no training phase batch_info['frames'] = rollout.frames() @@ -122,7 +123,8 @@ def roll_out_and_store(self, batch_info): with tqdm.tqdm(desc="Populating memory", total=self.env_roller.initial_memory_size_hint()) as pbar: while not self.env_roller.is_ready_for_sampling(): - rollout = self.env_roller.rollout(batch_info, self.model, self.settings.rollout_steps).to_device(self.device) + rollout = self.env_roller.rollout(batch_info, self.model, self.settings.rollout_steps) + rollout = rollout.to_device(self.device) new_frames = rollout.frames() frames += new_frames From 01762d7eee833a39d16526fc9ade7f1335677c00 Mon Sep 17 00:00:00 2001 From: Million Integrals Date: Sat, 4 May 2019 07:45:41 -0700 Subject: [PATCH 028/162] Change initial memory size hint for parallel envs. 
--- vel/rl/buffers/circular_replay_buffer.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/vel/rl/buffers/circular_replay_buffer.py b/vel/rl/buffers/circular_replay_buffer.py index 6595e0c5..b5eed39a 100644 --- a/vel/rl/buffers/circular_replay_buffer.py +++ b/vel/rl/buffers/circular_replay_buffer.py @@ -35,7 +35,7 @@ def is_ready_for_sampling(self) -> bool: def initial_memory_size_hint(self) -> typing.Optional[int]: """ Hint how much data is needed to begin sampling, required only for diagnostics """ - return self.buffer_initial_size + return self.buffer_initial_size * self.backend.num_envs def _get_transitions(self, indexes): """ Return batch with given indexes """ From ba975251ecc8f77abf83415b74798ea9faf705a1 Mon Sep 17 00:00:00 2001 From: Million Integrals Date: Sat, 4 May 2019 07:45:54 -0700 Subject: [PATCH 029/162] Small profiling utility. --- vel/util/profiling.py | 15 +++++++++++++++ 1 file changed, 15 insertions(+) create mode 100644 vel/util/profiling.py diff --git a/vel/util/profiling.py b/vel/util/profiling.py new file mode 100644 index 00000000..0c21a5fb --- /dev/null +++ b/vel/util/profiling.py @@ -0,0 +1,15 @@ +import contextlib +import time + + +@contextlib.contextmanager +def timing_context(label=None): + """ Measure time of expression as a context """ + start = time.time() + yield + end = time.time() + + if label is None: + print("Context took {:.2f}s".format(end - start)) + else: + print("{} took {:.2f}s".format(label, end - start)) From ff6f81431403ac49549b3d73a3ec0313be4fba97 Mon Sep 17 00:00:00 2001 From: Million Integrals Date: Sat, 4 May 2019 07:46:17 -0700 Subject: [PATCH 030/162] Machine translation datasets. --- vel/sources/nlp/imdb.py | 4 +- vel/sources/nlp/multi30k.py | 88 +++++++++++++++++++++++++++++++++++++ vel/sources/nlp/wmt14.py | 88 +++++++++++++++++++++++++++++++++++++ 3 files changed, 178 insertions(+), 2 deletions(-) create mode 100644 vel/sources/nlp/multi30k.py create mode 100644 vel/sources/nlp/wmt14.py diff --git a/vel/sources/nlp/imdb.py b/vel/sources/nlp/imdb.py index 92eb2384..ff351b0a 100644 --- a/vel/sources/nlp/imdb.py +++ b/vel/sources/nlp/imdb.py @@ -3,14 +3,14 @@ import io import pickle -import torchtext.datasets.imdb as imdb import torchtext.data as data +import torchtext.datasets as ds from vel.api import SupervisedTextData -class IMDBCached(imdb.IMDB): +class IMDBCached(ds.IMDB): """ Cached version of the IMDB dataset (to save time on tokenization) """ def __init__(self, path, text_field, label_field, **kwargs): diff --git a/vel/sources/nlp/multi30k.py b/vel/sources/nlp/multi30k.py new file mode 100644 index 00000000..c52f5ae8 --- /dev/null +++ b/vel/sources/nlp/multi30k.py @@ -0,0 +1,88 @@ +import io +import os.path +import pickle +import re +import spacy + +import torchtext.data as data +import torchtext.datasets as ds + +from vel.api import SupervisedTextData + + +class Multi30kCached(ds.Multi30k): + """ Cached version of the Multi30K dataset, to save time on tokenization every time """ + + def __init__(self, path, exts, fields, **kwargs): + # Each one is a + if os.path.isdir(path): + cache_file = os.path.join(path, '_cache.pk') + else: + cache_file = path + '_cache.pk' + + if not isinstance(fields[0], (tuple, list)): + fields = [('src', fields[0]), ('trg', fields[1])] + + if os.path.exists(cache_file): + with open(cache_file, 'rb') as fp: + examples = pickle.load(fp) + else: + src_path, trg_path = tuple(os.path.expanduser(path + x) for x in exts) + + examples = [] + + with io.open(src_path, mode='r', 
encoding='utf-8') as src_file, \ + io.open(trg_path, mode='r', encoding='utf-8') as trg_file: + for src_line, trg_line in zip(src_file, trg_file): + src_line, trg_line = src_line.strip(), trg_line.strip() + if src_line != '' and trg_line != '': + examples.append(data.Example.fromlist( + [src_line, trg_line], fields)) + + with open(cache_file, 'wb') as fp: + pickle.dump(examples, file=fp) + + data.Dataset.__init__(self, examples, fields, **kwargs) + + +def create(model_config, batch_size, data_dir='wmt14'): + """ Create an Multi30k dataset. English-German """ + path = model_config.data_dir(data_dir) + + spacy_de = spacy.load('de') + spacy_en = spacy.load('en') + + url = re.compile('(.*)') + + def tokenize_de(text): + return [tok.text for tok in spacy_de.tokenizer(url.sub('@URL@', text))] + + def tokenize_en(text): + return [tok.text for tok in spacy_en.tokenizer(url.sub('@URL@', text))] + + en_field = data.Field( + lower=True, tokenize=tokenize_en, batch_first=True, init_token='', eos_token='' + ) + + de_field = data.Field( + lower=True, tokenize=tokenize_de, batch_first=True, init_token='', eos_token='' + ) + + train_source, val_source, test_source = Multi30kCached.splits( + root=path, + exts=('.en', '.de'), + fields=(en_field, de_field) + ) + + en_field.build_vocab(train_source.src, min_freq=2) + de_field.build_vocab(train_source.tgt, max_size=17_000) + + train_iter, val_iter, test_iter = data.BucketIterator.splits( + (train_source, val_source, test_source), + batch_size=batch_size, + repeat=False + ) + + return SupervisedTextData( + train_source, val_source, train_iter, val_iter, en_field, de_field + ) diff --git a/vel/sources/nlp/wmt14.py b/vel/sources/nlp/wmt14.py new file mode 100644 index 00000000..1d1f87f8 --- /dev/null +++ b/vel/sources/nlp/wmt14.py @@ -0,0 +1,88 @@ +import io +import os.path +import pickle +import re +import spacy + +import torchtext.data as data +import torchtext.datasets as ds + +from vel.api import SupervisedTextData + + +class WMT14Cached(ds.WMT14): + """ Cached version of the WMT14 dataset, to save time on tokenization every time """ + + def __init__(self, path, exts, fields, **kwargs): + # Each one is a + if os.path.isdir(path): + cache_file = os.path.join(path, '_cache.pk') + else: + cache_file = path + '_cache.pk' + + if not isinstance(fields[0], (tuple, list)): + fields = [('src', fields[0]), ('trg', fields[1])] + + if os.path.exists(cache_file): + with open(cache_file, 'rb') as fp: + examples = pickle.load(fp) + else: + src_path, trg_path = tuple(os.path.expanduser(path + x) for x in exts) + + examples = [] + + with io.open(src_path, mode='r', encoding='utf-8') as src_file, \ + io.open(trg_path, mode='r', encoding='utf-8') as trg_file: + for src_line, trg_line in zip(src_file, trg_file): + src_line, trg_line = src_line.strip(), trg_line.strip() + if src_line != '' and trg_line != '': + examples.append(data.Example.fromlist( + [src_line, trg_line], fields)) + + with open(cache_file, 'wb') as fp: + pickle.dump(examples, file=fp) + + data.Dataset.__init__(self, examples, fields, **kwargs) + + +def create(model_config, batch_size, data_dir='wmt14'): + """ Create an WMT14 dataset. 
English-German """ + path = model_config.data_dir(data_dir) + + spacy_de = spacy.load('de') + spacy_en = spacy.load('en') + + url = re.compile('(.*)') + + def tokenize_de(text): + return [tok.text for tok in spacy_de.tokenizer(url.sub('@URL@', text))] + + def tokenize_en(text): + return [tok.text for tok in spacy_en.tokenizer(url.sub('@URL@', text))] + + en_field = data.Field( + lower=True, tokenize=tokenize_en, batch_first=True, init_token='', eos_token='' + ) + + de_field = data.Field( + lower=True, tokenize=tokenize_de, batch_first=True, init_token='', eos_token='' + ) + + train_source, val_source, test_source = WMT14Cached.splits( + root=path, + exts=('.en', '.de'), + fields=(en_field, de_field) + ) + + en_field.build_vocab(train_source.src, min_freq=2) + de_field.build_vocab(train_source.tgt, max_size=17_000) + + train_iter, val_iter, test_iter = data.BucketIterator.splits( + (train_source, val_source, test_source), + batch_size=batch_size, + repeat=False + ) + + return SupervisedTextData( + train_source, val_source, train_iter, val_iter, en_field, de_field + ) From dee86ab12a60240769f0fa585eb534023514eb0e Mon Sep 17 00:00:00 2001 From: Jerry Tworek Date: Thu, 16 May 2019 09:11:23 -0700 Subject: [PATCH 031/162] Bumped up version and implemented better tracking of requirements. --- Makefile | 5 ++- requirements.in | 17 +++++++++ requirements.txt | 89 +++++++++++++++++++++--------------------------- setup.py | 6 ++-- 4 files changed, 63 insertions(+), 54 deletions(-) create mode 100644 requirements.in diff --git a/Makefile b/Makefile index e26492f2..f3f23e63 100644 --- a/Makefile +++ b/Makefile @@ -30,4 +30,7 @@ serve-visdom: python -m visdom.server test: - pytest . \ No newline at end of file + pytest . + +requirements.txt: + pip-compile requirements.in \ No newline at end of file diff --git a/requirements.in b/requirements.in new file mode 100644 index 00000000..a1fd3edb --- /dev/null +++ b/requirements.in @@ -0,0 +1,17 @@ +attrs +cloudpickle +matplotlib +numpy +opencv-python +pandas +pyyaml +scikit-learn +torch~=1.1 +torchtext +torchvision +tqdm +visdom +pymongo +dnspython +gym[atari,box2d,classic_control] +pytest diff --git a/requirements.txt b/requirements.txt index d7ae8969..5ab31544 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,61 +1,50 @@ -atari-py==0.1.7 -atomicwrites==1.3.0 +# +# This file is autogenerated by pip-compile +# To update, run: +# +# pip-compile requirements.in +# +atari-py==0.1.7 # via gym +atomicwrites==1.3.0 # via pytest attrs==19.1.0 -bleach==3.1.0 -box2d-py==2.3.8 -certifi==2019.3.9 -chardet==3.0.4 +box2d-py==2.3.8 # via gym +certifi==2019.3.9 # via requests +chardet==3.0.4 # via requests cloudpickle==0.8.0 -cymem==2.0.2 -cytoolz==0.9.0.1 -dill==0.2.9 +cycler==0.10.0 # via matplotlib dnspython==1.16.0 -docutils==0.14 -future==0.17.1 -gym==0.12.0 -idna==2.8 -more-itertools==6.0.0 -msgpack==0.5.6 -msgpack-numpy==0.4.3.2 -murmurhash==1.0.2 +future==0.17.1 # via pyglet +gym[atari,box2d,classic_control]==0.12.1 +idna==2.8 # via requests +kiwisolver==1.1.0 # via matplotlib +matplotlib==3.0.3 +more-itertools==7.0.0 # via pytest numpy==1.16.2 opencv-python==4.0.0.21 pandas==0.24.1 -Pillow==5.4.1 -pkginfo==1.5.0.1 -plac==0.9.6 -pluggy==0.9.0 -preshed==2.0.1 -py==1.8.0 -pyglet==1.3.2 -Pygments==2.3.1 -pymongo==3.7.2 -PyOpenGL==3.1.0 -pytest==4.3.1 -python-dateutil==2.8.0 -pytz==2018.9 -PyYAML==5.1 -pyzmq==18.0.1 -readme-renderer==24.0 -regex==2018.1.10 -requests==2.21.0 -requests-toolbelt==0.9.1 +pillow==5.4.1 # via gym, torchvision, visdom +pluggy==0.11.0 
# via pytest +py==1.8.0 # via pytest +pyglet==1.3.2 # via gym +pymongo==3.8.0 +pyopengl==3.1.0 # via gym +pyparsing==2.4.0 # via matplotlib +pytest==4.5.0 +python-dateutil==2.8.0 # via matplotlib, pandas +pytz==2018.9 # via pandas +pyyaml==5.1 +pyzmq==18.0.1 # via visdom +requests==2.21.0 # via gym, torchtext, visdom scikit-learn==0.20.3 -scipy==1.2.1 -six==1.12.0 -spacy==2.0.18 -thinc==6.12.1 -toolz==0.9.0 -torch==1.0.1.post2 -torchfile==0.1.0 +scipy==1.2.1 # via gym, scikit-learn, visdom +six==1.12.0 # via atari-py, cycler, gym, pytest, python-dateutil, torchvision, visdom, websocket-client +torch==1.1.0 +torchfile==0.1.0 # via visdom torchtext==0.3.1 torchvision==0.2.2.post3 -tornado==6.0.1 +tornado==6.0.2 # via visdom tqdm==4.31.1 -twine==1.13.0 -ujson==1.35 -urllib3==1.24.1 +urllib3==1.24.1 # via requests visdom==0.1.8.8 -webencodings==0.5.1 -websocket-client==0.55.0 -wrapt==1.10.11 +wcwidth==0.1.7 # via pytest +websocket-client==0.56.0 # via visdom diff --git a/setup.py b/setup.py index b336050f..f134a862 100644 --- a/setup.py +++ b/setup.py @@ -16,7 +16,7 @@ setup( name='vel', - version='0.3.0', + version='0.4.0', description="Velocity in deep-learning research", long_description=long_description, url='https://github.com/MillionIntegrals/vel', @@ -34,7 +34,7 @@ 'pandas', 'pyyaml', 'scikit-learn', - 'torch ~= 1.0', + 'torch ~= 1.1', 'torchtext', 'torchvision', 'tqdm' @@ -44,7 +44,7 @@ 'mongo': ['pymongo', 'dnspython'], 'gym': ['gym[atari,box2d,classic_control]'], 'mujoco': ['gym[mujoco,robotics]'], - 'dev': ['pytest', 'ipython', 'jupyter'], + 'dev': ['pytest', 'ipython', 'jupyter', 'pip-tools'], 'text': ['spacy'], 'all': ['visdom', 'pymongo', 'dnspython', 'gym[all]', 'pytest', 'spacy', 'ipython', 'jupyter'] }, From 6f4f74801d2c8c11336974bf8bec80e728c845c9 Mon Sep 17 00:00:00 2001 From: Jerry Tworek Date: Thu, 16 May 2019 09:33:46 -0700 Subject: [PATCH 032/162] Fix makefile. --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index f3f23e63..54ce78e9 100644 --- a/Makefile +++ b/Makefile @@ -33,4 +33,4 @@ test: pytest . requirements.txt: - pip-compile requirements.in \ No newline at end of file + pip-compile requirements.in From 9f314091b71c50b4337f3f8542b68b34758a74a4 Mon Sep 17 00:00:00 2001 From: Jerry Tworek Date: Thu, 16 May 2019 09:33:54 -0700 Subject: [PATCH 033/162] Upgrade cloudpickle. 
--- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index cdec04d4..97b7112f 100644 --- a/requirements.txt +++ b/requirements.txt @@ -10,7 +10,7 @@ attrs==19.1.0 box2d-py==2.3.8 # via gym certifi==2019.3.9 # via requests chardet==3.0.4 # via requests -cloudpickle==0.8.0 +cloudpickle==0.8.1 cycler==0.10.0 # via matplotlib dnspython==1.16.0 future==0.17.1 # via pyglet From fe2d26b278f3af140af8c63f04e852afc759f0b0 Mon Sep 17 00:00:00 2001 From: Million Integrals Date: Sat, 8 Jun 2019 09:38:37 -0700 Subject: [PATCH 034/162] Fixing a name bug in stochastic_policy_rnn_model.py(#50) --- vel/rl/models/stochastic_policy_rnn_model.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/vel/rl/models/stochastic_policy_rnn_model.py b/vel/rl/models/stochastic_policy_rnn_model.py index d69e3143..78afc436 100644 --- a/vel/rl/models/stochastic_policy_rnn_model.py +++ b/vel/rl/models/stochastic_policy_rnn_model.py @@ -101,7 +101,7 @@ def step(self, observations, state, argmax_sampling=False): return { 'actions': actions, 'values': value_output, - 'logprobs': logprobs, + 'action:logprobs': logprobs, 'state': new_state } From 38e123146d10d4137107888d9087299fe140584c Mon Sep 17 00:00:00 2001 From: Jerry Tworek Date: Sun, 9 Jun 2019 20:00:11 -0700 Subject: [PATCH 035/162] Redid formatting of image_ops. --- vel/api/data/image_ops.py | 43 +++++++++++++++++++-------------------- 1 file changed, 21 insertions(+), 22 deletions(-) diff --git a/vel/api/data/image_ops.py b/vel/api/data/image_ops.py index 55c86d5e..6bf45d78 100644 --- a/vel/api/data/image_ops.py +++ b/vel/api/data/image_ops.py @@ -4,32 +4,28 @@ def crop_square(im, r, c, sz): - ''' - crop image into a square of size sz, - ''' - return im[r:r+sz, c:c+sz] + """ Crop image into a square of size sz. """ + return im[r:r + sz, c:c + sz] def crop(im, r, c, sz_h, sz_w): - ''' - crop image into a square of size sz, - ''' - return im[r:r+sz_h, c:c+sz_w] + """ Crop image into a of size sz_w x sz_h. """ + return im[r:r + sz_h, c:c + sz_w] def center_crop(im, min_sz=None): """ Returns a center crop of an image""" # return F.center_crop(im, min_sz) - r,c,*_ = im.shape - if min_sz is None: min_sz = min(r,c) - start_r = math.ceil((r-min_sz)/2) - start_c = math.ceil((c-min_sz)/2) + r, c, *_ = im.shape + if min_sz is None: min_sz = min(r, c) + start_r = math.ceil((r - min_sz) / 2) + start_c = math.ceil((c - min_sz) / 2) return crop_square(im, start_r, start_c, min_sz) def scale_to(x, ratio, targ): - '''Calculate dimension of an image during scaling with aspect ratio''' - return max(math.floor(x*ratio), targ) + """ Calculate dimension of an image during scaling with aspect ratio """ + return max(math.floor(x * ratio), targ) def scale_min(im, targ, interpolation=cv2.INTER_AREA): @@ -39,9 +35,9 @@ def scale_min(im, targ, interpolation=cv2.INTER_AREA): im (array): image targ (int): target size """ - r,c,*_ = im.shape + r, c, *_ = im.shape - ratio = targ/min(r,c) + ratio = targ / min(r, c) sz = (scale_to(c, ratio, targ), scale_to(r, ratio, targ)) @@ -59,9 +55,12 @@ def rotate_img(im, deg, mode=cv2.BORDER_CONSTANT, interpolation=cv2.INTER_AREA): Arguments: deg (float): degree to rotate. 
""" - r,c,*_ = im.shape - M = cv2.getRotationMatrix2D((c//2,r//2),deg,1) - return cv2.warpAffine(im,M,(c,r), borderMode=mode, flags=cv2.WARP_FILL_OUTLIERS+interpolation) + r, c, *_ = im.shape + M = cv2.getRotationMatrix2D((c // 2, r // 2), deg, 1) + return cv2.warpAffine( + im, M, (c, r), borderMode=mode, + flags=cv2.WARP_FILL_OUTLIERS + interpolation + ) def pad(img, pad, mode=cv2.BORDER_REFLECT): @@ -78,7 +77,7 @@ def mode_to_cv2(mode='constant'): def lighting(im, b, c): - ''' adjusts image's balance and contrast''' - if b==0 and c==1: return im + """ Adjusts image's balance and contrast. """ + if b == 0 and c == 1: return im mu = np.average(im) - return np.clip((im-mu)*c+mu+b,0.,1.).astype(np.float32) + return np.clip((im - mu) * c + mu + b, 0., 1.).astype(np.float32) From e409b4845577b66efa50577f05a180b0e1c9e80b Mon Sep 17 00:00:00 2001 From: Jerry Tworek Date: Thu, 13 Jun 2019 10:52:54 -0700 Subject: [PATCH 036/162] Rename comment. --- vel/models/autoencoder/mnist_cnn_vae.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/vel/models/autoencoder/mnist_cnn_vae.py b/vel/models/autoencoder/mnist_cnn_vae.py index 0cdfadde..6765c874 100644 --- a/vel/models/autoencoder/mnist_cnn_vae.py +++ b/vel/models/autoencoder/mnist_cnn_vae.py @@ -109,7 +109,7 @@ def forward(self, sample): } def calculate_gradient(self, x_data, y_true): - """ Calculate a value of loss function """ + """ Calculate a gradient of loss function """ output = self(x_data) y_pred = output['decoded'] From 2238dce6ea72ad1bab4a4d596f53d78276c8cdc9 Mon Sep 17 00:00:00 2001 From: Jerry Tworek Date: Thu, 13 Jun 2019 11:28:33 -0700 Subject: [PATCH 037/162] New dependencies. --- Makefile | 7 +++++++ requirements.txt | 47 +++++++++++++++++++++++++---------------------- 2 files changed, 32 insertions(+), 22 deletions(-) diff --git a/Makefile b/Makefile index 54ce78e9..fd79b924 100644 --- a/Makefile +++ b/Makefile @@ -1,3 +1,7 @@ +.PHONY: default test requpgrade + +default: test; + tag := $(shell git symbolic-ref -q --short HEAD) docker-build: @@ -34,3 +38,6 @@ test: requirements.txt: pip-compile requirements.in + +requpgrade: + pip-compile --upgrade diff --git a/requirements.txt b/requirements.txt index 97b7112f..e09fcbe8 100644 --- a/requirements.txt +++ b/requirements.txt @@ -2,49 +2,52 @@ # This file is autogenerated by pip-compile # To update, run: # -# pip-compile requirements.in +# pip-compile # -atari-py==0.1.7 # via gym +atari-py==0.1.15 # via gym atomicwrites==1.3.0 # via pytest attrs==19.1.0 box2d-py==2.3.8 # via gym certifi==2019.3.9 # via requests chardet==3.0.4 # via requests -cloudpickle==0.8.1 +cloudpickle==1.2.1 cycler==0.10.0 # via matplotlib dnspython==1.16.0 future==0.17.1 # via pyglet -gym[atari,box2d,classic_control]==0.12.1 +gym[atari,box2d,classic_control]==0.12.5 idna==2.8 # via requests +importlib-metadata==0.18 # via pluggy, pytest +joblib==0.13.2 # via scikit-learn kiwisolver==1.1.0 # via matplotlib -matplotlib==3.0.3 +matplotlib==3.1.0 more-itertools==7.0.0 # via pytest -numpy==1.16.2 -opencv-python==4.0.0.21 -pandas==0.24.1 -pillow==5.4.1 # via gym, torchvision, visdom -pluggy==0.11.0 # via pytest +numpy==1.16.4 +opencv-python==4.1.0.25 +packaging==19.0 # via pytest +pandas==0.24.2 +pillow==6.0.0 # via gym, torchvision, visdom +pluggy==0.12.0 # via pytest py==1.8.0 # via pytest pyglet==1.3.2 # via gym pymongo==3.8.0 -pyopengl==3.1.0 # via gym -pyparsing==2.4.0 # via matplotlib -pytest==4.5.0 +pyparsing==2.4.0 # via matplotlib, packaging +pytest==4.6.3 python-dateutil==2.8.0 # via matplotlib, 
pandas -pytz==2018.9 # via pandas -pyyaml==5.1 +pytz==2019.1 # via pandas +pyyaml==5.1.1 pyzmq==18.0.1 # via visdom -requests==2.21.0 # via gym, torchtext, visdom -scikit-learn==0.20.3 -scipy==1.2.1 # via gym, scikit-learn, visdom -six==1.12.0 # via atari-py, cycler, gym, pytest, python-dateutil, torchvision, visdom, websocket-client +requests==2.22.0 # via torchtext, visdom +scikit-learn==0.21.2 +scipy==1.3.0 # via gym, scikit-learn, visdom +six==1.12.0 # via atari-py, cycler, gym, packaging, pytest, python-dateutil, torchvision, visdom, websocket-client torch==1.1.0 torchfile==0.1.0 # via visdom torchtext==0.3.1 -torchvision==0.2.2.post3 +torchvision==0.3.0 tornado==6.0.2 # via visdom -tqdm==4.31.1 -urllib3==1.24.2 # via requests +tqdm==4.32.1 +urllib3==1.25.3 # via requests visdom==0.1.8.8 wcwidth==0.1.7 # via pytest websocket-client==0.56.0 # via visdom +zipp==0.5.1 # via importlib-metadata From 950f41c0ef81f163783ef922518e02ea56d8708d Mon Sep 17 00:00:00 2001 From: Jerry Tworek Date: Thu, 13 Jun 2019 11:29:35 -0700 Subject: [PATCH 038/162] Large rename of recurrent models. --- vel/api/__init__.py | 3 +- vel/api/model.py | 94 +++---------------- vel/modules/rnn_cell.py | 4 +- vel/modules/rnn_layer.py | 4 +- vel/rl/api/model.py | 2 +- vel/rl/commands/enjoy.py | 4 +- vel/rl/commands/evaluate_env_command.py | 6 +- vel/rl/commands/record_movie_command.py | 4 +- vel/rl/env_roller/step_env_roller.py | 8 +- .../trajectory_replay_env_roller.py | 2 +- .../transition_replay_env_roller.py | 2 +- vel/rl/models/backbone/lstm.py | 4 +- vel/rl/models/backbone/nature_cnn_rnn.py | 4 +- vel/rl/models/stochastic_policy_rnn_model.py | 7 +- 14 files changed, 40 insertions(+), 108 deletions(-) diff --git a/vel/api/__init__.py b/vel/api/__init__.py index eeaadd26..47c4284a 100644 --- a/vel/api/__init__.py +++ b/vel/api/__init__.py @@ -2,8 +2,7 @@ from .info import BatchInfo, EpochInfo, TrainingInfo from .learner import Learner from .model import ( - Model, SupervisedModel, LossFunctionModel, - BackboneModel, LinearBackboneModel, RnnLinearBackboneModel, RnnModel, RnnSupervisedModel + Model, SupervisedModel, LossFunctionModel, BackboneModel, LinearBackboneModel ) from .model_factory import ModelFactory from .optimizer import OptimizerFactory diff --git a/vel/api/model.py b/vel/api/model.py index 1060170b..699d8a45 100644 --- a/vel/api/model.py +++ b/vel/api/model.py @@ -1,4 +1,3 @@ -import hashlib import torch import torch.nn as nn @@ -34,7 +33,7 @@ def train(self, mode=True): return self - def summary(self, input_size=None, hashsummary=False): + def summary(self, input_size=None): """ Print a model summary """ if input_size is None: @@ -46,23 +45,8 @@ def summary(self, input_size=None, hashsummary=False): else: summary(self, input_size) - if hashsummary: - for idx, hashvalue in enumerate(self.hashsummary()): - print(f"{idx}: {hashvalue}") - - def hashsummary(self): - """ Print a model summary - checksums of each layer parameters """ - children = list(self.children()) - - result = [] - - for child in children: - result.extend(hashlib.sha256(x.detach().cpu().numpy().tobytes()).hexdigest() for x in child.parameters()) - - return result - def get_layer_groups(self): - """ Return layers grouped """ + """ Return layers grouped for optimization purposes """ return [self] def reset_weights(self): @@ -70,15 +54,19 @@ def reset_weights(self): pass @property - def is_recurrent(self) -> bool: - """ If the network is recurrent and needs to be fed state as well as the observations """ + def is_stateful(self) -> bool: + """ 
If the model has a state that needs to be fed between individual observations """ return False class SupervisedModel(Model): """ Model for a supervised learning problem """ - def calculate_gradient(self, x_data, y_true): + def calculate_gradient(self, x_data, y_true) -> dict: + """ + Calculate gradient for given batch of supervised learning. + Returns a dictionary of metrics + """ raise NotImplementedError @@ -89,7 +77,7 @@ def metrics(self) -> list: """ Set of metrics for this model """ return [Loss()] - def calculate_gradient(self, x_data, y_true): + def calculate_gradient(self, x_data, y_true) -> dict: y_pred = self(x_data) loss_value = self.loss_value(x_data, y_true, y_pred) @@ -103,7 +91,7 @@ def calculate_gradient(self, x_data, y_true): 'output': y_pred } - def loss_value(self, x_data, y_true, y_pred): + def loss_value(self, x_data, y_true, y_pred) -> torch.tensor: """ Calculate a value of loss function """ raise NotImplementedError @@ -114,67 +102,11 @@ class BackboneModel(Model): class LinearBackboneModel(BackboneModel): """ - Model that serves as a backbone network to connect your heads to - one that spits out a single-dimension output - """ - - @property - def output_dim(self) -> int: - """ Final dimension of model output """ - raise NotImplementedError - - -class RnnModel(Model): - """ Class representing recurrent model """ - - @property - def is_recurrent(self) -> bool: - """ If the network is recurrent and needs to be fed previous state """ - return True - - @property - def state_dim(self) -> int: - """ Dimension of model state """ - raise NotImplementedError - - def zero_state(self, batch_size): - """ Initial state of the network """ - return torch.zeros(batch_size, self.state_dim) - - -class RnnLinearBackboneModel(BackboneModel): - """ - Model that serves as a backbone network to connect your heads to - - one that spits out a single-dimension output and is a recurrent neural network + Model that serves as a backbone network to connect your heads to. + Has a final output of a single-dimensional linear layer. 
""" - @property - def is_recurrent(self) -> bool: - """ If the network is recurrent and needs to be fed previous state """ - return True - @property def output_dim(self) -> int: """ Final dimension of model output """ raise NotImplementedError - - @property - def state_dim(self) -> int: - """ Dimension of model state """ - raise NotImplementedError - - def zero_state(self, batch_size): - """ Initial state of the network """ - return torch.zeros(batch_size, self.state_dim, dtype=torch.float32) - - -class RnnSupervisedModel(RnnModel): - """ Model for a supervised learning problem """ - - def loss(self, x_data, y_true): - """ Forward propagate network and return a value of loss function """ - y_pred = self(x_data) - return y_pred, self.loss_value(x_data, y_true, y_pred) - - def loss_value(self, x_data, y_true, y_pred): - """ Calculate a value of loss function """ - raise NotImplementedError diff --git a/vel/modules/rnn_cell.py b/vel/modules/rnn_cell.py index 3ee521fb..08d9582e 100644 --- a/vel/modules/rnn_cell.py +++ b/vel/modules/rnn_cell.py @@ -3,10 +3,10 @@ import torch.nn.init as init -from vel.api import RnnLinearBackboneModel +from vel.api import LinearBackboneModel -class RnnCell(RnnLinearBackboneModel): +class RnnCell(LinearBackboneModel): """ Generalization of RNN cell (Simple RNN, LSTM or GRU) """ def __init__(self, input_size, hidden_size, rnn_type, bias=True, nonlinearity='tanh'): diff --git a/vel/modules/rnn_layer.py b/vel/modules/rnn_layer.py index 5c5f6886..7533dc35 100644 --- a/vel/modules/rnn_layer.py +++ b/vel/modules/rnn_layer.py @@ -3,10 +3,10 @@ import torch.nn.init as init -from vel.api import RnnLinearBackboneModel +from vel.api import LinearBackboneModel -class RnnLayer(RnnLinearBackboneModel): +class RnnLayer(LinearBackboneModel): """ Generalization of RNN layer (Simple RNN, LSTM or GRU) """ def __init__(self, input_size, hidden_size, rnn_type, bias=True, bidirectional=False, nonlinearity='tanh'): diff --git a/vel/rl/api/model.py b/vel/rl/api/model.py index c2d02da2..fb5a691c 100644 --- a/vel/rl/api/model.py +++ b/vel/rl/api/model.py @@ -25,7 +25,7 @@ class RlRnnModel(Model): """ Reinforcement learning recurrent model """ @property - def is_recurrent(self) -> bool: + def is_stateful(self) -> bool: """ If the network is recurrent and needs to be fed previous state """ return True diff --git a/vel/rl/commands/enjoy.py b/vel/rl/commands/enjoy.py index c0a37508..a0f056bc 100644 --- a/vel/rl/commands/enjoy.py +++ b/vel/rl/commands/enjoy.py @@ -46,14 +46,14 @@ def run_model(self, model, environment, device): seconds_per_frame = 1.0 / self.fps - if model.is_recurrent: + if model.is_stateful: hidden_state = model.zero_state(1).to(device) while True: observation_array = np.expand_dims(np.array(observation), axis=0) observation_tensor = torch.from_numpy(observation_array).to(device) - if model.is_recurrent: + if model.is_stateful: output = model.step(observation_tensor, hidden_state, **self.sample_args) hidden_state = output['state'] actions = output['actions'] diff --git a/vel/rl/commands/evaluate_env_command.py b/vel/rl/commands/evaluate_env_command.py index eb5ef763..e9d7c2e4 100644 --- a/vel/rl/commands/evaluate_env_command.py +++ b/vel/rl/commands/evaluate_env_command.py @@ -51,12 +51,12 @@ def run(self): observations = env.reset() observations_tensor = torch.from_numpy(observations).to(device) - if model.is_recurrent: + if model.is_stateful: hidden_state = model.zero_state(observations.shape[0]).to(device) with tqdm.tqdm(total=self.takes) as progress_bar: while 
len(episode_rewards) < self.takes: - if model.is_recurrent: + if model.is_stateful: output = model.step(observations_tensor, hidden_state, **self.sample_args) hidden_state = output['state'] actions = output['actions'] @@ -75,7 +75,7 @@ def run(self): episode_lengths.append(info['episode']['l']) progress_bar.update(1) - if model.is_recurrent: + if model.is_stateful: # Zero state belongiong to finished episodes dones_tensor = torch.from_numpy(dones.astype(np.float32)).to(device) hidden_state = hidden_state * (1.0 - dones_tensor.unsqueeze(-1)) diff --git a/vel/rl/commands/record_movie_command.py b/vel/rl/commands/record_movie_command.py index b5c1ca75..78e60d5f 100644 --- a/vel/rl/commands/record_movie_command.py +++ b/vel/rl/commands/record_movie_command.py @@ -50,7 +50,7 @@ def record_take(self, model, env_instance, device, take_number): observation = env_instance.reset() - if model.is_recurrent: + if model.is_stateful: hidden_state = model.zero_state(1).to(device) frames.append(env_instance.render('rgb_array')) @@ -61,7 +61,7 @@ def record_take(self, model, env_instance, device, take_number): observation_array = np.expand_dims(np.array(observation), axis=0) observation_tensor = torch.from_numpy(observation_array).to(device) - if model.is_recurrent: + if model.is_stateful: output = model.step(observation_tensor, hidden_state, **self.sample_args) hidden_state = output['state'] actions = output['actions'] diff --git a/vel/rl/env_roller/step_env_roller.py b/vel/rl/env_roller/step_env_roller.py index b9af23bc..a25cecfe 100644 --- a/vel/rl/env_roller/step_env_roller.py +++ b/vel/rl/env_roller/step_env_roller.py @@ -32,14 +32,14 @@ def rollout(self, batch_info: BatchInfo, model: Model, number_of_steps: int) -> accumulator = TensorAccumulator() episode_information = [] # List of dictionaries with episode information - if self.hidden_state is None and model.is_recurrent: + if self.hidden_state is None and model.is_stateful: self.hidden_state = model.zero_state(self.last_observation.size(0)).to(self.device) # Remember rollout initial state, we'll use that for training as well initial_hidden_state = self.hidden_state for step_idx in range(number_of_steps): - if model.is_recurrent: + if model.is_stateful: step = model.step(self.last_observation.to(self.device), state=self.hidden_state) self.hidden_state = step['state'] else: @@ -61,7 +61,7 @@ def rollout(self, batch_info: BatchInfo, model: Model, number_of_steps: int) -> self.last_observation = torch.from_numpy(new_obs).clone() - if model.is_recurrent: + if model.is_stateful: # Zero out state in environments that have finished self.hidden_state = self.hidden_state * (1.0 - dones_tensor.unsqueeze(-1)).to(self.device) @@ -69,7 +69,7 @@ def rollout(self, batch_info: BatchInfo, model: Model, number_of_steps: int) -> episode_information.append(new_infos) - if model.is_recurrent: + if model.is_stateful: final_values = model.value(self.last_observation.to(self.device), state=self.hidden_state).cpu() else: final_values = model.value(self.last_observation.to(self.device)).cpu() diff --git a/vel/rl/env_roller/trajectory_replay_env_roller.py b/vel/rl/env_roller/trajectory_replay_env_roller.py index 87412ee1..38d1f9aa 100644 --- a/vel/rl/env_roller/trajectory_replay_env_roller.py +++ b/vel/rl/env_roller/trajectory_replay_env_roller.py @@ -32,7 +32,7 @@ def environment(self): @torch.no_grad() def rollout(self, batch_info: BatchInfo, model: RlModel, number_of_steps: int) -> Rollout: """ Calculate env rollout """ - assert not model.is_recurrent, "Replay env roller 
does not support recurrent models" + assert not model.is_stateful, "Replay env roller does not support recurrent models" accumulator = TensorAccumulator() episode_information = [] # List of dictionaries with episode information diff --git a/vel/rl/env_roller/transition_replay_env_roller.py b/vel/rl/env_roller/transition_replay_env_roller.py index fc7e7fba..d64628ae 100644 --- a/vel/rl/env_roller/transition_replay_env_roller.py +++ b/vel/rl/env_roller/transition_replay_env_roller.py @@ -54,7 +54,7 @@ def environment(self): @torch.no_grad() def rollout(self, batch_info: BatchInfo, model: RlModel, number_of_steps: int) -> Rollout: """ Calculate env rollout """ - assert not model.is_recurrent, "Replay env roller does not support recurrent models" + assert not model.is_stateful, "Replay env roller does not support stateful models" accumulator = TensorAccumulator() episode_information = [] # List of dictionaries with episode information diff --git a/vel/rl/models/backbone/lstm.py b/vel/rl/models/backbone/lstm.py index eb2b90b4..874f7ca6 100644 --- a/vel/rl/models/backbone/lstm.py +++ b/vel/rl/models/backbone/lstm.py @@ -1,7 +1,7 @@ -from vel.api import RnnLinearBackboneModel, ModelFactory +from vel.api import LinearBackboneModel, ModelFactory -class LstmBackbone(RnnLinearBackboneModel): +class LstmBackbone(LinearBackboneModel): """ Simple 'LSTM' model backbone """ diff --git a/vel/rl/models/backbone/nature_cnn_rnn.py b/vel/rl/models/backbone/nature_cnn_rnn.py index 837e7a58..832926e4 100644 --- a/vel/rl/models/backbone/nature_cnn_rnn.py +++ b/vel/rl/models/backbone/nature_cnn_rnn.py @@ -1,9 +1,9 @@ -from vel.api import RnnLinearBackboneModel, ModelFactory +from vel.api import LinearBackboneModel, ModelFactory from vel.rl.models.backbone.nature_cnn import NatureCnn from vel.modules.rnn_cell import RnnCell -class NatureCnnRnnBackbone(RnnLinearBackboneModel): +class NatureCnnRnnBackbone(LinearBackboneModel): """ Long-Short-Term Memory rnn cell together with DeepMind-style 'Nature' cnn preprocessing """ diff --git a/vel/rl/models/stochastic_policy_rnn_model.py b/vel/rl/models/stochastic_policy_rnn_model.py index 78afc436..cda7d69a 100644 --- a/vel/rl/models/stochastic_policy_rnn_model.py +++ b/vel/rl/models/stochastic_policy_rnn_model.py @@ -2,7 +2,7 @@ import torch import typing -from vel.api import RnnLinearBackboneModel, ModelFactory, BackboneModel +from vel.api import LinearBackboneModel, ModelFactory, BackboneModel from vel.modules.input.identity import IdentityFactory from vel.rl.api import Rollout, Trajectories, Evaluator, RlRnnModel from vel.rl.modules.action_head import ActionHead @@ -54,7 +54,8 @@ class StochasticPolicyRnnModel(RlRnnModel): RNN version """ - def __init__(self, input_block: BackboneModel, backbone: RnnLinearBackboneModel, action_space: gym.Space): + def __init__(self, input_block: BackboneModel, backbone: LinearBackboneModel, + action_space: gym.Space): super().__init__() self.input_block = input_block @@ -66,7 +67,7 @@ def __init__(self, input_block: BackboneModel, backbone: RnnLinearBackboneModel, ) self.value_head = ValueHead(input_dim=self.backbone.output_dim) - assert self.backbone.is_recurrent, "Backbone must be a recurrent model" + assert self.backbone.is_stateful, "Backbone must be a recurrent model" @property def state_dim(self) -> int: From 8c0428e3d24fb9becae4583bb83059b35ef99c54 Mon Sep 17 00:00:00 2001 From: Jerry Tworek Date: Thu, 13 Jun 2019 11:33:07 -0700 Subject: [PATCH 039/162] Large scale rename and move. 
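Package names drop the plural (augmentations -> augmentation, callbacks -> callback, commands -> command, metrics -> metric, models -> model, modules -> module, optimizers -> optimizer, schedules -> schedule, sources -> source), and the data helpers move out of vel.api into vel.data. Downstream imports shift accordingly; a minimal before/after sketch, using only import lines taken verbatim from the hunks below:

    # before this patch
    from .data import DataFlow        # in vel/api/source.py
    import vel.api.data as data       # in the augmentation modules

    # after this patch
    from vel.data import DataFlow
    import vel.data as data
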
--- vel/api/{data => }/augmentation.py | 0 vel/api/data/__init__.py | 3 --- vel/api/{data => }/dataflow.py | 0 vel/api/metrics/__init__.py | 4 ---- vel/api/source.py | 2 +- vel/{augmentations => augmentation}/__init__.py | 0 vel/{augmentations => augmentation}/center_crop.py | 2 +- vel/{augmentations => augmentation}/normalize.py | 2 +- vel/{augmentations => augmentation}/random_crop.py | 2 +- vel/{augmentations => augmentation}/random_horizontal_flip.py | 2 +- vel/{augmentations => augmentation}/random_lighting.py | 2 +- vel/{augmentations => augmentation}/random_rotate.py | 2 +- vel/{augmentations => augmentation}/random_scale.py | 2 +- vel/{augmentations => augmentation}/scale_min_size.py | 2 +- vel/{augmentations => augmentation}/to_array.py | 2 +- vel/{augmentations => augmentation}/to_tensor.py | 2 +- vel/{augmentations => augmentation}/tta/__init__.py | 0 vel/{augmentations => augmentation}/tta/train_tta.py | 0 vel/{augmentations => augmentation}/unsupervised.py | 2 +- vel/{callbacks => callback}/__init__.py | 0 vel/{callbacks => callback}/time_tracker.py | 0 vel/{commands => command}/__init__.py | 0 vel/{commands => command}/augvis_command.py | 0 vel/{commands => command}/lr_find_command.py | 0 vel/{commands => command}/phase_train_command.py | 0 vel/{commands => command}/rnn/__init__.py | 0 vel/{commands => command}/rnn/generate_text.py | 0 vel/{commands => command}/summary_command.py | 0 vel/{commands => command}/train_command.py | 0 vel/{commands => command}/vis_store_command.py | 0 vel/data/__init__.py | 1 + vel/{api => }/data/image_ops.py | 0 vel/{exceptions.py => exception.py} | 0 vel/{internals => internal}/__init__.py | 0 vel/{internals => internal}/context.py | 0 vel/{internals => internal}/generic_factory.py | 0 vel/{internals => internal}/parser.py | 0 vel/{internals => internal}/provider.py | 0 vel/{internals/tests => internal/test}/__init__.py | 0 vel/{internals/tests => internal/test}/fixture_a.py | 0 vel/{internals/tests => internal/test}/fixture_b.py | 0 vel/{internals/tests => internal/test}/test_parser.py | 0 vel/{internals/tests => internal/test}/test_provider.py | 0 vel/{metrics => metric}/__init__.py | 0 vel/{metrics => metric}/accuracy.py | 0 vel/{api/metrics => metric}/averaging_metric.py | 0 vel/{api/metrics => metric}/base_metric.py | 0 vel/{metrics => metric}/loss_metric.py | 0 vel/{api/metrics => metric}/summing_metric.py | 0 vel/{api/metrics => metric}/value_metric.py | 0 vel/{models => model}/__init__.py | 0 vel/{models => model}/autoencoder/__init__.py | 0 vel/{models => model}/autoencoder/mnist_cnn_autoencoder.py | 0 vel/{models => model}/autoencoder/mnist_cnn_vae.py | 0 vel/{models => model}/imagenet/__init__.py | 0 vel/{models => model}/imagenet/resnet34.py | 0 vel/{models => model}/rnn/__init__.py | 0 .../rnn/multilayer_rnn_sequence_classification.py | 0 vel/{models => model}/rnn/multilayer_rnn_sequence_model.py | 0 vel/{models => model}/vision/__init__.py | 0 vel/{models => model}/vision/cifar10_cnn_01.py | 0 vel/{models => model}/vision/cifar_resnet_v1.py | 0 vel/{models => model}/vision/cifar_resnet_v2.py | 0 vel/{models => model}/vision/cifar_resnext.py | 0 vel/{models => model}/vision/mnist_cnn_01.py | 0 vel/{modules => module}/__init__.py | 0 vel/{modules => module}/input/__init__.py | 0 vel/{modules => module}/input/embedding.py | 0 vel/{modules => module}/input/identity.py | 0 vel/{modules => module}/input/image_to_tensor.py | 0 vel/{modules => module}/input/normalize_observations.py | 0 vel/{modules => module}/input/one_hot_encoding.py | 0 
vel/{modules => module}/layers.py | 0 vel/{modules => module}/resnet_v1.py | 0 vel/{modules => module}/resnet_v2.py | 0 vel/{modules => module}/resnext.py | 0 vel/{modules => module}/rnn_cell.py | 0 vel/{modules => module}/rnn_layer.py | 0 vel/{optimizers => optimizer}/__init__.py | 0 vel/{optimizers => optimizer}/adadelta.py | 0 vel/{optimizers => optimizer}/adam.py | 0 vel/{optimizers => optimizer}/rmsprop.py | 0 vel/{optimizers => optimizer}/rmsprop_tf.py | 0 vel/{optimizers => optimizer}/sgd.py | 0 vel/{schedules => schedule}/__init__.py | 0 vel/{schedules => schedule}/constant.py | 0 vel/{schedules => schedule}/linear.py | 0 vel/{schedules => schedule}/linear_and_constant.py | 0 vel/{sources => source}/__init__.py | 0 vel/{sources => source}/img_dir_source.py | 0 vel/{sources => source}/nlp/__init__.py | 0 vel/{sources => source}/nlp/imdb.py | 0 vel/{sources => source}/nlp/multi30k.py | 0 vel/{sources => source}/nlp/text_url.py | 0 vel/{sources => source}/nlp/wmt14.py | 0 vel/{sources => source}/vision/__init__.py | 0 vel/{sources => source}/vision/cifar10.py | 0 vel/{sources => source}/vision/mnist.py | 0 98 files changed, 13 insertions(+), 19 deletions(-) rename vel/api/{data => }/augmentation.py (100%) delete mode 100644 vel/api/data/__init__.py rename vel/api/{data => }/dataflow.py (100%) delete mode 100644 vel/api/metrics/__init__.py rename vel/{augmentations => augmentation}/__init__.py (100%) rename vel/{augmentations => augmentation}/center_crop.py (95%) rename vel/{augmentations => augmentation}/normalize.py (95%) rename vel/{augmentations => augmentation}/random_crop.py (99%) rename vel/{augmentations => augmentation}/random_horizontal_flip.py (96%) rename vel/{augmentations => augmentation}/random_lighting.py (96%) rename vel/{augmentations => augmentation}/random_rotate.py (96%) rename vel/{augmentations => augmentation}/random_scale.py (97%) rename vel/{augmentations => augmentation}/scale_min_size.py (95%) rename vel/{augmentations => augmentation}/to_array.py (94%) rename vel/{augmentations => augmentation}/to_tensor.py (95%) rename vel/{augmentations => augmentation}/tta/__init__.py (100%) rename vel/{augmentations => augmentation}/tta/train_tta.py (100%) rename vel/{augmentations => augmentation}/unsupervised.py (91%) rename vel/{callbacks => callback}/__init__.py (100%) rename vel/{callbacks => callback}/time_tracker.py (100%) rename vel/{commands => command}/__init__.py (100%) rename vel/{commands => command}/augvis_command.py (100%) rename vel/{commands => command}/lr_find_command.py (100%) rename vel/{commands => command}/phase_train_command.py (100%) rename vel/{commands => command}/rnn/__init__.py (100%) rename vel/{commands => command}/rnn/generate_text.py (100%) rename vel/{commands => command}/summary_command.py (100%) rename vel/{commands => command}/train_command.py (100%) rename vel/{commands => command}/vis_store_command.py (100%) create mode 100644 vel/data/__init__.py rename vel/{api => }/data/image_ops.py (100%) rename vel/{exceptions.py => exception.py} (100%) rename vel/{internals => internal}/__init__.py (100%) rename vel/{internals => internal}/context.py (100%) rename vel/{internals => internal}/generic_factory.py (100%) rename vel/{internals => internal}/parser.py (100%) rename vel/{internals => internal}/provider.py (100%) rename vel/{internals/tests => internal/test}/__init__.py (100%) rename vel/{internals/tests => internal/test}/fixture_a.py (100%) rename vel/{internals/tests => internal/test}/fixture_b.py (100%) rename vel/{internals/tests 
=> internal/test}/test_parser.py (100%) rename vel/{internals/tests => internal/test}/test_provider.py (100%) rename vel/{metrics => metric}/__init__.py (100%) rename vel/{metrics => metric}/accuracy.py (100%) rename vel/{api/metrics => metric}/averaging_metric.py (100%) rename vel/{api/metrics => metric}/base_metric.py (100%) rename vel/{metrics => metric}/loss_metric.py (100%) rename vel/{api/metrics => metric}/summing_metric.py (100%) rename vel/{api/metrics => metric}/value_metric.py (100%) rename vel/{models => model}/__init__.py (100%) rename vel/{models => model}/autoencoder/__init__.py (100%) rename vel/{models => model}/autoencoder/mnist_cnn_autoencoder.py (100%) rename vel/{models => model}/autoencoder/mnist_cnn_vae.py (100%) rename vel/{models => model}/imagenet/__init__.py (100%) rename vel/{models => model}/imagenet/resnet34.py (100%) rename vel/{models => model}/rnn/__init__.py (100%) rename vel/{models => model}/rnn/multilayer_rnn_sequence_classification.py (100%) rename vel/{models => model}/rnn/multilayer_rnn_sequence_model.py (100%) rename vel/{models => model}/vision/__init__.py (100%) rename vel/{models => model}/vision/cifar10_cnn_01.py (100%) rename vel/{models => model}/vision/cifar_resnet_v1.py (100%) rename vel/{models => model}/vision/cifar_resnet_v2.py (100%) rename vel/{models => model}/vision/cifar_resnext.py (100%) rename vel/{models => model}/vision/mnist_cnn_01.py (100%) rename vel/{modules => module}/__init__.py (100%) rename vel/{modules => module}/input/__init__.py (100%) rename vel/{modules => module}/input/embedding.py (100%) rename vel/{modules => module}/input/identity.py (100%) rename vel/{modules => module}/input/image_to_tensor.py (100%) rename vel/{modules => module}/input/normalize_observations.py (100%) rename vel/{modules => module}/input/one_hot_encoding.py (100%) rename vel/{modules => module}/layers.py (100%) rename vel/{modules => module}/resnet_v1.py (100%) rename vel/{modules => module}/resnet_v2.py (100%) rename vel/{modules => module}/resnext.py (100%) rename vel/{modules => module}/rnn_cell.py (100%) rename vel/{modules => module}/rnn_layer.py (100%) rename vel/{optimizers => optimizer}/__init__.py (100%) rename vel/{optimizers => optimizer}/adadelta.py (100%) rename vel/{optimizers => optimizer}/adam.py (100%) rename vel/{optimizers => optimizer}/rmsprop.py (100%) rename vel/{optimizers => optimizer}/rmsprop_tf.py (100%) rename vel/{optimizers => optimizer}/sgd.py (100%) rename vel/{schedules => schedule}/__init__.py (100%) rename vel/{schedules => schedule}/constant.py (100%) rename vel/{schedules => schedule}/linear.py (100%) rename vel/{schedules => schedule}/linear_and_constant.py (100%) rename vel/{sources => source}/__init__.py (100%) rename vel/{sources => source}/img_dir_source.py (100%) rename vel/{sources => source}/nlp/__init__.py (100%) rename vel/{sources => source}/nlp/imdb.py (100%) rename vel/{sources => source}/nlp/multi30k.py (100%) rename vel/{sources => source}/nlp/text_url.py (100%) rename vel/{sources => source}/nlp/wmt14.py (100%) rename vel/{sources => source}/vision/__init__.py (100%) rename vel/{sources => source}/vision/cifar10.py (100%) rename vel/{sources => source}/vision/mnist.py (100%) diff --git a/vel/api/data/augmentation.py b/vel/api/augmentation.py similarity index 100% rename from vel/api/data/augmentation.py rename to vel/api/augmentation.py diff --git a/vel/api/data/__init__.py b/vel/api/data/__init__.py deleted file mode 100644 index 76bebdab..00000000 --- a/vel/api/data/__init__.py +++ 
/dev/null @@ -1,3 +0,0 @@ -from .augmentation import Augmentation -from .dataflow import DataFlow -from .image_ops import * \ No newline at end of file diff --git a/vel/api/data/dataflow.py b/vel/api/dataflow.py similarity index 100% rename from vel/api/data/dataflow.py rename to vel/api/dataflow.py diff --git a/vel/api/metrics/__init__.py b/vel/api/metrics/__init__.py deleted file mode 100644 index f496db8a..00000000 --- a/vel/api/metrics/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -from .base_metric import BaseMetric -from .averaging_metric import AveragingMetric, AveragingNamedMetric, AveragingSupervisedMetric -from .value_metric import ValueMetric -from .summing_metric import SummingMetric, SummingNamedMetric diff --git a/vel/api/source.py b/vel/api/source.py index a566521a..9396b487 100644 --- a/vel/api/source.py +++ b/vel/api/source.py @@ -1,6 +1,6 @@ import torch.utils.data as data -from .data import DataFlow +from vel.data import DataFlow class Source: diff --git a/vel/augmentations/__init__.py b/vel/augmentation/__init__.py similarity index 100% rename from vel/augmentations/__init__.py rename to vel/augmentation/__init__.py diff --git a/vel/augmentations/center_crop.py b/vel/augmentation/center_crop.py similarity index 95% rename from vel/augmentations/center_crop.py rename to vel/augmentation/center_crop.py index b586c61b..21cf33bd 100644 --- a/vel/augmentations/center_crop.py +++ b/vel/augmentation/center_crop.py @@ -3,7 +3,7 @@ https://github.com/fastai/fastai/blob/master/fastai/transforms.py """ -import vel.api.data as data +import vel.data as data class CenterCrop(data.Augmentation): diff --git a/vel/augmentations/normalize.py b/vel/augmentation/normalize.py similarity index 95% rename from vel/augmentations/normalize.py rename to vel/augmentation/normalize.py index 2a1333d9..36ca29fd 100644 --- a/vel/augmentations/normalize.py +++ b/vel/augmentation/normalize.py @@ -1,6 +1,6 @@ import numpy as np -import vel.api.data as data +import vel.data as data class Normalize(data.Augmentation): diff --git a/vel/augmentations/random_crop.py b/vel/augmentation/random_crop.py similarity index 99% rename from vel/augmentations/random_crop.py rename to vel/augmentation/random_crop.py index 3cb8faf5..bbc56dec 100644 --- a/vel/augmentations/random_crop.py +++ b/vel/augmentation/random_crop.py @@ -6,7 +6,7 @@ import numbers import random -import vel.api.data as data +import vel.data as data class RandomCrop(data.Augmentation): diff --git a/vel/augmentations/random_horizontal_flip.py b/vel/augmentation/random_horizontal_flip.py similarity index 96% rename from vel/augmentations/random_horizontal_flip.py rename to vel/augmentation/random_horizontal_flip.py index a5549673..2d8bdafd 100644 --- a/vel/augmentations/random_horizontal_flip.py +++ b/vel/augmentation/random_horizontal_flip.py @@ -1,7 +1,7 @@ import random import numpy as np -import vel.api.data as data +import vel.data as data class RandomHorizontalFlip(data.Augmentation): diff --git a/vel/augmentations/random_lighting.py b/vel/augmentation/random_lighting.py similarity index 96% rename from vel/augmentations/random_lighting.py rename to vel/augmentation/random_lighting.py index b5c0da5a..82fc9cb1 100644 --- a/vel/augmentations/random_lighting.py +++ b/vel/augmentation/random_lighting.py @@ -1,6 +1,6 @@ import random -import vel.api.data as data +import vel.data as data class RandomLighting(data.Augmentation): diff --git a/vel/augmentations/random_rotate.py b/vel/augmentation/random_rotate.py similarity index 96% rename from 
vel/augmentations/random_rotate.py rename to vel/augmentation/random_rotate.py index 1a646b22..c2c02246 100644 --- a/vel/augmentations/random_rotate.py +++ b/vel/augmentation/random_rotate.py @@ -5,7 +5,7 @@ import cv2 import random -import vel.api.data as data +import vel.data as data class RandomRotate(data.Augmentation): diff --git a/vel/augmentations/random_scale.py b/vel/augmentation/random_scale.py similarity index 97% rename from vel/augmentations/random_scale.py rename to vel/augmentation/random_scale.py index c60d5852..882a3eb6 100644 --- a/vel/augmentations/random_scale.py +++ b/vel/augmentation/random_scale.py @@ -6,7 +6,7 @@ import collections.abc as abc import random -import vel.api.data as data +import vel.data as data class RandomScale(data.Augmentation): diff --git a/vel/augmentations/scale_min_size.py b/vel/augmentation/scale_min_size.py similarity index 95% rename from vel/augmentations/scale_min_size.py rename to vel/augmentation/scale_min_size.py index a89f0c8e..c1ebfa5d 100644 --- a/vel/augmentations/scale_min_size.py +++ b/vel/augmentation/scale_min_size.py @@ -4,7 +4,7 @@ """ import PIL.Image as Image -import vel.api.data as data +import vel.data as data class ScaleMinSize(data.Augmentation): diff --git a/vel/augmentations/to_array.py b/vel/augmentation/to_array.py similarity index 94% rename from vel/augmentations/to_array.py rename to vel/augmentation/to_array.py index ffbd353d..e1f3a5f0 100644 --- a/vel/augmentations/to_array.py +++ b/vel/augmentation/to_array.py @@ -1,6 +1,6 @@ import numpy as np -import vel.api.data as data +import vel.data as data class ToArray(data.Augmentation): diff --git a/vel/augmentations/to_tensor.py b/vel/augmentation/to_tensor.py similarity index 95% rename from vel/augmentations/to_tensor.py rename to vel/augmentation/to_tensor.py index dc4030e0..33285e77 100644 --- a/vel/augmentations/to_tensor.py +++ b/vel/augmentation/to_tensor.py @@ -2,7 +2,7 @@ import torchvision.transforms.functional as F -import vel.api.data as data +import vel.data as data class ToTensor(data.Augmentation): diff --git a/vel/augmentations/tta/__init__.py b/vel/augmentation/tta/__init__.py similarity index 100% rename from vel/augmentations/tta/__init__.py rename to vel/augmentation/tta/__init__.py diff --git a/vel/augmentations/tta/train_tta.py b/vel/augmentation/tta/train_tta.py similarity index 100% rename from vel/augmentations/tta/train_tta.py rename to vel/augmentation/tta/train_tta.py diff --git a/vel/augmentations/unsupervised.py b/vel/augmentation/unsupervised.py similarity index 91% rename from vel/augmentations/unsupervised.py rename to vel/augmentation/unsupervised.py index 2caeb448..678ab7d3 100644 --- a/vel/augmentations/unsupervised.py +++ b/vel/augmentation/unsupervised.py @@ -1,4 +1,4 @@ -import vel.api.data as data +import vel.data as data class Unsupervised(data.Augmentation): diff --git a/vel/callbacks/__init__.py b/vel/callback/__init__.py similarity index 100% rename from vel/callbacks/__init__.py rename to vel/callback/__init__.py diff --git a/vel/callbacks/time_tracker.py b/vel/callback/time_tracker.py similarity index 100% rename from vel/callbacks/time_tracker.py rename to vel/callback/time_tracker.py diff --git a/vel/commands/__init__.py b/vel/command/__init__.py similarity index 100% rename from vel/commands/__init__.py rename to vel/command/__init__.py diff --git a/vel/commands/augvis_command.py b/vel/command/augvis_command.py similarity index 100% rename from vel/commands/augvis_command.py rename to vel/command/augvis_command.py 
diff --git a/vel/commands/lr_find_command.py b/vel/command/lr_find_command.py similarity index 100% rename from vel/commands/lr_find_command.py rename to vel/command/lr_find_command.py diff --git a/vel/commands/phase_train_command.py b/vel/command/phase_train_command.py similarity index 100% rename from vel/commands/phase_train_command.py rename to vel/command/phase_train_command.py diff --git a/vel/commands/rnn/__init__.py b/vel/command/rnn/__init__.py similarity index 100% rename from vel/commands/rnn/__init__.py rename to vel/command/rnn/__init__.py diff --git a/vel/commands/rnn/generate_text.py b/vel/command/rnn/generate_text.py similarity index 100% rename from vel/commands/rnn/generate_text.py rename to vel/command/rnn/generate_text.py diff --git a/vel/commands/summary_command.py b/vel/command/summary_command.py similarity index 100% rename from vel/commands/summary_command.py rename to vel/command/summary_command.py diff --git a/vel/commands/train_command.py b/vel/command/train_command.py similarity index 100% rename from vel/commands/train_command.py rename to vel/command/train_command.py diff --git a/vel/commands/vis_store_command.py b/vel/command/vis_store_command.py similarity index 100% rename from vel/commands/vis_store_command.py rename to vel/command/vis_store_command.py diff --git a/vel/data/__init__.py b/vel/data/__init__.py new file mode 100644 index 00000000..806cbe66 --- /dev/null +++ b/vel/data/__init__.py @@ -0,0 +1 @@ +from .image_ops import * \ No newline at end of file diff --git a/vel/api/data/image_ops.py b/vel/data/image_ops.py similarity index 100% rename from vel/api/data/image_ops.py rename to vel/data/image_ops.py diff --git a/vel/exceptions.py b/vel/exception.py similarity index 100% rename from vel/exceptions.py rename to vel/exception.py diff --git a/vel/internals/__init__.py b/vel/internal/__init__.py similarity index 100% rename from vel/internals/__init__.py rename to vel/internal/__init__.py diff --git a/vel/internals/context.py b/vel/internal/context.py similarity index 100% rename from vel/internals/context.py rename to vel/internal/context.py diff --git a/vel/internals/generic_factory.py b/vel/internal/generic_factory.py similarity index 100% rename from vel/internals/generic_factory.py rename to vel/internal/generic_factory.py diff --git a/vel/internals/parser.py b/vel/internal/parser.py similarity index 100% rename from vel/internals/parser.py rename to vel/internal/parser.py diff --git a/vel/internals/provider.py b/vel/internal/provider.py similarity index 100% rename from vel/internals/provider.py rename to vel/internal/provider.py diff --git a/vel/internals/tests/__init__.py b/vel/internal/test/__init__.py similarity index 100% rename from vel/internals/tests/__init__.py rename to vel/internal/test/__init__.py diff --git a/vel/internals/tests/fixture_a.py b/vel/internal/test/fixture_a.py similarity index 100% rename from vel/internals/tests/fixture_a.py rename to vel/internal/test/fixture_a.py diff --git a/vel/internals/tests/fixture_b.py b/vel/internal/test/fixture_b.py similarity index 100% rename from vel/internals/tests/fixture_b.py rename to vel/internal/test/fixture_b.py diff --git a/vel/internals/tests/test_parser.py b/vel/internal/test/test_parser.py similarity index 100% rename from vel/internals/tests/test_parser.py rename to vel/internal/test/test_parser.py diff --git a/vel/internals/tests/test_provider.py b/vel/internal/test/test_provider.py similarity index 100% rename from vel/internals/tests/test_provider.py rename to 
vel/internal/test/test_provider.py diff --git a/vel/metrics/__init__.py b/vel/metric/__init__.py similarity index 100% rename from vel/metrics/__init__.py rename to vel/metric/__init__.py diff --git a/vel/metrics/accuracy.py b/vel/metric/accuracy.py similarity index 100% rename from vel/metrics/accuracy.py rename to vel/metric/accuracy.py diff --git a/vel/api/metrics/averaging_metric.py b/vel/metric/averaging_metric.py similarity index 100% rename from vel/api/metrics/averaging_metric.py rename to vel/metric/averaging_metric.py diff --git a/vel/api/metrics/base_metric.py b/vel/metric/base_metric.py similarity index 100% rename from vel/api/metrics/base_metric.py rename to vel/metric/base_metric.py diff --git a/vel/metrics/loss_metric.py b/vel/metric/loss_metric.py similarity index 100% rename from vel/metrics/loss_metric.py rename to vel/metric/loss_metric.py diff --git a/vel/api/metrics/summing_metric.py b/vel/metric/summing_metric.py similarity index 100% rename from vel/api/metrics/summing_metric.py rename to vel/metric/summing_metric.py diff --git a/vel/api/metrics/value_metric.py b/vel/metric/value_metric.py similarity index 100% rename from vel/api/metrics/value_metric.py rename to vel/metric/value_metric.py diff --git a/vel/models/__init__.py b/vel/model/__init__.py similarity index 100% rename from vel/models/__init__.py rename to vel/model/__init__.py diff --git a/vel/models/autoencoder/__init__.py b/vel/model/autoencoder/__init__.py similarity index 100% rename from vel/models/autoencoder/__init__.py rename to vel/model/autoencoder/__init__.py diff --git a/vel/models/autoencoder/mnist_cnn_autoencoder.py b/vel/model/autoencoder/mnist_cnn_autoencoder.py similarity index 100% rename from vel/models/autoencoder/mnist_cnn_autoencoder.py rename to vel/model/autoencoder/mnist_cnn_autoencoder.py diff --git a/vel/models/autoencoder/mnist_cnn_vae.py b/vel/model/autoencoder/mnist_cnn_vae.py similarity index 100% rename from vel/models/autoencoder/mnist_cnn_vae.py rename to vel/model/autoencoder/mnist_cnn_vae.py diff --git a/vel/models/imagenet/__init__.py b/vel/model/imagenet/__init__.py similarity index 100% rename from vel/models/imagenet/__init__.py rename to vel/model/imagenet/__init__.py diff --git a/vel/models/imagenet/resnet34.py b/vel/model/imagenet/resnet34.py similarity index 100% rename from vel/models/imagenet/resnet34.py rename to vel/model/imagenet/resnet34.py diff --git a/vel/models/rnn/__init__.py b/vel/model/rnn/__init__.py similarity index 100% rename from vel/models/rnn/__init__.py rename to vel/model/rnn/__init__.py diff --git a/vel/models/rnn/multilayer_rnn_sequence_classification.py b/vel/model/rnn/multilayer_rnn_sequence_classification.py similarity index 100% rename from vel/models/rnn/multilayer_rnn_sequence_classification.py rename to vel/model/rnn/multilayer_rnn_sequence_classification.py diff --git a/vel/models/rnn/multilayer_rnn_sequence_model.py b/vel/model/rnn/multilayer_rnn_sequence_model.py similarity index 100% rename from vel/models/rnn/multilayer_rnn_sequence_model.py rename to vel/model/rnn/multilayer_rnn_sequence_model.py diff --git a/vel/models/vision/__init__.py b/vel/model/vision/__init__.py similarity index 100% rename from vel/models/vision/__init__.py rename to vel/model/vision/__init__.py diff --git a/vel/models/vision/cifar10_cnn_01.py b/vel/model/vision/cifar10_cnn_01.py similarity index 100% rename from vel/models/vision/cifar10_cnn_01.py rename to vel/model/vision/cifar10_cnn_01.py diff --git a/vel/models/vision/cifar_resnet_v1.py 
b/vel/model/vision/cifar_resnet_v1.py similarity index 100% rename from vel/models/vision/cifar_resnet_v1.py rename to vel/model/vision/cifar_resnet_v1.py diff --git a/vel/models/vision/cifar_resnet_v2.py b/vel/model/vision/cifar_resnet_v2.py similarity index 100% rename from vel/models/vision/cifar_resnet_v2.py rename to vel/model/vision/cifar_resnet_v2.py diff --git a/vel/models/vision/cifar_resnext.py b/vel/model/vision/cifar_resnext.py similarity index 100% rename from vel/models/vision/cifar_resnext.py rename to vel/model/vision/cifar_resnext.py diff --git a/vel/models/vision/mnist_cnn_01.py b/vel/model/vision/mnist_cnn_01.py similarity index 100% rename from vel/models/vision/mnist_cnn_01.py rename to vel/model/vision/mnist_cnn_01.py diff --git a/vel/modules/__init__.py b/vel/module/__init__.py similarity index 100% rename from vel/modules/__init__.py rename to vel/module/__init__.py diff --git a/vel/modules/input/__init__.py b/vel/module/input/__init__.py similarity index 100% rename from vel/modules/input/__init__.py rename to vel/module/input/__init__.py diff --git a/vel/modules/input/embedding.py b/vel/module/input/embedding.py similarity index 100% rename from vel/modules/input/embedding.py rename to vel/module/input/embedding.py diff --git a/vel/modules/input/identity.py b/vel/module/input/identity.py similarity index 100% rename from vel/modules/input/identity.py rename to vel/module/input/identity.py diff --git a/vel/modules/input/image_to_tensor.py b/vel/module/input/image_to_tensor.py similarity index 100% rename from vel/modules/input/image_to_tensor.py rename to vel/module/input/image_to_tensor.py diff --git a/vel/modules/input/normalize_observations.py b/vel/module/input/normalize_observations.py similarity index 100% rename from vel/modules/input/normalize_observations.py rename to vel/module/input/normalize_observations.py diff --git a/vel/modules/input/one_hot_encoding.py b/vel/module/input/one_hot_encoding.py similarity index 100% rename from vel/modules/input/one_hot_encoding.py rename to vel/module/input/one_hot_encoding.py diff --git a/vel/modules/layers.py b/vel/module/layers.py similarity index 100% rename from vel/modules/layers.py rename to vel/module/layers.py diff --git a/vel/modules/resnet_v1.py b/vel/module/resnet_v1.py similarity index 100% rename from vel/modules/resnet_v1.py rename to vel/module/resnet_v1.py diff --git a/vel/modules/resnet_v2.py b/vel/module/resnet_v2.py similarity index 100% rename from vel/modules/resnet_v2.py rename to vel/module/resnet_v2.py diff --git a/vel/modules/resnext.py b/vel/module/resnext.py similarity index 100% rename from vel/modules/resnext.py rename to vel/module/resnext.py diff --git a/vel/modules/rnn_cell.py b/vel/module/rnn_cell.py similarity index 100% rename from vel/modules/rnn_cell.py rename to vel/module/rnn_cell.py diff --git a/vel/modules/rnn_layer.py b/vel/module/rnn_layer.py similarity index 100% rename from vel/modules/rnn_layer.py rename to vel/module/rnn_layer.py diff --git a/vel/optimizers/__init__.py b/vel/optimizer/__init__.py similarity index 100% rename from vel/optimizers/__init__.py rename to vel/optimizer/__init__.py diff --git a/vel/optimizers/adadelta.py b/vel/optimizer/adadelta.py similarity index 100% rename from vel/optimizers/adadelta.py rename to vel/optimizer/adadelta.py diff --git a/vel/optimizers/adam.py b/vel/optimizer/adam.py similarity index 100% rename from vel/optimizers/adam.py rename to vel/optimizer/adam.py diff --git a/vel/optimizers/rmsprop.py b/vel/optimizer/rmsprop.py 
similarity index 100% rename from vel/optimizers/rmsprop.py rename to vel/optimizer/rmsprop.py diff --git a/vel/optimizers/rmsprop_tf.py b/vel/optimizer/rmsprop_tf.py similarity index 100% rename from vel/optimizers/rmsprop_tf.py rename to vel/optimizer/rmsprop_tf.py diff --git a/vel/optimizers/sgd.py b/vel/optimizer/sgd.py similarity index 100% rename from vel/optimizers/sgd.py rename to vel/optimizer/sgd.py diff --git a/vel/schedules/__init__.py b/vel/schedule/__init__.py similarity index 100% rename from vel/schedules/__init__.py rename to vel/schedule/__init__.py diff --git a/vel/schedules/constant.py b/vel/schedule/constant.py similarity index 100% rename from vel/schedules/constant.py rename to vel/schedule/constant.py diff --git a/vel/schedules/linear.py b/vel/schedule/linear.py similarity index 100% rename from vel/schedules/linear.py rename to vel/schedule/linear.py diff --git a/vel/schedules/linear_and_constant.py b/vel/schedule/linear_and_constant.py similarity index 100% rename from vel/schedules/linear_and_constant.py rename to vel/schedule/linear_and_constant.py diff --git a/vel/sources/__init__.py b/vel/source/__init__.py similarity index 100% rename from vel/sources/__init__.py rename to vel/source/__init__.py diff --git a/vel/sources/img_dir_source.py b/vel/source/img_dir_source.py similarity index 100% rename from vel/sources/img_dir_source.py rename to vel/source/img_dir_source.py diff --git a/vel/sources/nlp/__init__.py b/vel/source/nlp/__init__.py similarity index 100% rename from vel/sources/nlp/__init__.py rename to vel/source/nlp/__init__.py diff --git a/vel/sources/nlp/imdb.py b/vel/source/nlp/imdb.py similarity index 100% rename from vel/sources/nlp/imdb.py rename to vel/source/nlp/imdb.py diff --git a/vel/sources/nlp/multi30k.py b/vel/source/nlp/multi30k.py similarity index 100% rename from vel/sources/nlp/multi30k.py rename to vel/source/nlp/multi30k.py diff --git a/vel/sources/nlp/text_url.py b/vel/source/nlp/text_url.py similarity index 100% rename from vel/sources/nlp/text_url.py rename to vel/source/nlp/text_url.py diff --git a/vel/sources/nlp/wmt14.py b/vel/source/nlp/wmt14.py similarity index 100% rename from vel/sources/nlp/wmt14.py rename to vel/source/nlp/wmt14.py diff --git a/vel/sources/vision/__init__.py b/vel/source/vision/__init__.py similarity index 100% rename from vel/sources/vision/__init__.py rename to vel/source/vision/__init__.py diff --git a/vel/sources/vision/cifar10.py b/vel/source/vision/cifar10.py similarity index 100% rename from vel/sources/vision/cifar10.py rename to vel/source/vision/cifar10.py diff --git a/vel/sources/vision/mnist.py b/vel/source/vision/mnist.py similarity index 100% rename from vel/sources/vision/mnist.py rename to vel/source/vision/mnist.py From 31c5b05ee83c5651098be69085cbc7f853835ae2 Mon Sep 17 00:00:00 2001 From: Jerry Tworek Date: Thu, 13 Jun 2019 11:36:15 -0700 Subject: [PATCH 040/162] Second stage of large renames. 
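The same singular-name convention is applied to the RL subtree (buffers -> buffer, commands -> command, models -> model, modules -> module, reinforcers -> reinforcer), plus a few file renames (image_ops.py -> image_op.py, functions.py -> function.py, processes.py -> process.py). These are pure renames (0 insertions, 0 deletions), so imports that still reference the old plural paths need a matching update outside this patch; an illustrative sketch using import lines visible in earlier patches:

    # before
    from vel.rl.models.backbone.nature_cnn import NatureCnn
    from vel.rl.modules.action_head import ActionHead

    # after this rename
    from vel.rl.model.backbone.nature_cnn import NatureCnn
    from vel.rl.module.action_head import ActionHead
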
--- .../mnist/mnist_cnn_autoencoder.yaml | 0 .../{autoencoders => autoencoder}/mnist/mnist_cnn_vae.yaml | 0 vel/data/{image_ops.py => image_op.py} | 0 vel/math/{functions.py => function.py} | 0 vel/math/{processes.py => process.py} | 0 vel/rl/{buffers => buffer}/__init__.py | 0 vel/rl/{buffers => buffer}/backend/__init__.py | 0 vel/rl/{buffers => buffer}/backend/circular_buffer_backend.py | 0 vel/rl/{buffers => buffer}/backend/circular_vec_buffer_backend.py | 0 vel/rl/{buffers => buffer}/backend/prioritized_buffer_backend.py | 0 .../{buffers => buffer}/backend/prioritized_vec_buffer_backend.py | 0 vel/rl/{buffers => buffer}/backend/segment_tree.py | 0 vel/rl/{buffers => buffer}/circular_replay_buffer.py | 0 vel/rl/{buffers => buffer}/prioritized_circular_replay_buffer.py | 0 vel/rl/{buffers => buffer}/tests/__init__.py | 0 vel/rl/{buffers => buffer}/tests/test_circular_buffer_backend.py | 0 .../tests/test_circular_vec_env_buffer_backend.py | 0 .../tests/test_prioritized_circular_buffer_backend.py | 0 .../tests/test_prioritized_vec_buffer_backend.py | 0 vel/rl/{commands => command}/__init__.py | 0 vel/rl/{commands => command}/enjoy.py | 0 vel/rl/{commands => command}/evaluate_env_command.py | 0 vel/rl/{commands => command}/record_movie_command.py | 0 vel/rl/{commands => command}/rl_train_command.py | 0 vel/rl/{models => model}/__init__.py | 0 vel/rl/{models => model}/backbone/__init__.py | 0 vel/rl/{models => model}/backbone/double_nature_cnn.py | 0 vel/rl/{models => model}/backbone/double_noisy_nature_cnn.py | 0 vel/rl/{models => model}/backbone/lstm.py | 0 vel/rl/{models => model}/backbone/mlp.py | 0 vel/rl/{models => model}/backbone/nature_cnn.py | 0 vel/rl/{models => model}/backbone/nature_cnn_rnn.py | 0 vel/rl/{models => model}/backbone/nature_cnn_small.py | 0 vel/rl/{models => model}/backbone/noisy_nature_cnn.py | 0 vel/rl/{models => model}/deterministic_policy_model.py | 0 vel/rl/{models => model}/q_distributional_model.py | 0 vel/rl/{models => model}/q_dueling_model.py | 0 vel/rl/{models => model}/q_model.py | 0 vel/rl/{models => model}/q_noisy_model.py | 0 vel/rl/{models => model}/q_rainbow_model.py | 0 vel/rl/{models => model}/q_stochastic_policy_model.py | 0 vel/rl/{models => model}/stochastic_policy_model.py | 0 vel/rl/{models => model}/stochastic_policy_model_separate.py | 0 vel/rl/{models => model}/stochastic_policy_rnn_model.py | 0 vel/rl/{modules => module}/__init__.py | 0 vel/rl/{modules => module}/action_head.py | 0 vel/rl/{modules => module}/deterministic_action_head.py | 0 vel/rl/{modules => module}/deterministic_critic_head.py | 0 vel/rl/{modules => module}/noise/__init__.py | 0 vel/rl/{modules => module}/noise/eps_greedy.py | 0 vel/rl/{modules => module}/noise/ou_noise.py | 0 vel/rl/{modules => module}/noisy_linear.py | 0 vel/rl/{modules => module}/q_distributional_head.py | 0 vel/rl/{modules => module}/q_distributional_noisy_dueling_head.py | 0 vel/rl/{modules => module}/q_dueling_head.py | 0 vel/rl/{modules => module}/q_head.py | 0 vel/rl/{modules => module}/q_noisy_head.py | 0 vel/rl/{modules => module}/test/__init__.py | 0 vel/rl/{modules => module}/test/test_action_head.py | 0 vel/rl/{modules => module}/value_head.py | 0 vel/rl/{reinforcers => reinforcer}/__init__.py | 0 .../buffered_mixed_policy_iteration_reinforcer.py | 0 .../buffered_off_policy_iteration_reinforcer.py | 0 .../{reinforcers => reinforcer}/on_policy_iteration_reinforcer.py | 0 64 files changed, 0 insertions(+), 0 deletions(-) rename examples-configs/{autoencoders => 
autoencoder}/mnist/mnist_cnn_autoencoder.yaml (100%) rename examples-configs/{autoencoders => autoencoder}/mnist/mnist_cnn_vae.yaml (100%) rename vel/data/{image_ops.py => image_op.py} (100%) rename vel/math/{functions.py => function.py} (100%) rename vel/math/{processes.py => process.py} (100%) rename vel/rl/{buffers => buffer}/__init__.py (100%) rename vel/rl/{buffers => buffer}/backend/__init__.py (100%) rename vel/rl/{buffers => buffer}/backend/circular_buffer_backend.py (100%) rename vel/rl/{buffers => buffer}/backend/circular_vec_buffer_backend.py (100%) rename vel/rl/{buffers => buffer}/backend/prioritized_buffer_backend.py (100%) rename vel/rl/{buffers => buffer}/backend/prioritized_vec_buffer_backend.py (100%) rename vel/rl/{buffers => buffer}/backend/segment_tree.py (100%) rename vel/rl/{buffers => buffer}/circular_replay_buffer.py (100%) rename vel/rl/{buffers => buffer}/prioritized_circular_replay_buffer.py (100%) rename vel/rl/{buffers => buffer}/tests/__init__.py (100%) rename vel/rl/{buffers => buffer}/tests/test_circular_buffer_backend.py (100%) rename vel/rl/{buffers => buffer}/tests/test_circular_vec_env_buffer_backend.py (100%) rename vel/rl/{buffers => buffer}/tests/test_prioritized_circular_buffer_backend.py (100%) rename vel/rl/{buffers => buffer}/tests/test_prioritized_vec_buffer_backend.py (100%) rename vel/rl/{commands => command}/__init__.py (100%) rename vel/rl/{commands => command}/enjoy.py (100%) rename vel/rl/{commands => command}/evaluate_env_command.py (100%) rename vel/rl/{commands => command}/record_movie_command.py (100%) rename vel/rl/{commands => command}/rl_train_command.py (100%) rename vel/rl/{models => model}/__init__.py (100%) rename vel/rl/{models => model}/backbone/__init__.py (100%) rename vel/rl/{models => model}/backbone/double_nature_cnn.py (100%) rename vel/rl/{models => model}/backbone/double_noisy_nature_cnn.py (100%) rename vel/rl/{models => model}/backbone/lstm.py (100%) rename vel/rl/{models => model}/backbone/mlp.py (100%) rename vel/rl/{models => model}/backbone/nature_cnn.py (100%) rename vel/rl/{models => model}/backbone/nature_cnn_rnn.py (100%) rename vel/rl/{models => model}/backbone/nature_cnn_small.py (100%) rename vel/rl/{models => model}/backbone/noisy_nature_cnn.py (100%) rename vel/rl/{models => model}/deterministic_policy_model.py (100%) rename vel/rl/{models => model}/q_distributional_model.py (100%) rename vel/rl/{models => model}/q_dueling_model.py (100%) rename vel/rl/{models => model}/q_model.py (100%) rename vel/rl/{models => model}/q_noisy_model.py (100%) rename vel/rl/{models => model}/q_rainbow_model.py (100%) rename vel/rl/{models => model}/q_stochastic_policy_model.py (100%) rename vel/rl/{models => model}/stochastic_policy_model.py (100%) rename vel/rl/{models => model}/stochastic_policy_model_separate.py (100%) rename vel/rl/{models => model}/stochastic_policy_rnn_model.py (100%) rename vel/rl/{modules => module}/__init__.py (100%) rename vel/rl/{modules => module}/action_head.py (100%) rename vel/rl/{modules => module}/deterministic_action_head.py (100%) rename vel/rl/{modules => module}/deterministic_critic_head.py (100%) rename vel/rl/{modules => module}/noise/__init__.py (100%) rename vel/rl/{modules => module}/noise/eps_greedy.py (100%) rename vel/rl/{modules => module}/noise/ou_noise.py (100%) rename vel/rl/{modules => module}/noisy_linear.py (100%) rename vel/rl/{modules => module}/q_distributional_head.py (100%) rename vel/rl/{modules => module}/q_distributional_noisy_dueling_head.py (100%) rename 
vel/rl/{modules => module}/q_dueling_head.py (100%) rename vel/rl/{modules => module}/q_head.py (100%) rename vel/rl/{modules => module}/q_noisy_head.py (100%) rename vel/rl/{modules => module}/test/__init__.py (100%) rename vel/rl/{modules => module}/test/test_action_head.py (100%) rename vel/rl/{modules => module}/value_head.py (100%) rename vel/rl/{reinforcers => reinforcer}/__init__.py (100%) rename vel/rl/{reinforcers => reinforcer}/buffered_mixed_policy_iteration_reinforcer.py (100%) rename vel/rl/{reinforcers => reinforcer}/buffered_off_policy_iteration_reinforcer.py (100%) rename vel/rl/{reinforcers => reinforcer}/on_policy_iteration_reinforcer.py (100%) diff --git a/examples-configs/autoencoders/mnist/mnist_cnn_autoencoder.yaml b/examples-configs/autoencoder/mnist/mnist_cnn_autoencoder.yaml similarity index 100% rename from examples-configs/autoencoders/mnist/mnist_cnn_autoencoder.yaml rename to examples-configs/autoencoder/mnist/mnist_cnn_autoencoder.yaml diff --git a/examples-configs/autoencoders/mnist/mnist_cnn_vae.yaml b/examples-configs/autoencoder/mnist/mnist_cnn_vae.yaml similarity index 100% rename from examples-configs/autoencoders/mnist/mnist_cnn_vae.yaml rename to examples-configs/autoencoder/mnist/mnist_cnn_vae.yaml diff --git a/vel/data/image_ops.py b/vel/data/image_op.py similarity index 100% rename from vel/data/image_ops.py rename to vel/data/image_op.py diff --git a/vel/math/functions.py b/vel/math/function.py similarity index 100% rename from vel/math/functions.py rename to vel/math/function.py diff --git a/vel/math/processes.py b/vel/math/process.py similarity index 100% rename from vel/math/processes.py rename to vel/math/process.py diff --git a/vel/rl/buffers/__init__.py b/vel/rl/buffer/__init__.py similarity index 100% rename from vel/rl/buffers/__init__.py rename to vel/rl/buffer/__init__.py diff --git a/vel/rl/buffers/backend/__init__.py b/vel/rl/buffer/backend/__init__.py similarity index 100% rename from vel/rl/buffers/backend/__init__.py rename to vel/rl/buffer/backend/__init__.py diff --git a/vel/rl/buffers/backend/circular_buffer_backend.py b/vel/rl/buffer/backend/circular_buffer_backend.py similarity index 100% rename from vel/rl/buffers/backend/circular_buffer_backend.py rename to vel/rl/buffer/backend/circular_buffer_backend.py diff --git a/vel/rl/buffers/backend/circular_vec_buffer_backend.py b/vel/rl/buffer/backend/circular_vec_buffer_backend.py similarity index 100% rename from vel/rl/buffers/backend/circular_vec_buffer_backend.py rename to vel/rl/buffer/backend/circular_vec_buffer_backend.py diff --git a/vel/rl/buffers/backend/prioritized_buffer_backend.py b/vel/rl/buffer/backend/prioritized_buffer_backend.py similarity index 100% rename from vel/rl/buffers/backend/prioritized_buffer_backend.py rename to vel/rl/buffer/backend/prioritized_buffer_backend.py diff --git a/vel/rl/buffers/backend/prioritized_vec_buffer_backend.py b/vel/rl/buffer/backend/prioritized_vec_buffer_backend.py similarity index 100% rename from vel/rl/buffers/backend/prioritized_vec_buffer_backend.py rename to vel/rl/buffer/backend/prioritized_vec_buffer_backend.py diff --git a/vel/rl/buffers/backend/segment_tree.py b/vel/rl/buffer/backend/segment_tree.py similarity index 100% rename from vel/rl/buffers/backend/segment_tree.py rename to vel/rl/buffer/backend/segment_tree.py diff --git a/vel/rl/buffers/circular_replay_buffer.py b/vel/rl/buffer/circular_replay_buffer.py similarity index 100% rename from vel/rl/buffers/circular_replay_buffer.py rename to 
vel/rl/buffer/circular_replay_buffer.py diff --git a/vel/rl/buffers/prioritized_circular_replay_buffer.py b/vel/rl/buffer/prioritized_circular_replay_buffer.py similarity index 100% rename from vel/rl/buffers/prioritized_circular_replay_buffer.py rename to vel/rl/buffer/prioritized_circular_replay_buffer.py diff --git a/vel/rl/buffers/tests/__init__.py b/vel/rl/buffer/tests/__init__.py similarity index 100% rename from vel/rl/buffers/tests/__init__.py rename to vel/rl/buffer/tests/__init__.py diff --git a/vel/rl/buffers/tests/test_circular_buffer_backend.py b/vel/rl/buffer/tests/test_circular_buffer_backend.py similarity index 100% rename from vel/rl/buffers/tests/test_circular_buffer_backend.py rename to vel/rl/buffer/tests/test_circular_buffer_backend.py diff --git a/vel/rl/buffers/tests/test_circular_vec_env_buffer_backend.py b/vel/rl/buffer/tests/test_circular_vec_env_buffer_backend.py similarity index 100% rename from vel/rl/buffers/tests/test_circular_vec_env_buffer_backend.py rename to vel/rl/buffer/tests/test_circular_vec_env_buffer_backend.py diff --git a/vel/rl/buffers/tests/test_prioritized_circular_buffer_backend.py b/vel/rl/buffer/tests/test_prioritized_circular_buffer_backend.py similarity index 100% rename from vel/rl/buffers/tests/test_prioritized_circular_buffer_backend.py rename to vel/rl/buffer/tests/test_prioritized_circular_buffer_backend.py diff --git a/vel/rl/buffers/tests/test_prioritized_vec_buffer_backend.py b/vel/rl/buffer/tests/test_prioritized_vec_buffer_backend.py similarity index 100% rename from vel/rl/buffers/tests/test_prioritized_vec_buffer_backend.py rename to vel/rl/buffer/tests/test_prioritized_vec_buffer_backend.py diff --git a/vel/rl/commands/__init__.py b/vel/rl/command/__init__.py similarity index 100% rename from vel/rl/commands/__init__.py rename to vel/rl/command/__init__.py diff --git a/vel/rl/commands/enjoy.py b/vel/rl/command/enjoy.py similarity index 100% rename from vel/rl/commands/enjoy.py rename to vel/rl/command/enjoy.py diff --git a/vel/rl/commands/evaluate_env_command.py b/vel/rl/command/evaluate_env_command.py similarity index 100% rename from vel/rl/commands/evaluate_env_command.py rename to vel/rl/command/evaluate_env_command.py diff --git a/vel/rl/commands/record_movie_command.py b/vel/rl/command/record_movie_command.py similarity index 100% rename from vel/rl/commands/record_movie_command.py rename to vel/rl/command/record_movie_command.py diff --git a/vel/rl/commands/rl_train_command.py b/vel/rl/command/rl_train_command.py similarity index 100% rename from vel/rl/commands/rl_train_command.py rename to vel/rl/command/rl_train_command.py diff --git a/vel/rl/models/__init__.py b/vel/rl/model/__init__.py similarity index 100% rename from vel/rl/models/__init__.py rename to vel/rl/model/__init__.py diff --git a/vel/rl/models/backbone/__init__.py b/vel/rl/model/backbone/__init__.py similarity index 100% rename from vel/rl/models/backbone/__init__.py rename to vel/rl/model/backbone/__init__.py diff --git a/vel/rl/models/backbone/double_nature_cnn.py b/vel/rl/model/backbone/double_nature_cnn.py similarity index 100% rename from vel/rl/models/backbone/double_nature_cnn.py rename to vel/rl/model/backbone/double_nature_cnn.py diff --git a/vel/rl/models/backbone/double_noisy_nature_cnn.py b/vel/rl/model/backbone/double_noisy_nature_cnn.py similarity index 100% rename from vel/rl/models/backbone/double_noisy_nature_cnn.py rename to vel/rl/model/backbone/double_noisy_nature_cnn.py diff --git a/vel/rl/models/backbone/lstm.py 
b/vel/rl/model/backbone/lstm.py similarity index 100% rename from vel/rl/models/backbone/lstm.py rename to vel/rl/model/backbone/lstm.py diff --git a/vel/rl/models/backbone/mlp.py b/vel/rl/model/backbone/mlp.py similarity index 100% rename from vel/rl/models/backbone/mlp.py rename to vel/rl/model/backbone/mlp.py diff --git a/vel/rl/models/backbone/nature_cnn.py b/vel/rl/model/backbone/nature_cnn.py similarity index 100% rename from vel/rl/models/backbone/nature_cnn.py rename to vel/rl/model/backbone/nature_cnn.py diff --git a/vel/rl/models/backbone/nature_cnn_rnn.py b/vel/rl/model/backbone/nature_cnn_rnn.py similarity index 100% rename from vel/rl/models/backbone/nature_cnn_rnn.py rename to vel/rl/model/backbone/nature_cnn_rnn.py diff --git a/vel/rl/models/backbone/nature_cnn_small.py b/vel/rl/model/backbone/nature_cnn_small.py similarity index 100% rename from vel/rl/models/backbone/nature_cnn_small.py rename to vel/rl/model/backbone/nature_cnn_small.py diff --git a/vel/rl/models/backbone/noisy_nature_cnn.py b/vel/rl/model/backbone/noisy_nature_cnn.py similarity index 100% rename from vel/rl/models/backbone/noisy_nature_cnn.py rename to vel/rl/model/backbone/noisy_nature_cnn.py diff --git a/vel/rl/models/deterministic_policy_model.py b/vel/rl/model/deterministic_policy_model.py similarity index 100% rename from vel/rl/models/deterministic_policy_model.py rename to vel/rl/model/deterministic_policy_model.py diff --git a/vel/rl/models/q_distributional_model.py b/vel/rl/model/q_distributional_model.py similarity index 100% rename from vel/rl/models/q_distributional_model.py rename to vel/rl/model/q_distributional_model.py diff --git a/vel/rl/models/q_dueling_model.py b/vel/rl/model/q_dueling_model.py similarity index 100% rename from vel/rl/models/q_dueling_model.py rename to vel/rl/model/q_dueling_model.py diff --git a/vel/rl/models/q_model.py b/vel/rl/model/q_model.py similarity index 100% rename from vel/rl/models/q_model.py rename to vel/rl/model/q_model.py diff --git a/vel/rl/models/q_noisy_model.py b/vel/rl/model/q_noisy_model.py similarity index 100% rename from vel/rl/models/q_noisy_model.py rename to vel/rl/model/q_noisy_model.py diff --git a/vel/rl/models/q_rainbow_model.py b/vel/rl/model/q_rainbow_model.py similarity index 100% rename from vel/rl/models/q_rainbow_model.py rename to vel/rl/model/q_rainbow_model.py diff --git a/vel/rl/models/q_stochastic_policy_model.py b/vel/rl/model/q_stochastic_policy_model.py similarity index 100% rename from vel/rl/models/q_stochastic_policy_model.py rename to vel/rl/model/q_stochastic_policy_model.py diff --git a/vel/rl/models/stochastic_policy_model.py b/vel/rl/model/stochastic_policy_model.py similarity index 100% rename from vel/rl/models/stochastic_policy_model.py rename to vel/rl/model/stochastic_policy_model.py diff --git a/vel/rl/models/stochastic_policy_model_separate.py b/vel/rl/model/stochastic_policy_model_separate.py similarity index 100% rename from vel/rl/models/stochastic_policy_model_separate.py rename to vel/rl/model/stochastic_policy_model_separate.py diff --git a/vel/rl/models/stochastic_policy_rnn_model.py b/vel/rl/model/stochastic_policy_rnn_model.py similarity index 100% rename from vel/rl/models/stochastic_policy_rnn_model.py rename to vel/rl/model/stochastic_policy_rnn_model.py diff --git a/vel/rl/modules/__init__.py b/vel/rl/module/__init__.py similarity index 100% rename from vel/rl/modules/__init__.py rename to vel/rl/module/__init__.py diff --git a/vel/rl/modules/action_head.py b/vel/rl/module/action_head.py 
similarity index 100% rename from vel/rl/modules/action_head.py rename to vel/rl/module/action_head.py diff --git a/vel/rl/modules/deterministic_action_head.py b/vel/rl/module/deterministic_action_head.py similarity index 100% rename from vel/rl/modules/deterministic_action_head.py rename to vel/rl/module/deterministic_action_head.py diff --git a/vel/rl/modules/deterministic_critic_head.py b/vel/rl/module/deterministic_critic_head.py similarity index 100% rename from vel/rl/modules/deterministic_critic_head.py rename to vel/rl/module/deterministic_critic_head.py diff --git a/vel/rl/modules/noise/__init__.py b/vel/rl/module/noise/__init__.py similarity index 100% rename from vel/rl/modules/noise/__init__.py rename to vel/rl/module/noise/__init__.py diff --git a/vel/rl/modules/noise/eps_greedy.py b/vel/rl/module/noise/eps_greedy.py similarity index 100% rename from vel/rl/modules/noise/eps_greedy.py rename to vel/rl/module/noise/eps_greedy.py diff --git a/vel/rl/modules/noise/ou_noise.py b/vel/rl/module/noise/ou_noise.py similarity index 100% rename from vel/rl/modules/noise/ou_noise.py rename to vel/rl/module/noise/ou_noise.py diff --git a/vel/rl/modules/noisy_linear.py b/vel/rl/module/noisy_linear.py similarity index 100% rename from vel/rl/modules/noisy_linear.py rename to vel/rl/module/noisy_linear.py diff --git a/vel/rl/modules/q_distributional_head.py b/vel/rl/module/q_distributional_head.py similarity index 100% rename from vel/rl/modules/q_distributional_head.py rename to vel/rl/module/q_distributional_head.py diff --git a/vel/rl/modules/q_distributional_noisy_dueling_head.py b/vel/rl/module/q_distributional_noisy_dueling_head.py similarity index 100% rename from vel/rl/modules/q_distributional_noisy_dueling_head.py rename to vel/rl/module/q_distributional_noisy_dueling_head.py diff --git a/vel/rl/modules/q_dueling_head.py b/vel/rl/module/q_dueling_head.py similarity index 100% rename from vel/rl/modules/q_dueling_head.py rename to vel/rl/module/q_dueling_head.py diff --git a/vel/rl/modules/q_head.py b/vel/rl/module/q_head.py similarity index 100% rename from vel/rl/modules/q_head.py rename to vel/rl/module/q_head.py diff --git a/vel/rl/modules/q_noisy_head.py b/vel/rl/module/q_noisy_head.py similarity index 100% rename from vel/rl/modules/q_noisy_head.py rename to vel/rl/module/q_noisy_head.py diff --git a/vel/rl/modules/test/__init__.py b/vel/rl/module/test/__init__.py similarity index 100% rename from vel/rl/modules/test/__init__.py rename to vel/rl/module/test/__init__.py diff --git a/vel/rl/modules/test/test_action_head.py b/vel/rl/module/test/test_action_head.py similarity index 100% rename from vel/rl/modules/test/test_action_head.py rename to vel/rl/module/test/test_action_head.py diff --git a/vel/rl/modules/value_head.py b/vel/rl/module/value_head.py similarity index 100% rename from vel/rl/modules/value_head.py rename to vel/rl/module/value_head.py diff --git a/vel/rl/reinforcers/__init__.py b/vel/rl/reinforcer/__init__.py similarity index 100% rename from vel/rl/reinforcers/__init__.py rename to vel/rl/reinforcer/__init__.py diff --git a/vel/rl/reinforcers/buffered_mixed_policy_iteration_reinforcer.py b/vel/rl/reinforcer/buffered_mixed_policy_iteration_reinforcer.py similarity index 100% rename from vel/rl/reinforcers/buffered_mixed_policy_iteration_reinforcer.py rename to vel/rl/reinforcer/buffered_mixed_policy_iteration_reinforcer.py diff --git a/vel/rl/reinforcers/buffered_off_policy_iteration_reinforcer.py b/vel/rl/reinforcer/buffered_off_policy_iteration_reinforcer.py 
similarity index 100% rename from vel/rl/reinforcers/buffered_off_policy_iteration_reinforcer.py rename to vel/rl/reinforcer/buffered_off_policy_iteration_reinforcer.py diff --git a/vel/rl/reinforcers/on_policy_iteration_reinforcer.py b/vel/rl/reinforcer/on_policy_iteration_reinforcer.py similarity index 100% rename from vel/rl/reinforcers/on_policy_iteration_reinforcer.py rename to vel/rl/reinforcer/on_policy_iteration_reinforcer.py From 3dcd0f23c6527c67180c6e04d4c9809ca8594f05 Mon Sep 17 00:00:00 2001 From: Jerry Tworek Date: Thu, 13 Jun 2019 12:22:18 -0700 Subject: [PATCH 041/162] Fixed linter issues. --- .flake8 | 3 +++ Makefile | 7 +++++-- setup.py | 2 +- vel/api/__init__.py | 2 ++ vel/api/info.py | 9 +++++---- vel/api/learner.py | 10 ++++++---- vel/api/model_config.py | 10 +++++----- vel/api/source.py | 2 +- vel/augmentation/normalize.py | 5 ++--- vel/augmentation/random_horizontal_flip.py | 6 +++--- vel/augmentation/random_lighting.py | 7 ++++--- vel/augmentation/tta/train_tta.py | 3 ++- vel/command/phase_train_command.py | 1 - vel/data/__init__.py | 2 +- vel/data/image_op.py | 6 ++++-- vel/math/process.py | 5 ++++- vel/metric/value_metric.py | 1 - vel/model/imagenet/resnet34.py | 10 +++++----- .../multilayer_rnn_sequence_classification.py | 14 +++++++------- vel/model/rnn/multilayer_rnn_sequence_model.py | 11 ++++++----- vel/model/vision/cifar_resnext.py | 14 ++++++++------ vel/module/input/embedding.py | 8 ++++---- vel/module/layers.py | 1 - vel/module/resnet_v2.py | 4 +++- vel/module/resnext.py | 1 - vel/module/rnn_cell.py | 3 --- vel/module/rnn_layer.py | 3 --- vel/notebook/__init__.py | 4 ++-- vel/optimizer/sgd.py | 2 +- vel/phase/cycle.py | 9 +++++++-- vel/rl/algo/dqn.py | 8 ++++---- vel/rl/algo/policy_gradient/ddpg.py | 6 +++--- vel/rl/algo/policy_gradient/ppo.py | 8 ++++---- vel/rl/algo/policy_gradient/trpo.py | 16 +++++++++------- vel/rl/api/env_base.py | 1 - vel/rl/api/evaluator.py | 1 + vel/rl/api/rollout.py | 13 +++++++++---- .../backend/circular_vec_buffer_backend.py | 4 ++-- vel/rl/buffer/{tests => test}/__init__.py | 0 .../test_circular_buffer_backend.py | 0 .../test_circular_vec_env_buffer_backend.py | 0 .../test_prioritized_circular_buffer_backend.py | 0 .../test_prioritized_vec_buffer_backend.py | 4 ++-- vel/rl/command/evaluate_env_command.py | 7 +++++-- vel/rl/command/record_movie_command.py | 4 +++- .../env_roller/transition_replay_env_roller.py | 14 ++++++++------ vel/rl/model/backbone/lstm.py | 2 +- vel/rl/model/backbone/mlp.py | 4 ++-- vel/rl/model/backbone/nature_cnn_rnn.py | 8 ++++---- vel/rl/model/deterministic_policy_model.py | 8 ++++---- vel/rl/model/q_distributional_model.py | 8 ++++---- vel/rl/model/q_dueling_model.py | 8 ++++---- vel/rl/model/q_model.py | 6 +++--- vel/rl/model/q_noisy_model.py | 4 ++-- vel/rl/model/q_stochastic_policy_model.py | 8 ++++---- vel/rl/model/stochastic_policy_model.py | 8 ++++---- vel/rl/model/stochastic_policy_rnn_model.py | 8 ++++---- vel/rl/module/q_head.py | 1 - .../buffered_off_policy_iteration_reinforcer.py | 2 +- vel/schedule/linear.py | 1 - vel/scheduler/linear_batch_scaler.py | 2 -- vel/scheduler/multi_step.py | 3 +-- vel/scheduler/reduce_lr_on_plateau.py | 9 +++++---- vel/source/nlp/text_url.py | 4 +++- vel/source/vision/cifar10.py | 8 ++++---- vel/storage/backend/mongodb.py | 1 - vel/storage/strategy/checkpoint_strategy.py | 2 -- vel/storage/streaming/visdom.py | 2 +- vel/util/{intepolate.py => interpolate.py} | 1 - vel/util/module_util.py | 3 ++- vel/util/situational.py | 5 ++--- vel/util/summary.py | 4 ++-- 
vel/util/visdom.py | 1 - 73 files changed, 198 insertions(+), 174 deletions(-) create mode 100644 .flake8 rename vel/rl/buffer/{tests => test}/__init__.py (100%) rename vel/rl/buffer/{tests => test}/test_circular_buffer_backend.py (100%) rename vel/rl/buffer/{tests => test}/test_circular_vec_env_buffer_backend.py (100%) rename vel/rl/buffer/{tests => test}/test_prioritized_circular_buffer_backend.py (100%) rename vel/rl/buffer/{tests => test}/test_prioritized_vec_buffer_backend.py (98%) rename vel/util/{intepolate.py => interpolate.py} (99%) diff --git a/.flake8 b/.flake8 new file mode 100644 index 00000000..25d4293b --- /dev/null +++ b/.flake8 @@ -0,0 +1,3 @@ +[flake8] +max-line-length = 120 +exclude = vel/openai, test, vel/api/__init__.py, vel/rl/api/__init__.py diff --git a/Makefile b/Makefile index fd79b924..23022e26 100644 --- a/Makefile +++ b/Makefile @@ -1,4 +1,4 @@ -.PHONY: default test requpgrade +.PHONY: default test requpgrade lint default: test; @@ -40,4 +40,7 @@ requirements.txt: pip-compile requirements.in requpgrade: - pip-compile --upgrade + pip-compile --upgrade + +lint: + flake8 vel diff --git a/setup.py b/setup.py index f134a862..e6290b56 100644 --- a/setup.py +++ b/setup.py @@ -44,7 +44,7 @@ 'mongo': ['pymongo', 'dnspython'], 'gym': ['gym[atari,box2d,classic_control]'], 'mujoco': ['gym[mujoco,robotics]'], - 'dev': ['pytest', 'ipython', 'jupyter', 'pip-tools'], + 'dev': ['pytest', 'ipython', 'jupyter', 'pip-tools', 'flake8'], 'text': ['spacy'], 'all': ['visdom', 'pymongo', 'dnspython', 'gym[all]', 'pytest', 'spacy', 'ipython', 'jupyter'] }, diff --git a/vel/api/__init__.py b/vel/api/__init__.py index 47c4284a..06b8490c 100644 --- a/vel/api/__init__.py +++ b/vel/api/__init__.py @@ -1,3 +1,5 @@ +from .augmentation import Augmentation +from .dataflow import DataFlow from .callback import Callback from .info import BatchInfo, EpochInfo, TrainingInfo from .learner import Learner diff --git a/vel/api/info.py b/vel/api/info.py index e4372b4e..b5f978c3 100644 --- a/vel/api/info.py +++ b/vel/api/info.py @@ -5,7 +5,7 @@ import torch -from vel.exceptions import VelException +from vel.exception import VelException class TrainingHistory: @@ -33,7 +33,7 @@ class TrainingInfo(abc.MutableMapping): Data dict is any extra information processes may want to store """ - def __init__(self, start_epoch_idx=0, run_name: typing.Optional[str]=None, metrics=None, callbacks=None): + def __init__(self, start_epoch_idx=0, run_name: typing.Optional[str] = None, metrics=None, callbacks=None): self.data_dict = {} self.start_epoch_idx = start_epoch_idx @@ -162,7 +162,8 @@ class EpochInfo(abc.MutableMapping): """ def __init__(self, training_info: TrainingInfo, global_epoch_idx: int, batches_per_epoch: int, - optimizer: torch.optim.Optimizer=None, local_epoch_idx: int = None, callbacks: list=None): + optimizer: typing.Optional[torch.optim.Optimizer] = None, local_epoch_idx: int = None, + callbacks: typing.Optional[list] = None): self.training_info = training_info self.optimizer = optimizer self.batches_per_epoch = batches_per_epoch @@ -348,4 +349,4 @@ def __contains__(self, item): return item in self.data_dict def __repr__(self): - return f"[BatchInfo epoch:{self.epoch_info.global_epoch_idx} batch:{self.batch_number}/{self.batches_per_epoch}]" + return f"[BatchInfo epoch:{self.epoch_info.global_epoch_idx} batch:{self.batch_number}/{self.batches_per_epoch}]" # noqa diff --git a/vel/api/learner.py b/vel/api/learner.py index 6ebb1949..5dfd4393 100644 --- a/vel/api/learner.py +++ b/vel/api/learner.py @@ -1,15 
+1,17 @@ import sys import torch +import torch.nn import tqdm import typing from .model import SupervisedModel from .info import BatchInfo, EpochInfo, TrainingInfo +from .source import Source class Learner: """ Manages training process of a single model """ - def __init__(self, device: torch.device, model: SupervisedModel, max_grad_norm: typing.Optional[float]=None): + def __init__(self, device: torch.device, model: SupervisedModel, max_grad_norm: typing.Optional[float] = None): self.device = device self.model = model.to(device) self.max_grad_norm = max_grad_norm @@ -41,7 +43,7 @@ def initialize_training(self, training_info: TrainingInfo, model_state=None, hid else: self.model.load_state_dict(model_state) - def run_epoch(self, epoch_info: EpochInfo, source: 'vel.api.Source'): + def run_epoch(self, epoch_info: EpochInfo, source: 'Source'): """ Run full epoch of learning """ epoch_info.on_epoch_begin() @@ -56,7 +58,7 @@ def run_epoch(self, epoch_info: EpochInfo, source: 'vel.api.Source'): epoch_info.on_epoch_end() - def train_epoch(self, epoch_info, source: 'vel.api.Source', interactive=True): + def train_epoch(self, epoch_info, source: 'Source', interactive=True): """ Run a single training epoch """ self.train() @@ -74,7 +76,7 @@ def train_epoch(self, epoch_info, source: 'vel.api.Source', interactive=True): iterator.set_postfix(loss=epoch_info.result_accumulator.intermediate_value('loss')) - def validation_epoch(self, epoch_info, source: 'vel.api.Source'): + def validation_epoch(self, epoch_info, source: 'Source'): """ Run a single evaluation epoch """ self.eval() diff --git a/vel/api/model_config.py b/vel/api/model_config.py index e2f98cd2..23488916 100644 --- a/vel/api/model_config.py +++ b/vel/api/model_config.py @@ -2,9 +2,9 @@ import os.path import typing -from vel.exceptions import VelInitializationException -from vel.internals.parser import Parser -from vel.internals.provider import Provider +from vel.exception import VelInitializationException +from vel.internal.parser import Parser +from vel.internal.provider import Provider from .info import TrainingInfo @@ -196,8 +196,8 @@ def banner(self, command_name) -> None: device = self.torch_device() print("=" * 80) - print(f"Pytorch version: {torch.__version__} cuda version {torch.version.cuda} cudnn version {torch.backends.cudnn.version()}") - print("Running model {}, run {} -- command {} -- device {}".format(self._model_name, self.run_number, command_name, self.device)) + print(f"Pytorch version: {torch.__version__} cuda version {torch.version.cuda} cudnn version {torch.backends.cudnn.version()}") # noqa + print("Running model {}, run {} -- command {} -- device {}".format(self._model_name, self.run_number, command_name, self.device)) # noqa if device.type == 'cuda': device_idx = 0 if device.index is None else device.index print(f"CUDA Device name {torch.cuda.get_device_name(device_idx)}") diff --git a/vel/api/source.py b/vel/api/source.py index 9396b487..be1d864e 100644 --- a/vel/api/source.py +++ b/vel/api/source.py @@ -1,6 +1,6 @@ import torch.utils.data as data -from vel.data import DataFlow +from .dataflow import DataFlow class Source: diff --git a/vel/augmentation/normalize.py b/vel/augmentation/normalize.py index 36ca29fd..b0b787a6 100644 --- a/vel/augmentation/normalize.py +++ b/vel/augmentation/normalize.py @@ -1,9 +1,9 @@ import numpy as np -import vel.data as data +import vel.api as api -class Normalize(data.Augmentation): +class Normalize(api.Augmentation): """ Normalize input mean and standard deviation """ def 
__init__(self, mean, std, mode='x', tags=None): @@ -22,4 +22,3 @@ def denormalize(self, x_data): def create(mean, std, mode='x', tags=None): """ Vel factory function """ return Normalize(mean=mean, std=std, mode=mode, tags=tags) - diff --git a/vel/augmentation/random_horizontal_flip.py b/vel/augmentation/random_horizontal_flip.py index 2d8bdafd..e4d6c142 100644 --- a/vel/augmentation/random_horizontal_flip.py +++ b/vel/augmentation/random_horizontal_flip.py @@ -1,10 +1,10 @@ import random import numpy as np -import vel.data as data +import vel.api as api -class RandomHorizontalFlip(data.Augmentation): +class RandomHorizontalFlip(api.Augmentation): """ Apply a horizontal flip randomly to input images """ def __init__(self, p=0.5, mode='x', tags=None): @@ -28,4 +28,4 @@ def __repr__(self): def create(p=0.5): - return RandomHorizontalFlip(p) \ No newline at end of file + return RandomHorizontalFlip(p) diff --git a/vel/augmentation/random_lighting.py b/vel/augmentation/random_lighting.py index 82fc9cb1..d85c450c 100644 --- a/vel/augmentation/random_lighting.py +++ b/vel/augmentation/random_lighting.py @@ -1,9 +1,10 @@ import random +import vel.api as api import vel.data as data -class RandomLighting(data.Augmentation): +class RandomLighting(api.Augmentation): """ Apply a horizontal flip randomly to input images """ def __init__(self, b, c, mode='x', tags=None): @@ -14,11 +15,11 @@ def __call__(self, img): """ Adjust lighting """ rand_b = random.uniform(-self.b, self.b) rand_c = random.uniform(-self.c, self.c) - rand_c = -1/(rand_c-1) if rand_c<0 else rand_c+1 + rand_c = -1/(rand_c-1) if rand_c < 0 else rand_c+1 return data.lighting(img, rand_b, rand_c) def __repr__(self): - return self.__class__.__name__ + '(p={})'.format(self.p) + return self.__class__.__name__ + '(b={}, c={})'.format(self.b, self.c) def create(b, c, mode='x', tags=None): diff --git a/vel/augmentation/tta/train_tta.py b/vel/augmentation/tta/train_tta.py index 212937da..78621428 100644 --- a/vel/augmentation/tta/train_tta.py +++ b/vel/augmentation/tta/train_tta.py @@ -70,7 +70,8 @@ # if self.index == (1 + self.n_augmentations): # new_output = torch.mean(torch.stack(self.accumulated_output, dim=-1), dim=-1) # new_context = { -# k: torch.mean(torch.stack([c[k] for c in self.accumulated_context], dim=-1), dim=-1) for k in context.keys() +# k: torch.mean(torch.stack([c[k] for c in self.accumulated_context], dim=-1), dim=-1) +# for k in context.keys() # } # # self.metric_accumulator.calculate(self.data, self.target, new_output, new_context) diff --git a/vel/command/phase_train_command.py b/vel/command/phase_train_command.py index fc541286..e80768b2 100644 --- a/vel/command/phase_train_command.py +++ b/vel/command/phase_train_command.py @@ -1,4 +1,3 @@ -import torch import numpy as np import bisect import typing diff --git a/vel/data/__init__.py b/vel/data/__init__.py index 806cbe66..dd02c4a5 100644 --- a/vel/data/__init__.py +++ b/vel/data/__init__.py @@ -1 +1 @@ -from .image_ops import * \ No newline at end of file +from .image_op import * # noqa diff --git a/vel/data/image_op.py b/vel/data/image_op.py index 6bf45d78..9b6833f2 100644 --- a/vel/data/image_op.py +++ b/vel/data/image_op.py @@ -17,7 +17,8 @@ def center_crop(im, min_sz=None): """ Returns a center crop of an image""" # return F.center_crop(im, min_sz) r, c, *_ = im.shape - if min_sz is None: min_sz = min(r, c) + if min_sz is None: + min_sz = min(r, c) start_r = math.ceil((r - min_sz) / 2) start_c = math.ceil((c - min_sz) / 2) return crop_square(im, start_r, start_c, 
min_sz) @@ -78,6 +79,7 @@ def mode_to_cv2(mode='constant'): def lighting(im, b, c): """ Adjusts image's balance and contrast. """ - if b == 0 and c == 1: return im + if b == 0 and c == 1: + return im mu = np.average(im) return np.clip((im - mu) * c + mu + b, 0., 1.).astype(np.float32) diff --git a/vel/math/process.py b/vel/math/process.py index cd176b83..923cad44 100644 --- a/vel/math/process.py +++ b/vel/math/process.py @@ -15,7 +15,10 @@ def __init__(self, mu, sigma, theta=.15, dt=1e-2, x0=None): self.reset() def __call__(self): - x = self.x_prev + self.theta * (self.mu - self.x_prev) * self.dt + self.sigma * np.sqrt(self.dt) * np.random.normal(size=self.mu.shape) + x = ( + self.x_prev + self.theta * (self.mu - self.x_prev) * self.dt + + self.sigma * np.sqrt(self.dt) * np.random.normal(size=self.mu.shape) + ) self.x_prev = x return x diff --git a/vel/metric/value_metric.py b/vel/metric/value_metric.py index ac17adbf..309e9e57 100644 --- a/vel/metric/value_metric.py +++ b/vel/metric/value_metric.py @@ -23,4 +23,3 @@ def value(self): def _value_function(self, batch_info): raise NotImplementedError - diff --git a/vel/model/imagenet/resnet34.py b/vel/model/imagenet/resnet34.py index fc819a2a..a4a78f86 100644 --- a/vel/model/imagenet/resnet34.py +++ b/vel/model/imagenet/resnet34.py @@ -2,7 +2,7 @@ import torch.nn as nn import torch.nn.functional as F -import vel.modules.layers as l +import vel.module.layers as layers import vel.util.module_util as mu from vel.api import LossFunctionModel, ModelFactory @@ -35,8 +35,8 @@ def __init__(self, fc_layers=None, dropout=None, pretrained=True): valid_children = list(backbone.children())[:-2] valid_children.extend([ - l.AdaptiveConcatPool2d(), - l.Flatten() + layers.AdaptiveConcatPool2d(), + layers.Flatten() ]) layer_inputs = [NET_OUTPUT] + fc_layers[:-1] @@ -94,8 +94,8 @@ def loss_value(self, x_data, y_true, y_pred): def metrics(self): """ Set of metrics for this model """ - from vel.metrics.loss_metric import Loss - from vel.metrics.accuracy import Accuracy + from vel.metric.loss_metric import Loss + from vel.metric.accuracy import Accuracy return [Loss(), Accuracy()] diff --git a/vel/model/rnn/multilayer_rnn_sequence_classification.py b/vel/model/rnn/multilayer_rnn_sequence_classification.py index 0d3e7439..db5da9f0 100644 --- a/vel/model/rnn/multilayer_rnn_sequence_classification.py +++ b/vel/model/rnn/multilayer_rnn_sequence_classification.py @@ -5,17 +5,17 @@ import torch.nn as nn from vel.api import LossFunctionModel, ModelFactory, LinearBackboneModel -from vel.metrics.accuracy import Accuracy -from vel.metrics.loss_metric import Loss -from vel.modules.rnn_layer import RnnLayer +from vel.metric.accuracy import Accuracy +from vel.metric.loss_metric import Loss +from vel.module.rnn_layer import RnnLayer class MultilayerRnnSequenceClassification(LossFunctionModel): """ Multilayer GRU network for sequence modeling (n:1) """ def __init__(self, input_block: LinearBackboneModel, rnn_type: str, output_dim: int, - rnn_layers: typing.List[int], rnn_dropout: float=0.0, bidirectional: bool=False, - linear_layers: typing.List[int]=None, linear_dropout: float=0.0): + rnn_layers: typing.List[int], rnn_dropout: float = 0.0, bidirectional: bool = False, + linear_layers: typing.List[int] = None, linear_dropout: float = 0.0): super().__init__() self.output_dim = output_dim @@ -144,8 +144,8 @@ def metrics(self) -> list: def create(input_block: ModelFactory, rnn_type: str, output_dim: int, - rnn_layers: typing.List[int], rnn_dropout: float=0.0, bidirectional: 
bool=False, - linear_layers: typing.List[int]=None, linear_dropout: float=0.0): + rnn_layers: typing.List[int], rnn_dropout: float = 0.0, bidirectional: bool = False, + linear_layers: typing.List[int] = None, linear_dropout: float = 0.0): """ Vel factory function """ if linear_layers is None: linear_layers = [] diff --git a/vel/model/rnn/multilayer_rnn_sequence_model.py b/vel/model/rnn/multilayer_rnn_sequence_model.py index 9352ef10..2e90c2d3 100644 --- a/vel/model/rnn/multilayer_rnn_sequence_model.py +++ b/vel/model/rnn/multilayer_rnn_sequence_model.py @@ -4,15 +4,15 @@ import torch.nn.functional as F import torch.nn as nn -from vel.api import RnnSupervisedModel, ModelFactory, LinearBackboneModel -from vel.modules.rnn_layer import RnnLayer +from vel.api import LossFunctionModel, ModelFactory, LinearBackboneModel +from vel.module.rnn_layer import RnnLayer -class MultilayerRnnSequenceModel(RnnSupervisedModel): +class MultilayerRnnSequenceModel(LossFunctionModel): """ Multilayer GRU network for sequence modeling (n:n) """ def __init__(self, input_block: LinearBackboneModel, rnn_type: str, hidden_layers: typing.List[int], - output_dim: int, dropout: float=0.0): + output_dim: int, dropout: float = 0.0): super().__init__() self.output_dim = output_dim @@ -115,7 +115,8 @@ def create(input_block: ModelFactory, rnn_type: str, hidden_layers: typing.List[ """ Vel factory function """ def instantiate(**_): return MultilayerRnnSequenceModel( - input_block.instantiate(), rnn_type=rnn_type, hidden_layers=hidden_layers, output_dim=output_dim, dropout=dropout + input_block.instantiate(), rnn_type=rnn_type, hidden_layers=hidden_layers, output_dim=output_dim, + dropout=dropout ) return ModelFactory.generic(instantiate) diff --git a/vel/model/vision/cifar_resnext.py b/vel/model/vision/cifar_resnext.py index ffa0fc76..edb6d8a2 100644 --- a/vel/model/vision/cifar_resnext.py +++ b/vel/model/vision/cifar_resnext.py @@ -7,7 +7,7 @@ import torch.nn.functional as F from vel.api import LossFunctionModel, ModelFactory -from vel.modules.resnext import ResNeXtBottleneck +from vel.module.resnext import ResNeXtBottleneck class ResNeXt(LossFunctionModel): @@ -44,8 +44,7 @@ def __init__(self, block, layers, inplanes, image_features, cardinality=4, divis nn.init.constant_(m.bias, 0.0) def _make_layer(self, block, in_channels, out_channels, blocks, stride=1): - layers = [] - layers.append(block(in_channels, out_channels, self.cardinality, self.divisor, stride=stride)) + layers = [block(in_channels, out_channels, self.cardinality, self.divisor, stride=stride)] for i in range(1, blocks): layers.append(block(out_channels, out_channels, self.cardinality, self.divisor, stride=1)) @@ -74,8 +73,8 @@ def loss_value(self, x_data, y_true, y_pred): def metrics(self): """ Set of metrics for this model """ - from vel.metrics.loss_metric import Loss - from vel.metrics.accuracy import Accuracy + from vel.metric.loss_metric import Loss + from vel.metric.accuracy import Accuracy return [Loss(), Accuracy()] @@ -87,6 +86,9 @@ def create(blocks, mode='basic', inplanes=64, cardinality=4, image_features=64, } def instantiate(**_): - return ResNeXt(block_dict[mode], blocks, inplanes=inplanes, image_features=image_features, cardinality=cardinality, divisor=divisor, num_classes=num_classes) + return ResNeXt( + block_dict[mode], blocks, inplanes=inplanes, image_features=image_features, + cardinality=cardinality, divisor=divisor, num_classes=num_classes + ) return ModelFactory.generic(instantiate) diff --git a/vel/module/input/embedding.py 
b/vel/module/input/embedding.py index ab4aee1c..37d3387b 100644 --- a/vel/module/input/embedding.py +++ b/vel/module/input/embedding.py @@ -6,8 +6,8 @@ class EmbeddingInput(LinearBackboneModel): """ Learnable Embedding input layer """ - def __init__(self, alphabet_size: int, output_dim: int, pretrained: bool=False, frozen: bool=False, - source: SupervisedTextData=None): + def __init__(self, alphabet_size: int, output_dim: int, pretrained: bool = False, frozen: bool = False, + source: SupervisedTextData = None): super().__init__() self._output_dim = output_dim @@ -34,7 +34,8 @@ def forward(self, input_data): return self.layer(input_data) -def create(alphabet_size: int, output_dim: int, pretrained: bool=False, frozen: bool=False, source: SupervisedTextData=None): +def create(alphabet_size: int, output_dim: int, pretrained: bool = False, frozen: bool = False, + source: SupervisedTextData = None): """ Vel factory function """ def instantiate(**_): return EmbeddingInput(alphabet_size, output_dim, pretrained=pretrained, frozen=frozen, source=source) @@ -44,4 +45,3 @@ def instantiate(**_): # Scripting interface EmbeddingInputFactory = create - diff --git a/vel/module/layers.py b/vel/module/layers.py index 9a95e31d..08fadb0a 100644 --- a/vel/module/layers.py +++ b/vel/module/layers.py @@ -58,4 +58,3 @@ def __init__(self, num_classes): def forward(self, x): return one_hot_encoding(x, self.num_classes) - diff --git a/vel/module/resnet_v2.py b/vel/module/resnet_v2.py index 31445f35..8a7e2518 100644 --- a/vel/module/resnet_v2.py +++ b/vel/module/resnet_v2.py @@ -81,7 +81,9 @@ def __init__(self, in_channels, out_channels, stride=1, divisor=4): self.conv1 = nn.Conv2d(in_channels, self.bottleneck_channels, kernel_size=1, bias=False) self.bn2 = nn.BatchNorm2d(self.bottleneck_channels) - self.conv2 = nn.Conv2d(self.bottleneck_channels, self.bottleneck_channels, kernel_size=3, stride=stride, padding=1, bias=False) + self.conv2 = nn.Conv2d( + self.bottleneck_channels, self.bottleneck_channels, kernel_size=3, stride=stride, padding=1, bias=False + ) self.bn3 = nn.BatchNorm2d(self.bottleneck_channels) self.conv3 = nn.Conv2d(self.bottleneck_channels, out_channels, kernel_size=1, bias=False) diff --git a/vel/module/resnext.py b/vel/module/resnext.py index 3ae26e49..7e943402 100644 --- a/vel/module/resnext.py +++ b/vel/module/resnext.py @@ -40,7 +40,6 @@ def __init__(self, in_channels, out_channels, cardinality, divisor, stride=1): self.conv_expand = nn.Conv2d(D * C, out_channels, kernel_size=1, stride=1, padding=0, bias=False) self.bn_expand = nn.BatchNorm2d(out_channels) - def forward(self, x): bottleneck = self.conv_reduce(x) bottleneck = F.relu(self.bn_reduce(bottleneck), inplace=True) diff --git a/vel/module/rnn_cell.py b/vel/module/rnn_cell.py index 08d9582e..5b62a046 100644 --- a/vel/module/rnn_cell.py +++ b/vel/module/rnn_cell.py @@ -55,6 +55,3 @@ def forward(self, input_data, state): else: new_hidden_state = self.rnn_cell(input_data, state) return new_hidden_state, new_hidden_state - - - diff --git a/vel/module/rnn_layer.py b/vel/module/rnn_layer.py index 7533dc35..54d61904 100644 --- a/vel/module/rnn_layer.py +++ b/vel/module/rnn_layer.py @@ -73,6 +73,3 @@ def forward(self, input_data, state=None): return output, new_state else: return self.rnn_cell(input_data, state) - - - diff --git a/vel/notebook/__init__.py b/vel/notebook/__init__.py index 68058abb..a8655a0b 100644 --- a/vel/notebook/__init__.py +++ b/vel/notebook/__init__.py @@ -1,2 +1,2 @@ -from .loader import load_config, script -from .defaults 
import reasonable_notbook_defaults +from .loader import load_config, script # noqa +from .defaults import reasonable_notbook_defaults # noqa diff --git a/vel/optimizer/sgd.py b/vel/optimizer/sgd.py index 93272f0b..128ccd1e 100644 --- a/vel/optimizer/sgd.py +++ b/vel/optimizer/sgd.py @@ -8,7 +8,7 @@ class SgdFactory(OptimizerFactory): """ SGD optimizer factory """ - def __init__(self, lr, momentum=0, dampening=0, weight_decay=0, nesterov=False, layer_groups: bool=False): + def __init__(self, lr, momentum=0, dampening=0, weight_decay=0, nesterov=False, layer_groups: bool = False): self.lr = lr self.momentum = momentum self.dampening = dampening diff --git a/vel/phase/cycle.py b/vel/phase/cycle.py index 1c89915c..b1862323 100644 --- a/vel/phase/cycle.py +++ b/vel/phase/cycle.py @@ -57,7 +57,9 @@ def on_batch_begin(self, batch_info: BatchInfo): cycle_length = self.cycle_lengths[batch_info.local_epoch_number - 1] cycle_start = self.cycle_starts[batch_info.local_epoch_number - 1] - numerator = (batch_info.local_epoch_number - cycle_start - 1) * batch_info.batches_per_epoch + batch_info.batch_number + numerator = ( + (batch_info.local_epoch_number - cycle_start - 1) * batch_info.batches_per_epoch + batch_info.batch_number + ) denominator = cycle_length * batch_info.batches_per_epoch interpolation_number = numerator / denominator @@ -66,7 +68,10 @@ def on_batch_begin(self, batch_info: BatchInfo): lr = self.init_lr else: if isinstance(self.max_lr, list): - lr = [interp.interpolate_single(max_lr, min_lr, interpolation_number, how=self.interpolate) for max_lr, min_lr in zip(self.max_lr, self.min_lr)] + lr = [ + interp.interpolate_single(max_lr, min_lr, interpolation_number, how=self.interpolate) + for max_lr, min_lr in zip(self.max_lr, self.min_lr) + ] else: lr = interp.interpolate_single(self.max_lr, self.min_lr, interpolation_number, how=self.interpolate) diff --git a/vel/rl/algo/dqn.py b/vel/rl/algo/dqn.py index f3de2eaa..1437a062 100644 --- a/vel/rl/algo/dqn.py +++ b/vel/rl/algo/dqn.py @@ -3,15 +3,15 @@ import torch.nn.utils from vel.api import ModelFactory -from vel.api.metrics.averaging_metric import AveragingNamedMetric +from vel.metric.averaging_metric import AveragingNamedMetric from vel.rl.api import OptimizerAlgoBase class DeepQLearning(OptimizerAlgoBase): """ Deep Q-Learning algorithm """ - def __init__(self, model_factory: ModelFactory, discount_factor: float, double_dqn: bool, target_update_frequency: int, - max_grad_norm: float): + def __init__(self, model_factory: ModelFactory, discount_factor: float, double_dqn: bool, + target_update_frequency: int, max_grad_norm: float): super().__init__(max_grad_norm) self.model_factory = model_factory @@ -91,7 +91,7 @@ def metrics(self) -> list: def create(model: ModelFactory, discount_factor: float, target_update_frequency: int, - max_grad_norm: float, double_dqn: bool=False): + max_grad_norm: float, double_dqn: bool = False): """ Vel factory function """ return DeepQLearning( model_factory=model, diff --git a/vel/rl/algo/policy_gradient/ddpg.py b/vel/rl/algo/policy_gradient/ddpg.py index 72560f80..2150cab4 100644 --- a/vel/rl/algo/policy_gradient/ddpg.py +++ b/vel/rl/algo/policy_gradient/ddpg.py @@ -4,13 +4,13 @@ import torch.nn.functional as F from vel.rl.api import OptimizerAlgoBase -from vel.api.metrics.averaging_metric import AveragingNamedMetric +from vel.metric.averaging_metric import AveragingNamedMetric class DeepDeterministicPolicyGradient(OptimizerAlgoBase): """ Deep Deterministic Policy Gradient (DDPG) - policy gradient calculations 
""" - def __init__(self, model_factory, discount_factor: float, tau: float, max_grad_norm: typing.Optional[float]=None): + def __init__(self, model_factory, discount_factor: float, tau: float, max_grad_norm: typing.Optional[float] = None): super().__init__(max_grad_norm) self.model_factory = model_factory @@ -84,7 +84,7 @@ def metrics(self) -> list: ] -def create(model, discount_factor: float, tau: float, max_grad_norm: float=None): +def create(model, discount_factor: float, tau: float, max_grad_norm: float = None): """ Vel factory function """ return DeepDeterministicPolicyGradient( tau=tau, diff --git a/vel/rl/algo/policy_gradient/ppo.py b/vel/rl/algo/policy_gradient/ppo.py index 5f0b3538..d835cf6a 100644 --- a/vel/rl/algo/policy_gradient/ppo.py +++ b/vel/rl/algo/policy_gradient/ppo.py @@ -2,17 +2,17 @@ import numbers -from vel.api.metrics.averaging_metric import AveragingNamedMetric -from vel.math.functions import explained_variance +from vel.math.function import explained_variance +from vel.metric.averaging_metric import AveragingNamedMetric from vel.rl.api import OptimizerAlgoBase, Rollout, Trajectories from vel.rl.discount_bootstrap import discount_bootstrap_gae -from vel.schedules.constant import ConstantSchedule +from vel.schedule.constant import ConstantSchedule class PpoPolicyGradient(OptimizerAlgoBase): """ Proximal Policy Optimization - https://arxiv.org/abs/1707.06347 """ def __init__(self, entropy_coefficient, value_coefficient, cliprange, max_grad_norm, discount_factor: float, - normalize_advantage: bool=True, gae_lambda: float=1.0): + normalize_advantage: bool = True, gae_lambda: float = 1.0): super().__init__(max_grad_norm) self.entropy_coefficient = entropy_coefficient diff --git a/vel/rl/algo/policy_gradient/trpo.py b/vel/rl/algo/policy_gradient/trpo.py index b73f3a67..d97d1bf3 100644 --- a/vel/rl/algo/policy_gradient/trpo.py +++ b/vel/rl/algo/policy_gradient/trpo.py @@ -4,8 +4,8 @@ import torch.nn.functional as F import torch.nn.utils -from vel.api.metrics.averaging_metric import AveragingNamedMetric -from vel.math.functions import explained_variance +from vel.metric.averaging_metric import AveragingNamedMetric +from vel.math.function import explained_variance from vel.rl.api import AlgoBase, Rollout, Trajectories from vel.rl.discount_bootstrap import discount_bootstrap_gae @@ -30,11 +30,11 @@ def conjugate_gradient_method(matrix_vector_operator, loss_gradient, nsteps, rdo rdotr = torch.dot(r, r) for i in range(nsteps): - Avp = matrix_vector_operator(p) - alpha = rdotr / torch.dot(p, Avp) + avp = matrix_vector_operator(p) + alpha = rdotr / torch.dot(p, avp) x += alpha * p - r -= alpha * Avp + r -= alpha * avp new_rdotr = torch.dot(r, r) betta = new_rdotr / rdotr @@ -122,8 +122,10 @@ def optimizer_step(self, batch_info, device, model, rollout): expected_improvement = (-policy_grad) @ full_step original_parameter_vec = p2v(model.policy_parameters()).detach_() - policy_optimization_success, ratio, policy_loss_improvement, new_policy_loss, kl_divergence_step = self.line_search( - model, rollout, policy_loss, policy_params, original_parameter_vec, full_step, expected_improvement + (policy_optimization_success, ratio, policy_loss_improvement, new_policy_loss, kl_divergence_step) = ( + self.line_search( + model, rollout, policy_loss, policy_params, original_parameter_vec, full_step, expected_improvement + ) ) gradient_norms = [] diff --git a/vel/rl/api/env_base.py b/vel/rl/api/env_base.py index 07e0be1a..eb0c6fba 100644 --- a/vel/rl/api/env_base.py +++ b/vel/rl/api/env_base.py 
@@ -25,4 +25,3 @@ def instantiate(self, parallel_envs, seed=0, preset='default') -> VecEnv: def instantiate_single(self, seed=0, preset='default') -> VecEnv: """ Create a new VecEnv instance - single """ raise NotImplementedError - diff --git a/vel/rl/api/evaluator.py b/vel/rl/api/evaluator.py index dd5cb9ec..c8a98307 100644 --- a/vel/rl/api/evaluator.py +++ b/vel/rl/api/evaluator.py @@ -108,6 +108,7 @@ def is_provided(self, name): return True elif name.startswith('rollout:'): rollout_name = name[8:] + return self.is_provided(rollout_name) else: return False diff --git a/vel/rl/api/rollout.py b/vel/rl/api/rollout.py index 01720b97..4acf8cbb 100644 --- a/vel/rl/api/rollout.py +++ b/vel/rl/api/rollout.py @@ -100,7 +100,8 @@ class Trajectories(Rollout): transition_tensors - tensors that have a row (multidimensional) per each transition. E.g. state, reward, done rollout_tensors - tensors that have a row (multidimensional) per whole rollout. E.g. final_value, initial rnn state """ - def __init__(self, num_steps, num_envs, environment_information, transition_tensors, rollout_tensors, extra_data=None): + def __init__(self, num_steps, num_envs, environment_information, transition_tensors, rollout_tensors, + extra_data=None): self.num_steps = num_steps self.num_envs = num_envs self.environment_information = environment_information @@ -111,11 +112,15 @@ def __init__(self, num_steps, num_envs, environment_information, transition_tens def to_transitions(self) -> 'Transitions': """ Convert given rollout to Transitions """ # No need to propagate 'rollout_tensors' as they won't mean anything + + if self.environment_information is not None: + env_info = [ei for l in self.environment_information for ei in l] + else: + env_info = None + return Transitions( size=self.num_steps * self.num_envs, - environment_information= - [ei for l in self.environment_information for ei in l] - if self.environment_information is not None else None, + environment_information=env_info, transition_tensors={ name: tensor_util.merge_first_two_dims(t) for name, t in self.transition_tensors.items() }, diff --git a/vel/rl/buffer/backend/circular_vec_buffer_backend.py b/vel/rl/buffer/backend/circular_vec_buffer_backend.py index 074700bc..3b30fb6f 100644 --- a/vel/rl/buffer/backend/circular_vec_buffer_backend.py +++ b/vel/rl/buffer/backend/circular_vec_buffer_backend.py @@ -1,7 +1,7 @@ import gym import numpy as np -from vel.exceptions import VelException +from vel.exception import VelException def take_along_axis(large_array, indexes): @@ -22,7 +22,7 @@ class CircularVecEnvBufferBackend: """ def __init__(self, buffer_capacity: int, num_envs: int, observation_space: gym.Space, action_space: gym.Space, - frame_stack_compensation: bool=False, frame_history: int=1): + frame_stack_compensation: bool = False, frame_history: int = 1): # Maximum number of items in the buffer self.buffer_capacity = buffer_capacity diff --git a/vel/rl/buffer/tests/__init__.py b/vel/rl/buffer/test/__init__.py similarity index 100% rename from vel/rl/buffer/tests/__init__.py rename to vel/rl/buffer/test/__init__.py diff --git a/vel/rl/buffer/tests/test_circular_buffer_backend.py b/vel/rl/buffer/test/test_circular_buffer_backend.py similarity index 100% rename from vel/rl/buffer/tests/test_circular_buffer_backend.py rename to vel/rl/buffer/test/test_circular_buffer_backend.py diff --git a/vel/rl/buffer/tests/test_circular_vec_env_buffer_backend.py b/vel/rl/buffer/test/test_circular_vec_env_buffer_backend.py similarity index 100% rename from 
vel/rl/buffer/tests/test_circular_vec_env_buffer_backend.py rename to vel/rl/buffer/test/test_circular_vec_env_buffer_backend.py diff --git a/vel/rl/buffer/tests/test_prioritized_circular_buffer_backend.py b/vel/rl/buffer/test/test_prioritized_circular_buffer_backend.py similarity index 100% rename from vel/rl/buffer/tests/test_prioritized_circular_buffer_backend.py rename to vel/rl/buffer/test/test_prioritized_circular_buffer_backend.py diff --git a/vel/rl/buffer/tests/test_prioritized_vec_buffer_backend.py b/vel/rl/buffer/test/test_prioritized_vec_buffer_backend.py similarity index 98% rename from vel/rl/buffer/tests/test_prioritized_vec_buffer_backend.py rename to vel/rl/buffer/test/test_prioritized_vec_buffer_backend.py index cd90ebe6..5fb21f3b 100644 --- a/vel/rl/buffer/tests/test_prioritized_vec_buffer_backend.py +++ b/vel/rl/buffer/test/test_prioritized_vec_buffer_backend.py @@ -5,8 +5,8 @@ import numpy.testing as nt import pytest -from vel.exceptions import VelException -from vel.rl.buffers.backend.prioritized_vec_buffer_backend import PrioritizedCircularVecEnvBufferBackend +from vel.exception import VelException +from vel.rl.buffer.backend.prioritized_vec_buffer_backend import PrioritizedCircularVecEnvBufferBackend def get_halfempty_buffer_with_dones(frame_history=1): diff --git a/vel/rl/command/evaluate_env_command.py b/vel/rl/command/evaluate_env_command.py index e9d7c2e4..33f7f4dc 100644 --- a/vel/rl/command/evaluate_env_command.py +++ b/vel/rl/command/evaluate_env_command.py @@ -11,7 +11,8 @@ class EvaluateEnvCommand: """ Record environment playthrough as a game """ def __init__(self, model_config: ModelConfig, env_factory: VecEnvFactory, model_factory: ModelFactory, - storage: Storage, parallel_envs: int, action_noise: typing.Optional[ModelFactory], takes: int, sample_args: dict = None): + storage: Storage, parallel_envs: int, action_noise: typing.Optional[ModelFactory], takes: int, + sample_args: dict = None): self.model_config = model_config self.model_factory = model_factory self.env_factory = env_factory @@ -26,7 +27,9 @@ def __init__(self, model_config: ModelConfig, env_factory: VecEnvFactory, model_ def run(self): device = self.model_config.torch_device() - env = self.env_factory.instantiate(parallel_envs=self.parallel_envs, preset='record', seed=self.model_config.seed) + env = self.env_factory.instantiate( + parallel_envs=self.parallel_envs, preset='record', seed=self.model_config.seed + ) model = self.model_factory.instantiate(action_space=env.action_space).to(device) if self.action_noise_factory is not None: diff --git a/vel/rl/command/record_movie_command.py b/vel/rl/command/record_movie_command.py index 78e60d5f..6b6f3c4c 100644 --- a/vel/rl/command/record_movie_command.py +++ b/vel/rl/command/record_movie_command.py @@ -78,7 +78,9 @@ def record_take(self, model, env_instance, device, take_number): # End of an episode break - takename = self.model_config.output_dir('videos', self.model_config.run_name, self.videoname.format(take_number)) + takename = self.model_config.output_dir( + 'videos', self.model_config.run_name, self.videoname.format(take_number) + ) pathlib.Path(os.path.dirname(takename)).mkdir(parents=True, exist_ok=True) fourcc = cv2.VideoWriter_fourcc('M', 'J', 'P', 'G') diff --git a/vel/rl/env_roller/transition_replay_env_roller.py b/vel/rl/env_roller/transition_replay_env_roller.py index d64628ae..d0ed933b 100644 --- a/vel/rl/env_roller/transition_replay_env_roller.py +++ b/vel/rl/env_roller/transition_replay_env_roller.py @@ -18,8 +18,9 @@ class 
TransitionReplayEnvRoller(ReplayEnvRollerBase): Samples transitions from the replay buffer (individual frame transitions) """ - def __init__(self, environment, device, replay_buffer: ReplayBuffer, discount_factor: typing.Optional[float]=None, - normalize_returns: bool=False, forward_steps: int=1, action_noise: typing.Optional[nn.Module]=None): + def __init__(self, environment, device, replay_buffer: ReplayBuffer, discount_factor: typing.Optional[float] = None, + normalize_returns: bool = False, forward_steps: int = 1, + action_noise: typing.Optional[nn.Module] = None): self._environment = environment self.device = device self.replay_buffer = replay_buffer @@ -156,8 +157,9 @@ def update(self, rollout, batch_info): class TransitionReplayEnvRollerFactory(ReplayEnvRollerFactoryBase): """ Factory for the ReplayEnvRoller """ - def __init__(self, replay_buffer_factory: ReplayBufferFactory, discount_factor: typing.Optional[float]=None, - normalize_returns: bool=False, forward_steps: int=1, action_noise: typing.Optional[ModelFactory]=None): + def __init__(self, replay_buffer_factory: ReplayBufferFactory, discount_factor: typing.Optional[float] = None, + normalize_returns: bool = False, forward_steps: int = 1, + action_noise: typing.Optional[ModelFactory] = None): self.replay_buffer_factory = replay_buffer_factory self.normalize_returns = normalize_returns self.forward_steps = forward_steps @@ -183,8 +185,8 @@ def instantiate(self, environment, device): ) -def create(replay_buffer, discount_factor: typing.Optional[float]=None, normalize_returns: bool=False, - forward_steps: int=1, action_noise: typing.Optional[ModelFactory]=None): +def create(replay_buffer, discount_factor: typing.Optional[float] = None, normalize_returns: bool = False, + forward_steps: int = 1, action_noise: typing.Optional[ModelFactory] = None): """ Vel factory function """ return TransitionReplayEnvRollerFactory( replay_buffer_factory=replay_buffer, diff --git a/vel/rl/model/backbone/lstm.py b/vel/rl/model/backbone/lstm.py index 874f7ca6..50356d07 100644 --- a/vel/rl/model/backbone/lstm.py +++ b/vel/rl/model/backbone/lstm.py @@ -1,4 +1,4 @@ -from vel.api import LinearBackboneModel, ModelFactory +from vel.api import LinearBackboneModel class LstmBackbone(LinearBackboneModel): diff --git a/vel/rl/model/backbone/mlp.py b/vel/rl/model/backbone/mlp.py index cc87e9e9..f4e03ae3 100644 --- a/vel/rl/model/backbone/mlp.py +++ b/vel/rl/model/backbone/mlp.py @@ -17,8 +17,8 @@ class MLP(LinearBackboneModel): """ Simple Multi-Layer-Perceptron network """ - def __init__(self, input_length: int, hidden_layers: typing.List[int], activation: str='tanh', - normalization: typing.Optional[str]=None): + def __init__(self, input_length: int, hidden_layers: typing.List[int], activation: str = 'tanh', + normalization: typing.Optional[str] = None): super().__init__() self.input_length = input_length diff --git a/vel/rl/model/backbone/nature_cnn_rnn.py b/vel/rl/model/backbone/nature_cnn_rnn.py index 832926e4..9662a444 100644 --- a/vel/rl/model/backbone/nature_cnn_rnn.py +++ b/vel/rl/model/backbone/nature_cnn_rnn.py @@ -1,6 +1,6 @@ from vel.api import LinearBackboneModel, ModelFactory -from vel.rl.models.backbone.nature_cnn import NatureCnn -from vel.modules.rnn_cell import RnnCell +from vel.rl.model.backbone.nature_cnn import NatureCnn +from vel.module.rnn_cell import RnnCell class NatureCnnRnnBackbone(LinearBackboneModel): @@ -8,8 +8,8 @@ class NatureCnnRnnBackbone(LinearBackboneModel): Long-Short-Term Memory rnn cell together with DeepMind-style 'Nature' 
cnn preprocessing """ - def __init__(self, input_width: int, input_height: int, input_channels: int, rnn_type='lstm', - cnn_output_dim: int=512, hidden_units: int=128): + def __init__(self, input_width: int, input_height: int, input_channels: int, rnn_type: str = 'lstm', + cnn_output_dim: int = 512, hidden_units: int = 128): super().__init__() self.hidden_units = hidden_units diff --git a/vel/rl/model/deterministic_policy_model.py b/vel/rl/model/deterministic_policy_model.py index 6be633ce..da7b31d0 100644 --- a/vel/rl/model/deterministic_policy_model.py +++ b/vel/rl/model/deterministic_policy_model.py @@ -4,10 +4,10 @@ import typing from vel.api import LinearBackboneModel, ModelFactory, BackboneModel -from vel.modules.input.identity import IdentityFactory +from vel.module.input.identity import IdentityFactory from vel.rl.api import Rollout, Evaluator, RlModel -from vel.rl.modules.deterministic_action_head import DeterministicActionHead -from vel.rl.modules.deterministic_critic_head import DeterministicCriticHead +from vel.rl.module.deterministic_action_head import DeterministicActionHead +from vel.rl.module.deterministic_critic_head import DeterministicCriticHead class DeterministicPolicyEvaluator(Evaluator): @@ -154,7 +154,7 @@ def instantiate(self, **extra_args): def create(policy_backbone: ModelFactory, value_backbone: ModelFactory, - input_block: typing.Optional[ModelFactory]=None): + input_block: typing.Optional[ModelFactory] = None): """ Vel factory function """ if input_block is None: input_block = IdentityFactory() diff --git a/vel/rl/model/q_distributional_model.py b/vel/rl/model/q_distributional_model.py index a769e741..209b002b 100644 --- a/vel/rl/model/q_distributional_model.py +++ b/vel/rl/model/q_distributional_model.py @@ -2,9 +2,9 @@ import typing from vel.api import LinearBackboneModel, ModelFactory, BackboneModel -from vel.modules.input.identity import IdentityFactory +from vel.module.input.identity import IdentityFactory from vel.rl.api import Rollout, RlModel, Evaluator -from vel.rl.modules.q_distributional_head import QDistributionalHead +from vel.rl.module.q_distributional_head import QDistributionalHead class QDistributionalModelEvaluator(Evaluator): @@ -60,7 +60,7 @@ class QDistributionalModel(RlModel): Supports only discrete action spaces (ones that can be enumerated) """ def __init__(self, input_block: BackboneModel, backbone: LinearBackboneModel, action_space: gym.Space, - vmin: float, vmax: float, atoms: int=1): + vmin: float, vmax: float, atoms: int = 1): super().__init__() self.action_space = action_space @@ -131,7 +131,7 @@ def instantiate(self, **extra_args): def create(backbone: ModelFactory, vmin: float, vmax: float, atoms: int, - input_block: typing.Optional[ModelFactory]=None): + input_block: typing.Optional[ModelFactory] = None): """ Vel factory function """ if input_block is None: input_block = IdentityFactory() diff --git a/vel/rl/model/q_dueling_model.py b/vel/rl/model/q_dueling_model.py index 09d8518b..74fff35a 100644 --- a/vel/rl/model/q_dueling_model.py +++ b/vel/rl/model/q_dueling_model.py @@ -2,10 +2,10 @@ import typing from vel.api import LinearBackboneModel, Model, ModelFactory, BackboneModel -from vel.modules.input.identity import IdentityFactory +from vel.module.input.identity import IdentityFactory from vel.rl.api import Rollout, Evaluator -from vel.rl.modules.q_dueling_head import QDuelingHead -from vel.rl.models.q_model import QModelEvaluator +from vel.rl.module.q_dueling_head import QDuelingHead +from vel.rl.model.q_model import 
QModelEvaluator class QDuelingModel(Model): @@ -65,7 +65,7 @@ def instantiate(self, **extra_args): return QDuelingModel(input_block, backbone, extra_args['action_space']) -def create(backbone: ModelFactory, input_block: typing.Optional[ModelFactory]=None): +def create(backbone: ModelFactory, input_block: typing.Optional[ModelFactory] = None): """ Vel factory function """ if input_block is None: input_block = IdentityFactory() diff --git a/vel/rl/model/q_model.py b/vel/rl/model/q_model.py index 208137e9..7472e0bb 100644 --- a/vel/rl/model/q_model.py +++ b/vel/rl/model/q_model.py @@ -2,9 +2,9 @@ import typing from vel.api import LinearBackboneModel, ModelFactory, BackboneModel -from vel.modules.input.identity import IdentityFactory +from vel.module.input.identity import IdentityFactory from vel.rl.api import Rollout, RlModel, Evaluator -from vel.rl.modules.q_head import QHead +from vel.rl.module.q_head import QHead class QModelEvaluator(Evaluator): @@ -89,7 +89,7 @@ def instantiate(self, **extra_args): return QModel(input_block, backbone, extra_args['action_space']) -def create(backbone: ModelFactory, input_block: typing.Optional[ModelFactory]=None): +def create(backbone: ModelFactory, input_block: typing.Optional[ModelFactory] = None): """ Vel factory function """ if input_block is None: input_block = IdentityFactory() diff --git a/vel/rl/model/q_noisy_model.py b/vel/rl/model/q_noisy_model.py index cfc3e491..9dc73e6e 100644 --- a/vel/rl/model/q_noisy_model.py +++ b/vel/rl/model/q_noisy_model.py @@ -75,8 +75,8 @@ def instantiate(self, **extra_args): ) -def create(backbone: ModelFactory, input_block: typing.Optional[ModelFactory]=None, initial_std_dev=0.4, - factorized_noise=True): +def create(backbone: ModelFactory, input_block: typing.Optional[ModelFactory] = None, initial_std_dev: float = 0.4, + factorized_noise: bool = True): """ Vel factory function """ if input_block is None: input_block = IdentityFactory() diff --git a/vel/rl/model/q_stochastic_policy_model.py b/vel/rl/model/q_stochastic_policy_model.py index 929dc3b9..5cf97893 100644 --- a/vel/rl/model/q_stochastic_policy_model.py +++ b/vel/rl/model/q_stochastic_policy_model.py @@ -3,10 +3,10 @@ import typing from vel.api import LinearBackboneModel, Model, ModelFactory, BackboneModel -from vel.modules.input.identity import IdentityFactory +from vel.module.input.identity import IdentityFactory from vel.rl.api import Rollout, Evaluator -from vel.rl.modules.action_head import ActionHead -from vel.rl.modules.q_head import QHead +from vel.rl.module.action_head import ActionHead +from vel.rl.module.q_head import QHead class QStochasticPolicyEvaluator(Evaluator): @@ -120,7 +120,7 @@ def instantiate(self, **extra_args): return QStochasticPolicyModel(input_block, backbone, extra_args['action_space']) -def create(backbone: ModelFactory, input_block: typing.Optional[ModelFactory]=None): +def create(backbone: ModelFactory, input_block: typing.Optional[ModelFactory] = None): """ Vel factory function """ if input_block is None: input_block = IdentityFactory() diff --git a/vel/rl/model/stochastic_policy_model.py b/vel/rl/model/stochastic_policy_model.py index 27a7c4c2..cee084b6 100644 --- a/vel/rl/model/stochastic_policy_model.py +++ b/vel/rl/model/stochastic_policy_model.py @@ -2,10 +2,10 @@ import typing from vel.api import LinearBackboneModel, ModelFactory, BackboneModel -from vel.modules.input.identity import IdentityFactory +from vel.module.input.identity import IdentityFactory from vel.rl.api import Rollout, Evaluator, RlModel -from 
vel.rl.modules.action_head import ActionHead -from vel.rl.modules.value_head import ValueHead +from vel.rl.module.action_head import ActionHead +from vel.rl.module.value_head import ValueHead class StochasticPolicyEvaluator(Evaluator): @@ -115,7 +115,7 @@ def instantiate(self, **extra_args): return StochasticPolicyModel(input_block, backbone, extra_args['action_space']) -def create(backbone: ModelFactory, input_block: typing.Optional[ModelFactory]=None): +def create(backbone: ModelFactory, input_block: typing.Optional[ModelFactory] = None): """ Vel factory function """ if input_block is None: input_block = IdentityFactory() diff --git a/vel/rl/model/stochastic_policy_rnn_model.py b/vel/rl/model/stochastic_policy_rnn_model.py index cda7d69a..7147a381 100644 --- a/vel/rl/model/stochastic_policy_rnn_model.py +++ b/vel/rl/model/stochastic_policy_rnn_model.py @@ -3,10 +3,10 @@ import typing from vel.api import LinearBackboneModel, ModelFactory, BackboneModel -from vel.modules.input.identity import IdentityFactory +from vel.module.input.identity import IdentityFactory from vel.rl.api import Rollout, Trajectories, Evaluator, RlRnnModel -from vel.rl.modules.action_head import ActionHead -from vel.rl.modules.value_head import ValueHead +from vel.rl.module.action_head import ActionHead +from vel.rl.module.value_head import ValueHead class StochasticPolicyRnnEvaluator(Evaluator): @@ -142,7 +142,7 @@ def instantiate(self, **extra_args): return StochasticPolicyRnnModel(input_block, backbone, extra_args['action_space']) -def create(backbone: ModelFactory, input_block: typing.Optional[ModelFactory]=None): +def create(backbone: ModelFactory, input_block: typing.Optional[ModelFactory] = None): """ Vel factory function """ if input_block is None: input_block = IdentityFactory() diff --git a/vel/rl/module/q_head.py b/vel/rl/module/q_head.py index 52abaaeb..00431c2f 100644 --- a/vel/rl/module/q_head.py +++ b/vel/rl/module/q_head.py @@ -26,4 +26,3 @@ def forward(self, input_data): def sample(self, q_values): """ Sample from epsilon-greedy strategy with given q-values """ return q_values.argmax(dim=1) - diff --git a/vel/rl/reinforcer/buffered_off_policy_iteration_reinforcer.py b/vel/rl/reinforcer/buffered_off_policy_iteration_reinforcer.py index d9f873b3..d3ce3349 100644 --- a/vel/rl/reinforcer/buffered_off_policy_iteration_reinforcer.py +++ b/vel/rl/reinforcer/buffered_off_policy_iteration_reinforcer.py @@ -190,7 +190,7 @@ def instantiate(self, device: torch.device) -> BufferedOffPolicyIterationReinfor def create(model_config, vec_env, model, algo, env_roller, parallel_envs: int, - rollout_steps: int, training_steps: int, training_rounds: int=1): + rollout_steps: int, training_steps: int, training_rounds: int = 1): """ Vel factory function """ settings = BufferedOffPolicyIterationReinforcerSettings( rollout_steps=rollout_steps, diff --git a/vel/schedule/linear.py b/vel/schedule/linear.py index ef74b03c..a3f88c4f 100644 --- a/vel/schedule/linear.py +++ b/vel/schedule/linear.py @@ -18,4 +18,3 @@ def value(self, progress_indicator): def create(initial_value, final_value): """ Vel factory function """ return LinearSchedule(initial_value, final_value) - diff --git a/vel/scheduler/linear_batch_scaler.py b/vel/scheduler/linear_batch_scaler.py index 59088401..584251f4 100644 --- a/vel/scheduler/linear_batch_scaler.py +++ b/vel/scheduler/linear_batch_scaler.py @@ -32,5 +32,3 @@ def instantiate(self, optimizer, last_epoch=-1) -> LinearBatchScaler: def create(): """ Vel factory function """ return 
LinearBatchScalerFactory() - - diff --git a/vel/scheduler/multi_step.py b/vel/scheduler/multi_step.py index 172f7ee7..eeeb735d 100644 --- a/vel/scheduler/multi_step.py +++ b/vel/scheduler/multi_step.py @@ -1,5 +1,4 @@ -import torch.optim.lr_scheduler as scheduler - +# import torch.optim.lr_scheduler as scheduler # class MultiStepScheduler: # def __init__(self, optimizer, milestones, gamma, last_epoch): diff --git a/vel/scheduler/reduce_lr_on_plateau.py b/vel/scheduler/reduce_lr_on_plateau.py index daf97999..98dc67a3 100644 --- a/vel/scheduler/reduce_lr_on_plateau.py +++ b/vel/scheduler/reduce_lr_on_plateau.py @@ -1,8 +1,9 @@ -import torch.optim.lr_scheduler as scheduler +# import torch.optim.lr_scheduler as scheduler # class ReduceLrOnPlateau: -# def __init__(self, optimizer, metric_name, mode, factor, patience, threshold, threshold_mode, cooldown, min_lr, epsilon): +# def __init__(self, optimizer, metric_name, mode, factor, patience, threshold, threshold_mode, +# cooldown, min_lr, epsilon): # self.metric_name = metric_name # self.scheduler = scheduler.ReduceLROnPlateau( # optimizer, @@ -30,7 +31,7 @@ # min_lr=0, epsilon=1e-8): # """ Create a scheduler that lowers the LR on metric plateau """ # def scheduler_fn(optimizer): -# return ReduceLrOnPlateau(optimizer, metric_name, mode, factor, patience, threshold, threshold_mode, cooldown, min_lr, epsilon) +# return ReduceLrOnPlateau(optimizer, metric_name, mode, factor, patience, threshold, threshold_mode, +# cooldown, min_lr, epsilon) # # return scheduler_fn - diff --git a/vel/source/nlp/text_url.py b/vel/source/nlp/text_url.py index e1b2095d..5478837c 100644 --- a/vel/source/nlp/text_url.py +++ b/vel/source/nlp/text_url.py @@ -18,7 +18,9 @@ def __init__(self, padded_sequence, sequence_length, batch_size, alphabet_size, self.alphabet_size = alphabet_size self.padded_sequence = padded_sequence[:-1].reshape(self.num_batches * self.batch_size, self.sequence_length) - self.padded_sequence_next = padded_sequence[1:].reshape(self.num_batches * self.batch_size, self.sequence_length) + self.padded_sequence_next = padded_sequence[1:].reshape( + self.num_batches * self.batch_size, self.sequence_length + ) self.sequence_indices = np.arange(self.num_batches * self.batch_size) diff --git a/vel/source/vision/cifar10.py b/vel/source/vision/cifar10.py index 4d8c02ee..53ec1080 100644 --- a/vel/source/vision/cifar10.py +++ b/vel/source/vision/cifar10.py @@ -2,9 +2,9 @@ from vel.api import SupervisedTrainingData -from vel.augmentations.normalize import Normalize -from vel.augmentations.to_tensor import ToTensor -from vel.augmentations.to_array import ToArray +from vel.augmentation.normalize import Normalize +from vel.augmentation.to_tensor import ToTensor +from vel.augmentation.to_array import ToArray def create(model_config, batch_size, normalize=True, num_workers=0, augmentations=None): @@ -18,7 +18,7 @@ def create(model_config, batch_size, normalize=True, num_workers=0, augmentation test_dataset = datasets.CIFAR10(path, train=False, download=True) augmentations = [ToArray()] + (augmentations if augmentations is not None else []) - + if normalize: train_data = train_dataset.data mean_value = (train_data / 255).mean(axis=(0, 1, 2)) diff --git a/vel/storage/backend/mongodb.py b/vel/storage/backend/mongodb.py index d9d197eb..ff663737 100644 --- a/vel/storage/backend/mongodb.py +++ b/vel/storage/backend/mongodb.py @@ -48,4 +48,3 @@ def store(self, metrics): def create(model_config, uri, database): """ Vel factory function """ return MongoDbBackend(model_config, 
uri, database) - diff --git a/vel/storage/strategy/checkpoint_strategy.py b/vel/storage/strategy/checkpoint_strategy.py index a2d245c4..272b3681 100644 --- a/vel/storage/strategy/checkpoint_strategy.py +++ b/vel/storage/strategy/checkpoint_strategy.py @@ -22,5 +22,3 @@ def current_best_checkpoint_idx(self) -> typing.Union[int, None]: def write_state_dict(self, hidden_state_dict): pass def restore(self, hidden_state_dict): pass - - diff --git a/vel/storage/streaming/visdom.py b/vel/storage/streaming/visdom.py index 917bb390..a32bf83b 100644 --- a/vel/storage/streaming/visdom.py +++ b/vel/storage/streaming/visdom.py @@ -36,7 +36,7 @@ def on_batch_end(self, batch_info): float(batch_info.epoch_number) + float(batch_info.batch_number) / batch_info.batches_per_epoch ) - + lr = batch_info.optimizer.param_groups[-1]['lr'] metrics_df = pd.DataFrame([lr], index=[iteration_idx], columns=['lr']) diff --git a/vel/util/intepolate.py b/vel/util/interpolate.py similarity index 99% rename from vel/util/intepolate.py rename to vel/util/interpolate.py index 29eb72a4..985773c0 100644 --- a/vel/util/intepolate.py +++ b/vel/util/interpolate.py @@ -53,4 +53,3 @@ def interpolate_series(start, end, steps, how='linear'): def interpolate_single(start, end, coefficient, how='linear'): """ Interpolate single value between start and end in given number of steps """ return INTERP_SINGLE_DICT[how](start, end, coefficient) - diff --git a/vel/util/module_util.py b/vel/util/module_util.py index e08e9eda..1c501336 100644 --- a/vel/util/module_util.py +++ b/vel/util/module_util.py @@ -38,7 +38,8 @@ def module_broadcast(m, broadcast_fn, *args, **kwargs): def set_train_mode(module): # Only fix ones which we don't want to "train" - if hasattr(module, 'running_mean') and (getattr(module, 'bn_freeze', False) or not getattr(module, 'trainable', True)): + if hasattr(module, 'running_mean') and (getattr(module, 'bn_freeze', False) or + not getattr(module, 'trainable', True)): module.eval() elif getattr(module, 'drop_freeze', False) and hasattr(module, 'p') and ('drop' in type(module).__name__.lower()): module.eval() diff --git a/vel/util/situational.py b/vel/util/situational.py index d57924f1..56bb25a8 100644 --- a/vel/util/situational.py +++ b/vel/util/situational.py @@ -1,8 +1,8 @@ import typing -def process_environment_settings(default_dictionary: dict, settings: typing.Optional[dict]=None, - presets: typing.Optional[dict]=None): +def process_environment_settings(default_dictionary: dict, settings: typing.Optional[dict] = None, + presets: typing.Optional[dict] = None): """ Process a dictionary of env settings """ settings = settings if settings is not None else {} presets = presets if presets is not None else {} @@ -25,4 +25,3 @@ def process_environment_settings(default_dictionary: dict, settings: typing.Opti result_dict[key] = new_dict return result_dict - diff --git a/vel/util/summary.py b/vel/util/summary.py index b5809f39..d7608601 100644 --- a/vel/util/summary.py +++ b/vel/util/summary.py @@ -76,7 +76,7 @@ def hook(module, input, output): '{0:,}'.format(summary[layer]['nb_params'])) total_params += summary[layer]['nb_params'] if 'trainable' in summary[layer]: - if summary[layer]['trainable'] == True: + if summary[layer]['trainable']: trainable_params += summary[layer]['nb_params'] print(line_new) print('================================================================') @@ -84,4 +84,4 @@ def hook(module, input, output): print('Trainable params: {0:,}'.format(trainable_params)) print('Non-trainable params: 
{0:,}'.format(total_params - trainable_params)) print('----------------------------------------------------------------') - # return summary \ No newline at end of file + # return summary diff --git a/vel/util/visdom.py b/vel/util/visdom.py index 38877987..6d30369a 100644 --- a/vel/util/visdom.py +++ b/vel/util/visdom.py @@ -118,4 +118,3 @@ def visdom_append_metrics(vis, metrics, first_epoch=False): }, update=update ) - From 2e9d926e2461e9ee96a011f3e0b9703c1ddf7b64 Mon Sep 17 00:00:00 2001 From: Jerry Tworek Date: Thu, 13 Jun 2019 12:37:08 -0700 Subject: [PATCH 042/162] Fixed tests after the refactoring. --- Makefile | 5 ++- .../rl/mujoco/ddpg/half_cheetah_ddpg.py | 14 +++---- setup.py | 2 +- vel/api/model.py | 2 +- vel/api/model_factory.py | 2 +- vel/command/lr_find_command.py | 4 +- vel/command/train_command.py | 2 +- vel/internal/parser.py | 2 +- vel/internal/provider.py | 4 +- vel/internal/test/test_parser.py | 2 +- vel/internal/test/test_provider.py | 36 +++++++++--------- vel/launcher.py | 2 +- vel/metric/__init__.py | 3 ++ vel/metric/accuracy.py | 2 +- vel/metric/loss_metric.py | 2 +- .../autoencoder/mnist_cnn_autoencoder.py | 4 +- vel/model/autoencoder/mnist_cnn_vae.py | 6 +-- vel/model/vision/cifar10_cnn_01.py | 4 +- vel/model/vision/cifar_resnet_v1.py | 4 +- vel/model/vision/cifar_resnet_v2.py | 10 +---- vel/model/vision/mnist_cnn_01.py | 4 +- vel/phase/cycle.py | 2 +- vel/rl/algo/distributional_dqn.py | 2 +- vel/rl/algo/policy_gradient/a2c.py | 4 +- vel/rl/algo/policy_gradient/acer.py | 2 +- .../buffer/backend/circular_buffer_backend.py | 2 +- .../test/test_circular_buffer_backend.py | 4 +- .../test_circular_vec_env_buffer_backend.py | 4 +- ...est_prioritized_circular_buffer_backend.py | 4 +- vel/rl/command/rl_train_command.py | 2 +- vel/rl/metrics.py | 2 +- .../model/backbone/double_noisy_nature_cnn.py | 2 +- vel/rl/model/backbone/noisy_nature_cnn.py | 2 +- vel/rl/model/q_noisy_model.py | 6 +-- vel/rl/model/q_rainbow_model.py | 6 +-- .../model/stochastic_policy_model_separate.py | 8 ++-- vel/rl/module/noise/eps_greedy.py | 4 +- vel/rl/module/noise/ou_noise.py | 4 +- .../q_distributional_noisy_dueling_head.py | 2 +- vel/rl/module/q_noisy_head.py | 2 +- vel/rl/module/test/test_action_head.py | 2 +- vel/rl/test/test_integration.py | 38 +++++++++---------- vel/schedule/linear.py | 2 +- vel/schedule/linear_and_constant.py | 2 +- 44 files changed, 112 insertions(+), 112 deletions(-) diff --git a/Makefile b/Makefile index 23022e26..3f2fc3d3 100644 --- a/Makefile +++ b/Makefile @@ -1,4 +1,4 @@ -.PHONY: default test requpgrade lint +.PHONY: default test partest requpgrade lint default: test; @@ -36,6 +36,9 @@ serve-visdom: test: pytest . +partestc: + pytest -n 4 . 
+ requirements.txt: pip-compile requirements.in diff --git a/examples-scripts/rl/mujoco/ddpg/half_cheetah_ddpg.py b/examples-scripts/rl/mujoco/ddpg/half_cheetah_ddpg.py index 6b0e200b..9c0a679b 100644 --- a/examples-scripts/rl/mujoco/ddpg/half_cheetah_ddpg.py +++ b/examples-scripts/rl/mujoco/ddpg/half_cheetah_ddpg.py @@ -2,22 +2,22 @@ import torch.optim from vel.api import TrainingInfo, EpochInfo -from vel.modules.input.normalize_observations import NormalizeObservationsFactory -from vel.rl.buffers.circular_replay_buffer import CircularReplayBuffer +from vel.module.input.normalize_observations import NormalizeObservationsFactory +from vel.rl.buffer.circular_replay_buffer import CircularReplayBuffer from vel.rl.env_roller.transition_replay_env_roller import TransitionReplayEnvRoller from vel.rl.metrics import EpisodeRewardMetric -from vel.rl.modules.noise.ou_noise import OuNoise +from vel.rl.module.noise.ou_noise import OuNoise from vel.storage.streaming.stdout import StdoutStreaming from vel.util.random import set_seed from vel.rl.env.mujoco import MujocoEnv -from vel.rl.models.deterministic_policy_model import DeterministicPolicyModelFactory -from vel.rl.models.backbone.mlp import MLPFactory -from vel.rl.reinforcers.buffered_off_policy_iteration_reinforcer import ( +from vel.rl.model.deterministic_policy_model import DeterministicPolicyModelFactory +from vel.rl.model.backbone.mlp import MLPFactory +from vel.rl.reinforcer.buffered_off_policy_iteration_reinforcer import ( BufferedOffPolicyIterationReinforcer, BufferedOffPolicyIterationReinforcerSettings ) from vel.rl.algo.policy_gradient.ddpg import DeepDeterministicPolicyGradient from vel.rl.vecenv.dummy import DummyVecEnvWrapper -from vel.optimizers.adam import AdamFactory +from vel.optimizer.adam import AdamFactory def half_cheetah_ddpg(): diff --git a/setup.py b/setup.py index e6290b56..8a8fa5b9 100644 --- a/setup.py +++ b/setup.py @@ -44,7 +44,7 @@ 'mongo': ['pymongo', 'dnspython'], 'gym': ['gym[atari,box2d,classic_control]'], 'mujoco': ['gym[mujoco,robotics]'], - 'dev': ['pytest', 'ipython', 'jupyter', 'pip-tools', 'flake8'], + 'dev': ['pytest', 'ipython', 'jupyter', 'pip-tools', 'flake8', 'pytest-xdist'], 'text': ['spacy'], 'all': ['visdom', 'pymongo', 'dnspython', 'gym[all]', 'pytest', 'spacy', 'ipython', 'jupyter'] }, diff --git a/vel/api/model.py b/vel/api/model.py index 699d8a45..9164442f 100644 --- a/vel/api/model.py +++ b/vel/api/model.py @@ -3,7 +3,7 @@ import vel.util.module_util as mu -from vel.metrics.loss_metric import Loss +from vel.metric.loss_metric import Loss from vel.util.summary import summary diff --git a/vel/api/model_factory.py b/vel/api/model_factory.py index e2e61896..eeb533a0 100644 --- a/vel/api/model_factory.py +++ b/vel/api/model_factory.py @@ -1,5 +1,5 @@ from .model import Model -from vel.internals.generic_factory import GenericFactory +from vel.internal.generic_factory import GenericFactory class ModelFactory: diff --git a/vel/command/lr_find_command.py b/vel/command/lr_find_command.py index f1de1e20..32544c5d 100644 --- a/vel/command/lr_find_command.py +++ b/vel/command/lr_find_command.py @@ -6,10 +6,10 @@ import numpy as np import tqdm -import vel.util.intepolate as interp +import vel.util.interpolate as interp from vel.api import Learner, TrainingInfo, EpochInfo, BatchInfo -from vel.api.metrics.averaging_metric import AveragingNamedMetric +from vel.metric.averaging_metric import AveragingNamedMetric class LrFindCommand: diff --git a/vel/command/train_command.py b/vel/command/train_command.py index 
f9b6afd2..2e708706 100644 --- a/vel/command/train_command.py +++ b/vel/command/train_command.py @@ -2,7 +2,7 @@ import vel.api as api -from vel.callbacks.time_tracker import TimeTracker +from vel.callback.time_tracker import TimeTracker class SimpleTrainCommand: diff --git a/vel/internal/parser.py b/vel/internal/parser.py index 6c79e10d..d26f000a 100644 --- a/vel/internal/parser.py +++ b/vel/internal/parser.py @@ -1,7 +1,7 @@ import os import yaml -from vel.exceptions import VelException +from vel.exception import VelException class Dummy: diff --git a/vel/internal/provider.py b/vel/internal/provider.py index 526181cc..d694ad24 100644 --- a/vel/internal/provider.py +++ b/vel/internal/provider.py @@ -1,8 +1,8 @@ import importlib import inspect -from vel.internals.parser import Variable -from vel.internals.generic_factory import GenericFactory +from vel.internal.parser import Variable +from vel.internal.generic_factory import GenericFactory class Provider: diff --git a/vel/internal/test/test_parser.py b/vel/internal/test/test_parser.py index 67fdf2d5..d2fc7053 100644 --- a/vel/internal/test/test_parser.py +++ b/vel/internal/test/test_parser.py @@ -1,6 +1,6 @@ import pytest -import vel.internals.parser as v +import vel.internal.parser as v @pytest.fixture diff --git a/vel/internal/test/test_provider.py b/vel/internal/test/test_provider.py index 4f49e675..7428756f 100644 --- a/vel/internal/test/test_provider.py +++ b/vel/internal/test/test_provider.py @@ -1,9 +1,9 @@ import os import pytest -import vel.internals.provider as v -import vel.internals.parser as p -import vel.exceptions as e +import vel.internal.provider as v +import vel.internal.parser as p +import vel.exception as e def data_function(a, b): @@ -37,17 +37,17 @@ def test_simple_injection(): 'a': 1, 'b': 2, 'one': { - 'name': 'vel.internals.tests.fixture_a' + 'name': 'vel.internal.test.fixture_a' }, 'two': { - 'name': 'vel.internals.tests.fixture_a', + 'name': 'vel.internal.test.fixture_a', 'a': 5, 'b': 6 }, 'three': { - 'name': 'vel.internals.tests.fixture_b', + 'name': 'vel.internal.test.fixture_b', 'd': 'd' } }) @@ -78,20 +78,20 @@ def test_parameter_resolution(): 'a': 1, 'b': p.Parameter("xxx"), 'one': { - 'name': 'vel.internals.tests.fixture_a' + 'name': 'vel.internal.test.fixture_a' }, 'two': { - 'name': 'vel.internals.tests.fixture_a', + 'name': 'vel.internal.test.fixture_a', 'b': p.Parameter('yyy') }, 'three': { - 'name': 'vel.internals.tests.fixture_a', + 'name': 'vel.internal.test.fixture_a', 'b': p.Parameter('yyy', 7) }, 'four': { - 'name': 'vel.internals.tests.fixture_a', + 'name': 'vel.internal.test.fixture_a', 'b': p.EnvironmentVariable('TEST_VAR') }, @@ -120,20 +120,20 @@ def test_render_configuration(): 'a': 1, 'b': p.Parameter("xxx"), 'one': { - 'name': 'vel.internals.tests.fixture_a' + 'name': 'vel.internal.test.fixture_a' }, 'two': { - 'name': 'vel.internals.tests.fixture_a', + 'name': 'vel.internal.test.fixture_a', 'b': p.Parameter('yyy', 5) }, 'three': { - 'name': 'vel.internals.tests.fixture_a', + 'name': 'vel.internal.test.fixture_a', 'b': p.Parameter('yyy', 7) }, 'four': { - 'name': 'vel.internals.tests.fixture_a', + 'name': 'vel.internal.test.fixture_a', 'b': p.EnvironmentVariable('TEST_VAR') }, @@ -145,20 +145,20 @@ def test_render_configuration(): 'a': 1, 'b': 5, 'one': { - 'name': 'vel.internals.tests.fixture_a' + 'name': 'vel.internal.test.fixture_a' }, 'two': { - 'name': 'vel.internals.tests.fixture_a', + 'name': 'vel.internal.test.fixture_a', 'b': 5 }, 'three': { - 'name': 
'vel.internals.tests.fixture_a', + 'name': 'vel.internal.test.fixture_a', 'b': 7 }, 'four': { - 'name': 'vel.internals.tests.fixture_a', + 'name': 'vel.internal.test.fixture_a', 'b': '10' }, } diff --git a/vel/launcher.py b/vel/launcher.py index e4c68114..3f800638 100644 --- a/vel/launcher.py +++ b/vel/launcher.py @@ -4,7 +4,7 @@ import sys from vel.api.model_config import ModelConfig -from vel.internals.parser import Parser +from vel.internal.parser import Parser def main(): diff --git a/vel/metric/__init__.py b/vel/metric/__init__.py index e69de29b..7bb2fe79 100644 --- a/vel/metric/__init__.py +++ b/vel/metric/__init__.py @@ -0,0 +1,3 @@ +from .base_metric import BaseMetric # noqa +from .averaging_metric import AveragingMetric, AveragingNamedMetric, AveragingSupervisedMetric # noqa +from .value_metric import ValueMetric # noqa diff --git a/vel/metric/accuracy.py b/vel/metric/accuracy.py index 8cb332bf..442f8470 100644 --- a/vel/metric/accuracy.py +++ b/vel/metric/accuracy.py @@ -1,4 +1,4 @@ -from vel.api.metrics.averaging_metric import AveragingSupervisedMetric +from vel.metric.averaging_metric import AveragingSupervisedMetric class Accuracy(AveragingSupervisedMetric): diff --git a/vel/metric/loss_metric.py b/vel/metric/loss_metric.py index 8de3707d..1e02ce4d 100644 --- a/vel/metric/loss_metric.py +++ b/vel/metric/loss_metric.py @@ -1,4 +1,4 @@ -from vel.api.metrics.averaging_metric import AveragingMetric +from vel.metric.averaging_metric import AveragingMetric class Loss(AveragingMetric): diff --git a/vel/model/autoencoder/mnist_cnn_autoencoder.py b/vel/model/autoencoder/mnist_cnn_autoencoder.py index fa90e4d5..0bb3197e 100644 --- a/vel/model/autoencoder/mnist_cnn_autoencoder.py +++ b/vel/model/autoencoder/mnist_cnn_autoencoder.py @@ -7,8 +7,8 @@ import vel.util.network as net_util from vel.api import LossFunctionModel, ModelFactory -from vel.metrics.loss_metric import Loss -from vel.modules.layers import Flatten, Reshape +from vel.metric.loss_metric import Loss +from vel.module.layers import Flatten, Reshape class MnistCnnAutoencoder(LossFunctionModel): diff --git a/vel/model/autoencoder/mnist_cnn_vae.py b/vel/model/autoencoder/mnist_cnn_vae.py index 6765c874..b678a9de 100644 --- a/vel/model/autoencoder/mnist_cnn_vae.py +++ b/vel/model/autoencoder/mnist_cnn_vae.py @@ -8,9 +8,9 @@ import vel.util.network as net_util from vel.api import SupervisedModel, ModelFactory -from vel.api.metrics import AveragingNamedMetric -from vel.metrics.loss_metric import Loss -from vel.modules.layers import Flatten, Reshape +from vel.metric.averaging_metric import AveragingNamedMetric +from vel.metric.loss_metric import Loss +from vel.module.layers import Flatten, Reshape class MnistCnnVAE(SupervisedModel): diff --git a/vel/model/vision/cifar10_cnn_01.py b/vel/model/vision/cifar10_cnn_01.py index 50dc1328..3f3551af 100644 --- a/vel/model/vision/cifar10_cnn_01.py +++ b/vel/model/vision/cifar10_cnn_01.py @@ -9,8 +9,8 @@ import torch.nn.functional as F from vel.api import LossFunctionModel, ModelFactory -from vel.metrics.loss_metric import Loss -from vel.metrics.accuracy import Accuracy +from vel.metric.loss_metric import Loss +from vel.metric.accuracy import Accuracy class Net(LossFunctionModel): diff --git a/vel/model/vision/cifar_resnet_v1.py b/vel/model/vision/cifar_resnet_v1.py index fef562c8..2a19ffa8 100644 --- a/vel/model/vision/cifar_resnet_v1.py +++ b/vel/model/vision/cifar_resnet_v1.py @@ -74,8 +74,8 @@ def loss_value(self, x_data, y_true, y_pred): def metrics(self): """ Set of metrics for this 
model """ - from vel.metrics.loss_metric import Loss - from vel.metrics.accuracy import Accuracy + from vel.metric.loss_metric import Loss + from vel.metric.accuracy import Accuracy return [Loss(), Accuracy()] diff --git a/vel/model/vision/cifar_resnet_v2.py b/vel/model/vision/cifar_resnet_v2.py index 3bc03b52..2d44ab01 100644 --- a/vel/model/vision/cifar_resnet_v2.py +++ b/vel/model/vision/cifar_resnet_v2.py @@ -76,16 +76,10 @@ def loss_value(self, x_data, y_true, y_pred): def metrics(self): """ Set of metrics for this model """ - from vel.metrics.loss_metric import Loss - from vel.metrics.accuracy import Accuracy + from vel.metric.loss_metric import Loss + from vel.metric.accuracy import Accuracy return [Loss(), Accuracy()] - def summary(self): - """ Print model summary """ - # import torchsummary - # torchsummary.summary(self, input_size=(3, 32, 32)) - print(self) - def create(blocks, mode='basic', inplanes=16, divisor=4, num_classes=1000): """ Vel factory function """ diff --git a/vel/model/vision/mnist_cnn_01.py b/vel/model/vision/mnist_cnn_01.py index 08472f4d..513f33c0 100644 --- a/vel/model/vision/mnist_cnn_01.py +++ b/vel/model/vision/mnist_cnn_01.py @@ -10,8 +10,8 @@ from vel.api import LossFunctionModel, ModelFactory -from vel.metrics.loss_metric import Loss -from vel.metrics.accuracy import Accuracy +from vel.metric.loss_metric import Loss +from vel.metric.accuracy import Accuracy class Net(LossFunctionModel): diff --git a/vel/phase/cycle.py b/vel/phase/cycle.py index b1862323..9b38b8be 100644 --- a/vel/phase/cycle.py +++ b/vel/phase/cycle.py @@ -1,6 +1,6 @@ import numpy as np -import vel.util.intepolate as interp +import vel.util.interpolate as interp from vel.api import BatchInfo, EpochInfo, TrainingInfo, Callback, TrainPhase diff --git a/vel/rl/algo/distributional_dqn.py b/vel/rl/algo/distributional_dqn.py index dfa050ec..4b05ecf2 100644 --- a/vel/rl/algo/distributional_dqn.py +++ b/vel/rl/algo/distributional_dqn.py @@ -2,7 +2,7 @@ import torch.nn.utils from vel.api import ModelFactory -from vel.api.metrics.averaging_metric import AveragingNamedMetric +from vel.metric.averaging_metric import AveragingNamedMetric from vel.rl.api import OptimizerAlgoBase diff --git a/vel/rl/algo/policy_gradient/a2c.py b/vel/rl/algo/policy_gradient/a2c.py index 783f954a..86485184 100644 --- a/vel/rl/algo/policy_gradient/a2c.py +++ b/vel/rl/algo/policy_gradient/a2c.py @@ -1,8 +1,8 @@ import torch import torch.nn.functional as F -from vel.api.metrics.averaging_metric import AveragingNamedMetric -from vel.math.functions import explained_variance +from vel.metric.averaging_metric import AveragingNamedMetric +from vel.math.function import explained_variance from vel.rl.api import OptimizerAlgoBase, Rollout, Trajectories from vel.rl.discount_bootstrap import discount_bootstrap_gae diff --git a/vel/rl/algo/policy_gradient/acer.py b/vel/rl/algo/policy_gradient/acer.py index 82cd3d9a..9426957d 100644 --- a/vel/rl/algo/policy_gradient/acer.py +++ b/vel/rl/algo/policy_gradient/acer.py @@ -1,7 +1,7 @@ import torch import torch.nn.functional as F -from vel.api.metrics.averaging_metric import AveragingNamedMetric +from vel.metric.averaging_metric import AveragingNamedMetric from vel.rl.api import Trajectories, OptimizerAlgoBase diff --git a/vel/rl/buffer/backend/circular_buffer_backend.py b/vel/rl/buffer/backend/circular_buffer_backend.py index a328d7fa..7be43246 100644 --- a/vel/rl/buffer/backend/circular_buffer_backend.py +++ b/vel/rl/buffer/backend/circular_buffer_backend.py @@ -1,7 +1,7 @@ import gym 
import numpy as np -from vel.exceptions import VelException +from vel.exception import VelException class CircularBufferBackend: diff --git a/vel/rl/buffer/test/test_circular_buffer_backend.py b/vel/rl/buffer/test/test_circular_buffer_backend.py index 53031de8..6cf7c9fc 100644 --- a/vel/rl/buffer/test/test_circular_buffer_backend.py +++ b/vel/rl/buffer/test/test_circular_buffer_backend.py @@ -4,8 +4,8 @@ import numpy.testing as nt import pytest -from vel.exceptions import VelException -from vel.rl.buffers.backend.circular_buffer_backend import CircularBufferBackend +from vel.exception import VelException +from vel.rl.buffer.backend.circular_buffer_backend import CircularBufferBackend def get_half_filled_buffer(): diff --git a/vel/rl/buffer/test/test_circular_vec_env_buffer_backend.py b/vel/rl/buffer/test/test_circular_vec_env_buffer_backend.py index 7b45a77e..53336980 100644 --- a/vel/rl/buffer/test/test_circular_vec_env_buffer_backend.py +++ b/vel/rl/buffer/test/test_circular_vec_env_buffer_backend.py @@ -4,8 +4,8 @@ import numpy.testing as nt import pytest -from vel.exceptions import VelException -from vel.rl.buffers.circular_replay_buffer import CircularVecEnvBufferBackend +from vel.exception import VelException +from vel.rl.buffer.circular_replay_buffer import CircularVecEnvBufferBackend def get_half_filled_buffer(frame_history=1): diff --git a/vel/rl/buffer/test/test_prioritized_circular_buffer_backend.py b/vel/rl/buffer/test/test_prioritized_circular_buffer_backend.py index d9907ebc..f603d65e 100644 --- a/vel/rl/buffer/test/test_prioritized_circular_buffer_backend.py +++ b/vel/rl/buffer/test/test_prioritized_circular_buffer_backend.py @@ -5,8 +5,8 @@ import numpy.testing as nt import pytest -from vel.exceptions import VelException -from vel.rl.buffers.backend.prioritized_buffer_backend import PrioritizedCircularBufferBackend +from vel.exception import VelException +from vel.rl.buffer.backend.prioritized_buffer_backend import PrioritizedCircularBufferBackend def get_halfempty_buffer_with_dones(): diff --git a/vel/rl/command/rl_train_command.py b/vel/rl/command/rl_train_command.py index e1c0d9fb..0e852826 100644 --- a/vel/rl/command/rl_train_command.py +++ b/vel/rl/command/rl_train_command.py @@ -2,7 +2,7 @@ from vel.api import ModelConfig, EpochInfo, TrainingInfo, BatchInfo, OptimizerFactory, Storage, Callback from vel.rl.api import ReinforcerFactory -from vel.callbacks.time_tracker import TimeTracker +from vel.callback.time_tracker import TimeTracker import vel.openai.baselines.logger as openai_logger diff --git a/vel/rl/metrics.py b/vel/rl/metrics.py index 3f5be7b2..d41cf25a 100644 --- a/vel/rl/metrics.py +++ b/vel/rl/metrics.py @@ -4,7 +4,7 @@ import torch from vel.api import BatchInfo -from vel.api.metrics import BaseMetric, AveragingMetric, ValueMetric +from vel.metric import BaseMetric, AveragingMetric, ValueMetric class FramesMetric(ValueMetric): diff --git a/vel/rl/model/backbone/double_noisy_nature_cnn.py b/vel/rl/model/backbone/double_noisy_nature_cnn.py index ca6626b5..a55fc8ed 100644 --- a/vel/rl/model/backbone/double_noisy_nature_cnn.py +++ b/vel/rl/model/backbone/double_noisy_nature_cnn.py @@ -13,7 +13,7 @@ import vel.util.network as net_util from vel.api import LinearBackboneModel, ModelFactory -from vel.rl.modules.noisy_linear import NoisyLinear +from vel.rl.module.noisy_linear import NoisyLinear class DoubleNoisyNatureCnn(LinearBackboneModel): diff --git a/vel/rl/model/backbone/noisy_nature_cnn.py b/vel/rl/model/backbone/noisy_nature_cnn.py index 7f5e3b64..d258543e 
100644 --- a/vel/rl/model/backbone/noisy_nature_cnn.py +++ b/vel/rl/model/backbone/noisy_nature_cnn.py @@ -13,7 +13,7 @@ import vel.util.network as net_util from vel.api import LinearBackboneModel, ModelFactory -from vel.rl.modules.noisy_linear import NoisyLinear +from vel.rl.module.noisy_linear import NoisyLinear class NoisyNatureCnn(LinearBackboneModel): diff --git a/vel/rl/model/q_noisy_model.py b/vel/rl/model/q_noisy_model.py index 9dc73e6e..b2d747bb 100644 --- a/vel/rl/model/q_noisy_model.py +++ b/vel/rl/model/q_noisy_model.py @@ -2,10 +2,10 @@ import typing from vel.api import LinearBackboneModel, ModelFactory, BackboneModel -from vel.modules.input.identity import IdentityFactory +from vel.module.input.identity import IdentityFactory from vel.rl.api import Rollout, RlModel, Evaluator -from vel.rl.models.q_model import QModelEvaluator -from vel.rl.modules.q_noisy_head import QNoisyHead +from vel.rl.model.q_model import QModelEvaluator +from vel.rl.module.q_noisy_head import QNoisyHead class NoisyQModel(RlModel): diff --git a/vel/rl/model/q_rainbow_model.py b/vel/rl/model/q_rainbow_model.py index 3e5aea33..d9b9dfbf 100644 --- a/vel/rl/model/q_rainbow_model.py +++ b/vel/rl/model/q_rainbow_model.py @@ -2,10 +2,10 @@ import typing from vel.api import LinearBackboneModel, Model, ModelFactory, BackboneModel -from vel.modules.input.identity import IdentityFactory +from vel.module.input.identity import IdentityFactory from vel.rl.api import Rollout, Evaluator -from vel.rl.models.q_distributional_model import QDistributionalModelEvaluator -from vel.rl.modules.q_distributional_noisy_dueling_head import QDistributionalNoisyDuelingHead +from vel.rl.model.q_distributional_model import QDistributionalModelEvaluator +from vel.rl.module.q_distributional_noisy_dueling_head import QDistributionalNoisyDuelingHead class QRainbowModel(Model): diff --git a/vel/rl/model/stochastic_policy_model_separate.py b/vel/rl/model/stochastic_policy_model_separate.py index 50ab5ffd..7612fde3 100644 --- a/vel/rl/model/stochastic_policy_model_separate.py +++ b/vel/rl/model/stochastic_policy_model_separate.py @@ -3,11 +3,11 @@ import typing from vel.api import LinearBackboneModel, ModelFactory, BackboneModel -from vel.modules.input.identity import IdentityFactory +from vel.module.input.identity import IdentityFactory from vel.rl.api import Rollout, RlModel, Evaluator -from vel.rl.modules.action_head import ActionHead -from vel.rl.modules.value_head import ValueHead -from vel.rl.models.stochastic_policy_model import StochasticPolicyEvaluator +from vel.rl.module.action_head import ActionHead +from vel.rl.module.value_head import ValueHead +from vel.rl.model.stochastic_policy_model import StochasticPolicyEvaluator class StochasticPolicyModelSeparate(RlModel): diff --git a/vel/rl/module/noise/eps_greedy.py b/vel/rl/module/noise/eps_greedy.py index b5e6f0c9..5764a489 100644 --- a/vel/rl/module/noise/eps_greedy.py +++ b/vel/rl/module/noise/eps_greedy.py @@ -4,8 +4,8 @@ import torch.nn as nn from vel.api import Schedule -from vel.internals.generic_factory import GenericFactory -from vel.schedules.constant import ConstantSchedule +from vel.internal.generic_factory import GenericFactory +from vel.schedule.constant import ConstantSchedule class EpsGreedy(nn.Module): diff --git a/vel/rl/module/noise/ou_noise.py b/vel/rl/module/noise/ou_noise.py index be6ea0d8..a87f9786 100644 --- a/vel/rl/module/noise/ou_noise.py +++ b/vel/rl/module/noise/ou_noise.py @@ -2,8 +2,8 @@ import numpy as np import torch.nn as nn -from vel.math.processes 
import OrnsteinUhlenbeckNoiseProcess -from vel.internals.generic_factory import GenericFactory +from vel.math.process import OrnsteinUhlenbeckNoiseProcess +from vel.internal.generic_factory import GenericFactory class OuNoise(nn.Module): diff --git a/vel/rl/module/q_distributional_noisy_dueling_head.py b/vel/rl/module/q_distributional_noisy_dueling_head.py index 4c5c30bb..3e0f2794 100644 --- a/vel/rl/module/q_distributional_noisy_dueling_head.py +++ b/vel/rl/module/q_distributional_noisy_dueling_head.py @@ -5,7 +5,7 @@ import torch.nn.functional as F -from vel.rl.modules.noisy_linear import NoisyLinear +from vel.rl.module.noisy_linear import NoisyLinear class QDistributionalNoisyDuelingHead(nn.Module): diff --git a/vel/rl/module/q_noisy_head.py b/vel/rl/module/q_noisy_head.py index 63f510d6..8b171e1c 100644 --- a/vel/rl/module/q_noisy_head.py +++ b/vel/rl/module/q_noisy_head.py @@ -2,7 +2,7 @@ import gym.spaces as spaces -from vel.rl.modules.noisy_linear import NoisyLinear +from vel.rl.module.noisy_linear import NoisyLinear class QNoisyHead(nn.Module): diff --git a/vel/rl/module/test/test_action_head.py b/vel/rl/module/test/test_action_head.py index 5e3ff74c..6dc22e06 100644 --- a/vel/rl/module/test/test_action_head.py +++ b/vel/rl/module/test/test_action_head.py @@ -7,7 +7,7 @@ import torch.nn.functional as F import torch.distributions as d -from vel.rl.modules.action_head import DiagGaussianActionHead, CategoricalActionHead +from vel.rl.module.action_head import DiagGaussianActionHead, CategoricalActionHead def test_sample_diag_gaussian(): diff --git a/vel/rl/test/test_integration.py b/vel/rl/test/test_integration.py index 42aac5e3..2f51b419 100644 --- a/vel/rl/test/test_integration.py +++ b/vel/rl/test/test_integration.py @@ -1,19 +1,19 @@ import torch import torch.optim as optim -from vel.modules.input.image_to_tensor import ImageToTensorFactory -from vel.modules.input.normalize_observations import NormalizeObservationsFactory -from vel.rl.buffers.circular_replay_buffer import CircularReplayBuffer -from vel.rl.buffers.prioritized_circular_replay_buffer import PrioritizedCircularReplayBuffer -from vel.rl.commands.rl_train_command import FrameTracker +from vel.module.input.image_to_tensor import ImageToTensorFactory +from vel.module.input.normalize_observations import NormalizeObservationsFactory +from vel.rl.buffer.circular_replay_buffer import CircularReplayBuffer +from vel.rl.buffer.prioritized_circular_replay_buffer import PrioritizedCircularReplayBuffer +from vel.rl.command.rl_train_command import FrameTracker from vel.rl.env_roller.step_env_roller import StepEnvRoller from vel.rl.env_roller.trajectory_replay_env_roller import TrajectoryReplayEnvRoller from vel.rl.env_roller.transition_replay_env_roller import TransitionReplayEnvRoller from vel.rl.metrics import EpisodeRewardMetric -from vel.rl.modules.noise.eps_greedy import EpsGreedy -from vel.rl.modules.noise.ou_noise import OuNoise -from vel.schedules.linear import LinearSchedule -from vel.schedules.linear_and_constant import LinearAndConstantSchedule +from vel.rl.module.noise.eps_greedy import EpsGreedy +from vel.rl.module.noise.ou_noise import OuNoise +from vel.schedule.linear import LinearSchedule +from vel.schedule.linear_and_constant import LinearAndConstantSchedule from vel.util.random import set_seed from vel.rl.env.classic_atari import ClassicAtariEnv @@ -21,24 +21,24 @@ from vel.rl.vecenv.subproc import SubprocVecEnvWrapper from vel.rl.vecenv.dummy import DummyVecEnvWrapper -from vel.rl.models.stochastic_policy_model 
import StochasticPolicyModelFactory -from vel.rl.models.q_stochastic_policy_model import QStochasticPolicyModelFactory -from vel.rl.models.q_model import QModelFactory -from vel.rl.models.deterministic_policy_model import DeterministicPolicyModelFactory -from vel.rl.models.stochastic_policy_model_separate import StochasticPolicyModelSeparateFactory +from vel.rl.model.stochastic_policy_model import StochasticPolicyModelFactory +from vel.rl.model.q_stochastic_policy_model import QStochasticPolicyModelFactory +from vel.rl.model.q_model import QModelFactory +from vel.rl.model.deterministic_policy_model import DeterministicPolicyModelFactory +from vel.rl.model.stochastic_policy_model_separate import StochasticPolicyModelSeparateFactory -from vel.rl.models.backbone.nature_cnn import NatureCnnFactory -from vel.rl.models.backbone.mlp import MLPFactory +from vel.rl.model.backbone.nature_cnn import NatureCnnFactory +from vel.rl.model.backbone.mlp import MLPFactory -from vel.rl.reinforcers.on_policy_iteration_reinforcer import ( +from vel.rl.reinforcer.on_policy_iteration_reinforcer import ( OnPolicyIterationReinforcer, OnPolicyIterationReinforcerSettings ) -from vel.rl.reinforcers.buffered_off_policy_iteration_reinforcer import ( +from vel.rl.reinforcer.buffered_off_policy_iteration_reinforcer import ( BufferedOffPolicyIterationReinforcer, BufferedOffPolicyIterationReinforcerSettings ) -from vel.rl.reinforcers.buffered_mixed_policy_iteration_reinforcer import ( +from vel.rl.reinforcer.buffered_mixed_policy_iteration_reinforcer import ( BufferedMixedPolicyIterationReinforcer, BufferedMixedPolicyIterationReinforcerSettings ) diff --git a/vel/schedule/linear.py b/vel/schedule/linear.py index a3f88c4f..58ca5a23 100644 --- a/vel/schedule/linear.py +++ b/vel/schedule/linear.py @@ -1,4 +1,4 @@ -import vel.util.intepolate as interpolate +import vel.util.interpolate as interpolate from vel.api import Schedule diff --git a/vel/schedule/linear_and_constant.py b/vel/schedule/linear_and_constant.py index f04b9e4a..fecf5d19 100644 --- a/vel/schedule/linear_and_constant.py +++ b/vel/schedule/linear_and_constant.py @@ -1,4 +1,4 @@ -import vel.util.intepolate as interpolate +import vel.util.interpolate as interpolate from vel.api import Schedule From 9150db17d094a3fb4638f70a4accd4c3034f4ab6 Mon Sep 17 00:00:00 2001 From: Jerry Tworek Date: Sat, 15 Jun 2019 21:25:06 -0700 Subject: [PATCH 043/162] Renaming models to policies. 
--- .velproject.yaml | 12 +- .../rl/atari/a2c/airraid_a2c.yaml | 69 -------- .../rl/atari/a2c/breakout_a2c.yaml | 69 -------- .../rl/atari/a2c/freeway_a2c.yaml | 69 -------- examples-configs/rl/atari/a2c/pacman_a2c.yaml | 69 -------- .../rl/atari/a2c/pitfall_a2c.yaml | 69 -------- .../rl/atari/a2c/pong_a2c_lstm.yaml | 71 -------- examples-configs/rl/atari/a2c/qbert_a2c.yaml | 70 -------- .../rl/atari/a2c/space_invaders_a2c.yaml | 69 -------- .../rl/atari/acer/breakout_acer.yaml | 82 --------- .../acer/breakout_acer_trust_region.yaml | 83 ---------- .../acer/seaquest_acer_trust_region.yaml | 83 ---------- .../rl/atari/acer/space_invaders_acer.yaml | 82 --------- .../space_invaders_acer_trust_region.yaml | 83 ---------- .../{a2c/pong_a2c.yaml => atari_a2c.yaml} | 25 +-- ...kout_a2c_lstm.yaml => atari_a2c_lstm.yaml} | 4 +- ...rmsprop.yaml => atari_a2c_tf_rmsprop.yaml} | 4 +- .../beam_rider_acer.yaml => atari_acer.yaml} | 4 +- ...gion.yaml => atari_acer_trust_region.yaml} | 4 +- .../{ppo/breakout_ppo.yaml => atari_ppo.yaml} | 27 +-- ...eakout_ppo_gru.yaml => atari_ppo_gru.yaml} | 22 +-- .../breakout_trpo.yaml => atari_trpo.yaml} | 5 +- .../{breakout_ddqn.yaml => atari_ddqn.yaml} | 4 +- ...nal.yaml => atari_dqn_distributional.yaml} | 4 +- ...eakout_dqn_raw.yaml => atari_dqn_raw.yaml} | 4 +- ...ling_ddqn.yaml => atari_dueling_ddqn.yaml} | 4 +- ...ml => atari_dueling_ddqn_prioritized.yaml} | 4 +- .../dqn/seaquest_dqn_distributional.yaml | 90 ---------- .../rl/atari/dqn/seaquest_dqn_raw.yaml | 86 ---------- .../dqn_rainbow_param/asterix_rp_dqn_raw.yaml | 89 ---------- .../atari_rainbow.yaml} | 4 +- ....yaml => atari_rp_dqn_distributional.yaml} | 4 +- ...isynet.yaml => atari_rp_dqn_noisynet.yaml} | 5 +- ...dqn_nstep.yaml => atari_rp_dqn_nstep.yaml} | 5 +- ..._rp_dqn_raw.yaml => atari_rp_dqn_raw.yaml} | 5 +- .../atlantis_rp_dqn_raw.yaml | 88 ---------- examples-configs/rl/atari/ppo/enduro_ppo.yaml | 85 ---------- examples-configs/rl/atari/ppo/qbert_ppo.yaml | 81 --------- examples-scripts/rl/atari/a2c/breakout_a2c.py | 10 +- .../rl/atari/a2c/breakout_a2c_evaluate.py | 2 +- .../rl/mujoco/ddpg/half_cheetah_ddpg.py | 2 +- vel/api/model.py | 4 + vel/model/vision/cifar_resnet_v1.py | 2 +- vel/model/vision/cifar_resnet_v2.py | 2 +- vel/module/input/one_hot_encoding.py | 2 +- vel/module/rnn_cell.py | 4 + vel/rl/algo/policy_gradient/trpo.py | 2 +- vel/rl/api/__init__.py | 2 +- vel/rl/api/algo_base.py | 4 +- vel/rl/api/env_roller.py | 15 +- vel/rl/api/model.py | 50 ------ vel/rl/api/policy.py | 22 +++ vel/rl/api/rollout.py | 2 +- vel/rl/{model => backbone}/__init__.py | 0 .../{model => }/backbone/double_nature_cnn.py | 0 .../backbone/double_noisy_nature_cnn.py | 0 vel/rl/{model => }/backbone/lstm.py | 0 vel/rl/{model => }/backbone/mlp.py | 0 vel/rl/{model => }/backbone/nature_cnn.py | 0 vel/rl/{model => }/backbone/nature_cnn_rnn.py | 11 +- .../{model => }/backbone/nature_cnn_small.py | 0 .../{model => }/backbone/noisy_nature_cnn.py | 0 vel/rl/env_roller/step_env_roller.py | 42 ++--- ...tion_head.py => stochastic_action_head.py} | 11 +- vel/rl/{model/backbone => policy}/__init__.py | 0 vel/rl/policy/purgatory/__init__.py | 0 .../purgatory/deterministic_policy.py} | 0 .../purgatory/q_distributional_policy.py} | 0 .../purgatory/q_dueling_policy.py} | 0 vel/rl/{model => policy/purgatory}/q_model.py | 0 .../purgatory}/q_noisy_model.py | 0 .../purgatory}/q_rainbow_model.py | 0 .../purgatory}/q_stochastic_policy_model.py | 8 +- .../purgatory/stochastic_policy.py} | 8 +- .../stochastic_policy_model_separate.py | 8 
+- .../purgatory/stochastic_rnn_policy.py} | 8 +- vel/rl/policy/stochastic_policy.py | 121 ++++++++++++++ vel/rl/policy/stochastic_rnn_policy.py | 156 ++++++++++++++++++ ...fered_mixed_policy_iteration_reinforcer.py | 4 +- ...uffered_off_policy_iteration_reinforcer.py | 2 +- .../on_policy_iteration_reinforcer.py | 26 +-- vel/rl/test/test_integration.py | 4 +- vel/rl/util/actor.py | 36 ++++ vel/storage/streaming/tensorboard.py | 40 +++++ vel/util/tensor_util.py | 14 ++ 85 files changed, 562 insertions(+), 1699 deletions(-) delete mode 100644 examples-configs/rl/atari/a2c/airraid_a2c.yaml delete mode 100644 examples-configs/rl/atari/a2c/breakout_a2c.yaml delete mode 100644 examples-configs/rl/atari/a2c/freeway_a2c.yaml delete mode 100644 examples-configs/rl/atari/a2c/pacman_a2c.yaml delete mode 100644 examples-configs/rl/atari/a2c/pitfall_a2c.yaml delete mode 100644 examples-configs/rl/atari/a2c/pong_a2c_lstm.yaml delete mode 100644 examples-configs/rl/atari/a2c/qbert_a2c.yaml delete mode 100644 examples-configs/rl/atari/a2c/space_invaders_a2c.yaml delete mode 100644 examples-configs/rl/atari/acer/breakout_acer.yaml delete mode 100644 examples-configs/rl/atari/acer/breakout_acer_trust_region.yaml delete mode 100644 examples-configs/rl/atari/acer/seaquest_acer_trust_region.yaml delete mode 100644 examples-configs/rl/atari/acer/space_invaders_acer.yaml delete mode 100644 examples-configs/rl/atari/acer/space_invaders_acer_trust_region.yaml rename examples-configs/rl/atari/{a2c/pong_a2c.yaml => atari_a2c.yaml} (65%) rename examples-configs/rl/atari/{a2c/breakout_a2c_lstm.yaml => atari_a2c_lstm.yaml} (95%) rename examples-configs/rl/atari/{a2c/breakout_a2c_tf_rmsprop.yaml => atari_a2c_tf_rmsprop.yaml} (94%) rename examples-configs/rl/atari/{acer/beam_rider_acer.yaml => atari_acer.yaml} (96%) rename examples-configs/rl/atari/{acer/beam_rider_acer_trust_region.yaml => atari_acer_trust_region.yaml} (96%) rename examples-configs/rl/atari/{ppo/breakout_ppo.yaml => atari_ppo.yaml} (70%) rename examples-configs/rl/atari/{ppo/breakout_ppo_gru.yaml => atari_ppo_gru.yaml} (74%) rename examples-configs/rl/atari/{trpo/breakout_trpo.yaml => atari_trpo.yaml} (96%) rename examples-configs/rl/atari/dqn/{breakout_ddqn.yaml => atari_ddqn.yaml} (96%) rename examples-configs/rl/atari/dqn/{breakout_dqn_distributional.yaml => atari_dqn_distributional.yaml} (96%) rename examples-configs/rl/atari/dqn/{breakout_dqn_raw.yaml => atari_dqn_raw.yaml} (96%) rename examples-configs/rl/atari/dqn/{breakout_dueling_ddqn.yaml => atari_dueling_ddqn.yaml} (96%) rename examples-configs/rl/atari/dqn/{breakout_dueling_ddqn_prioritized.yaml => atari_dueling_ddqn_prioritized.yaml} (96%) delete mode 100644 examples-configs/rl/atari/dqn/seaquest_dqn_distributional.yaml delete mode 100644 examples-configs/rl/atari/dqn/seaquest_dqn_raw.yaml delete mode 100644 examples-configs/rl/atari/dqn_rainbow_param/asterix_rp_dqn_raw.yaml rename examples-configs/rl/atari/{rainbow/breakout_rainbow.yaml => dqn_rainbow_param/atari_rainbow.yaml} (97%) rename examples-configs/rl/atari/dqn_rainbow_param/{asterix_rp_dqn_distributional.yaml => atari_rp_dqn_distributional.yaml} (96%) rename examples-configs/rl/atari/dqn_rainbow_param/{asteroids_rp_dqn_noisynet.yaml => atari_rp_dqn_noisynet.yaml} (96%) rename examples-configs/rl/atari/dqn_rainbow_param/{atlantis_rp_dqn_nstep.yaml => atari_rp_dqn_nstep.yaml} (96%) rename examples-configs/rl/atari/dqn_rainbow_param/{asteroids_rp_dqn_raw.yaml => atari_rp_dqn_raw.yaml} (96%) delete mode 100644 
examples-configs/rl/atari/dqn_rainbow_param/atlantis_rp_dqn_raw.yaml delete mode 100644 examples-configs/rl/atari/ppo/enduro_ppo.yaml delete mode 100644 examples-configs/rl/atari/ppo/qbert_ppo.yaml delete mode 100644 vel/rl/api/model.py create mode 100644 vel/rl/api/policy.py rename vel/rl/{model => backbone}/__init__.py (100%) rename vel/rl/{model => }/backbone/double_nature_cnn.py (100%) rename vel/rl/{model => }/backbone/double_noisy_nature_cnn.py (100%) rename vel/rl/{model => }/backbone/lstm.py (100%) rename vel/rl/{model => }/backbone/mlp.py (100%) rename vel/rl/{model => }/backbone/nature_cnn.py (100%) rename vel/rl/{model => }/backbone/nature_cnn_rnn.py (83%) rename vel/rl/{model => }/backbone/nature_cnn_small.py (100%) rename vel/rl/{model => }/backbone/noisy_nature_cnn.py (100%) rename vel/rl/module/{action_head.py => stochastic_action_head.py} (96%) rename vel/rl/{model/backbone => policy}/__init__.py (100%) create mode 100644 vel/rl/policy/purgatory/__init__.py rename vel/rl/{model/deterministic_policy_model.py => policy/purgatory/deterministic_policy.py} (100%) rename vel/rl/{model/q_distributional_model.py => policy/purgatory/q_distributional_policy.py} (100%) rename vel/rl/{model/q_dueling_model.py => policy/purgatory/q_dueling_policy.py} (100%) rename vel/rl/{model => policy/purgatory}/q_model.py (100%) rename vel/rl/{model => policy/purgatory}/q_noisy_model.py (100%) rename vel/rl/{model => policy/purgatory}/q_rainbow_model.py (100%) rename vel/rl/{model => policy/purgatory}/q_stochastic_policy_model.py (94%) rename vel/rl/{model/stochastic_policy_model.py => policy/purgatory/stochastic_policy.py} (94%) rename vel/rl/{model => policy/purgatory}/stochastic_policy_model_separate.py (94%) rename vel/rl/{model/stochastic_policy_rnn_model.py => policy/purgatory/stochastic_rnn_policy.py} (95%) create mode 100644 vel/rl/policy/stochastic_policy.py create mode 100644 vel/rl/policy/stochastic_rnn_policy.py create mode 100644 vel/rl/util/actor.py create mode 100644 vel/storage/streaming/tensorboard.py diff --git a/.velproject.yaml b/.velproject.yaml index 5921e596..2b6bbabd 100644 --- a/.velproject.yaml +++ b/.velproject.yaml @@ -2,12 +2,16 @@ storage: name: vel.storage.classic backend: - name: vel.storage.backend.mongodb - uri: 'mongodb://localhost:27017/' - database: deep_learning + name: vel.storage.backend.dummy + +# Other potential setting +# name: vel.storage.backend.mongodb +# uri: 'mongodb://localhost:27017/' +# database: deep_learning streaming: - - name: vel.storage.streaming.visdom + - name: vel.storage.streaming.tensorboard +# - name: vel.storage.streaming.visdom - name: vel.storage.streaming.stdout diff --git a/examples-configs/rl/atari/a2c/airraid_a2c.yaml b/examples-configs/rl/atari/a2c/airraid_a2c.yaml deleted file mode 100644 index 6869f6ad..00000000 --- a/examples-configs/rl/atari/a2c/airraid_a2c.yaml +++ /dev/null @@ -1,69 +0,0 @@ -name: 'airraid_a2c' - - -env: - name: vel.rl.env.classic_atari - game: 'AirRaidNoFrameskip-v4' - - -vec_env: - name: vel.rl.vecenv.shared_mem - frame_history: 4 # How many stacked frames go into a single observation - - -model: - name: vel.rl.models.stochastic_policy_model - - input_block: - name: vel.modules.input.image_to_tensor - - backbone: - name: vel.rl.models.backbone.nature_cnn - input_width: 84 - input_height: 84 - input_channels: 4 # The same as frame_history - - -reinforcer: - name: vel.rl.reinforcers.on_policy_iteration_reinforcer - - algo: - name: vel.rl.algo.policy_gradient.a2c - entropy_coefficient: 0.01 - 
value_coefficient: 0.5 - max_grad_norm: 0.5 - discount_factor: 0.99 - - env_roller: - name: vel.rl.env_roller.step_env_roller - - number_of_steps: 5 # How many environment steps go into a single batch - parallel_envs: 16 # How many environments to run in parallel - - -optimizer: - name: vel.optimizers.rmsprop - lr: 7.0e-4 - alpha: 0.99 - epsilon: 1.0e-3 - - -commands: - train: - name: vel.rl.commands.rl_train_command - total_frames: 1.1e7 - batches_per_epoch: 100 - - record: - name: vel.rl.commands.record_movie_command - takes: 10 - videoname: 'airraid_vid_{:04}.avi' - - evaluate: - name: vel.rl.commands.evaluate_env_command - parallel_envs: 16 # How many environments to run in parallel - - takes: 20 - - visdom: - name: vel.commands.vis_store_command diff --git a/examples-configs/rl/atari/a2c/breakout_a2c.yaml b/examples-configs/rl/atari/a2c/breakout_a2c.yaml deleted file mode 100644 index 77b7593b..00000000 --- a/examples-configs/rl/atari/a2c/breakout_a2c.yaml +++ /dev/null @@ -1,69 +0,0 @@ -name: 'breakout_a2c' - - -env: - name: vel.rl.env.classic_atari - game: 'BreakoutNoFrameskip-v4' - - -vec_env: - name: vel.rl.vecenv.shared_mem - frame_history: 4 # How many stacked frames go into a single observation - - -model: - name: vel.rl.models.stochastic_policy_model - - input_block: - name: vel.modules.input.image_to_tensor - - backbone: - name: vel.rl.models.backbone.nature_cnn - input_width: 84 - input_height: 84 - input_channels: 4 # The same as frame_history - - -reinforcer: - name: vel.rl.reinforcers.on_policy_iteration_reinforcer - - algo: - name: vel.rl.algo.policy_gradient.a2c - entropy_coefficient: 0.01 - value_coefficient: 0.5 - max_grad_norm: 0.5 - discount_factor: 0.99 - - env_roller: - name: vel.rl.env_roller.step_env_roller - - number_of_steps: 5 # How many environment steps go into a single batch - parallel_envs: 16 # How many environments to run in parallel - - -optimizer: - name: vel.optimizers.rmsprop - lr: 7.0e-4 - alpha: 0.99 - epsilon: 1.0e-3 - - -commands: - train: - name: vel.rl.commands.rl_train_command - total_frames: 1.1e7 - batches_per_epoch: 100 - - record: - name: vel.rl.commands.record_movie_command - takes: 10 - videoname: 'breakout_vid_{:04}.avi' - - evaluate: - name: vel.rl.commands.evaluate_env_command - parallel_envs: 16 # How many environments to run in parallel - - takes: 20 - - visdom: - name: vel.commands.vis_store_command diff --git a/examples-configs/rl/atari/a2c/freeway_a2c.yaml b/examples-configs/rl/atari/a2c/freeway_a2c.yaml deleted file mode 100644 index d8f7ce2c..00000000 --- a/examples-configs/rl/atari/a2c/freeway_a2c.yaml +++ /dev/null @@ -1,69 +0,0 @@ -name: 'freeway_a2c' - - -env: - name: vel.rl.env.classic_atari - game: 'FreewayNoFrameskip-v4' - - -vec_env: - name: vel.rl.vecenv.shared_mem - frame_history: 4 # How many stacked frames go into a single observation - - -model: - name: vel.rl.models.stochastic_policy_model - - input_block: - name: vel.modules.input.image_to_tensor - - backbone: - name: vel.rl.models.backbone.nature_cnn - input_width: 84 - input_height: 84 - input_channels: 4 # The same as frame_history - - -reinforcer: - name: vel.rl.reinforcers.on_policy_iteration_reinforcer - - algo: - name: vel.rl.algo.policy_gradient.a2c - entropy_coefficient: 0.01 - value_coefficient: 0.5 - max_grad_norm: 0.5 - discount_factor: 0.99 - - env_roller: - name: vel.rl.env_roller.step_env_roller - - number_of_steps: 5 # How many environment steps go into a single batch - parallel_envs: 16 # How many environments to run in parallel - - -optimizer: 
- name: vel.optimizers.rmsprop - lr: 7.0e-4 - alpha: 0.99 - epsilon: 1.0e-3 - - -commands: - train: - name: vel.rl.commands.rl_train_command - total_frames: 1.1e7 - batches_per_epoch: 100 - - record: - name: vel.rl.commands.record_movie_command - takes: 10 - videoname: 'freeway_vid_{:04}.avi' - - evaluate: - name: vel.rl.commands.evaluate_env_command - parallel_envs: 16 # How many environments to run in parallel - - takes: 20 - - visdom: - name: vel.commands.vis_store_command diff --git a/examples-configs/rl/atari/a2c/pacman_a2c.yaml b/examples-configs/rl/atari/a2c/pacman_a2c.yaml deleted file mode 100644 index 3ad255b1..00000000 --- a/examples-configs/rl/atari/a2c/pacman_a2c.yaml +++ /dev/null @@ -1,69 +0,0 @@ -name: 'pacman_a2c' - - -env: - name: vel.rl.env.classic_atari - game: 'MsPacmanNoFrameskip-v4' - - -vec_env: - name: vel.rl.vecenv.shared_mem - frame_history: 4 # How many stacked frames go into a single observation - - -model: - name: vel.rl.models.stochastic_policy_model - - input_block: - name: vel.modules.input.image_to_tensor - - backbone: - name: vel.rl.models.backbone.nature_cnn - input_width: 84 - input_height: 84 - input_channels: 4 # The same as frame_history - - -reinforcer: - name: vel.rl.reinforcers.on_policy_iteration_reinforcer - - algo: - name: vel.rl.algo.policy_gradient.a2c - entropy_coefficient: 0.01 - value_coefficient: 0.5 - max_grad_norm: 0.5 - discount_factor: 0.99 - - env_roller: - name: vel.rl.env_roller.step_env_roller - - number_of_steps: 5 # How many environment steps go into a single batch - parallel_envs: 16 # How many environments to run in parallel - - -optimizer: - name: vel.optimizers.rmsprop - lr: 7.0e-4 - alpha: 0.99 - epsilon: 1.0e-3 - - -commands: - train: - name: vel.rl.commands.rl_train_command - total_frames: 1.1e7 - batches_per_epoch: 100 - - record: - name: vel.rl.commands.record_movie_command - takes: 10 - videoname: 'pacman_vid_{:04}.avi' - - evaluate: - name: vel.rl.commands.evaluate_env_command - parallel_envs: 16 # How many environments to run in parallel - - takes: 20 - - visdom: - name: vel.commands.vis_store_command diff --git a/examples-configs/rl/atari/a2c/pitfall_a2c.yaml b/examples-configs/rl/atari/a2c/pitfall_a2c.yaml deleted file mode 100644 index fa9b467e..00000000 --- a/examples-configs/rl/atari/a2c/pitfall_a2c.yaml +++ /dev/null @@ -1,69 +0,0 @@ -name: 'pitfall_a2c' - - -env: - name: vel.rl.env.classic_atari - game: 'PitfallNoFrameskip-v4' - - -vec_env: - name: vel.rl.vecenv.shared_mem - frame_history: 4 # How many stacked frames go into a single observation - - -model: - name: vel.rl.models.stochastic_policy_model - - input_block: - name: vel.modules.input.image_to_tensor - - backbone: - name: vel.rl.models.backbone.nature_cnn - input_width: 84 - input_height: 84 - input_channels: 4 # The same as frame_history - - -reinforcer: - name: vel.rl.reinforcers.on_policy_iteration_reinforcer - - algo: - name: vel.rl.algo.policy_gradient.a2c - entropy_coefficient: 0.01 - value_coefficient: 0.5 - max_grad_norm: 0.5 - discount_factor: 0.99 - - env_roller: - name: vel.rl.env_roller.step_env_roller - - number_of_steps: 5 # How many environment steps go into a single batch - parallel_envs: 16 # How many environments to run in parallel - - -optimizer: - name: vel.optimizers.rmsprop - lr: 7.0e-4 - alpha: 0.99 - epsilon: 1.0e-3 - - -commands: - train: - name: vel.rl.commands.rl_train_command - total_frames: 1.1e7 - batches_per_epoch: 100 - - record: - name: vel.rl.commands.record_movie_command - takes: 10 - videoname: 
'pitfall_vid_{:04}.avi' - - evaluate: - name: vel.rl.commands.evaluate_env_command - parallel_envs: 16 # How many environments to run in parallel - - takes: 20 - - visdom: - name: vel.commands.vis_store_command diff --git a/examples-configs/rl/atari/a2c/pong_a2c_lstm.yaml b/examples-configs/rl/atari/a2c/pong_a2c_lstm.yaml deleted file mode 100644 index e7023fd6..00000000 --- a/examples-configs/rl/atari/a2c/pong_a2c_lstm.yaml +++ /dev/null @@ -1,71 +0,0 @@ -name: 'pong_a2c_lstm' - - -env: - name: vel.rl.env.classic_atari - game: 'PongNoFrameskip-v4' - - -vec_env: - name: vel.rl.vecenv.shared_mem - frame_history: 4 # How many stacked frames go into a single observation - - -model: - name: vel.rl.models.stochastic_policy_rnn_model - - input_block: - name: vel.modules.input.image_to_tensor - - backbone: - name: vel.rl.models.backbone.nature_cnn_rnn - input_width: 84 - input_height: 84 - input_channels: 4 # The same as frame_history - - -reinforcer: - name: vel.rl.reinforcers.on_policy_iteration_reinforcer - - algo: - name: vel.rl.algo.policy_gradient.a2c - entropy_coefficient: 0.01 - value_coefficient: 0.5 - max_grad_norm: 0.5 - discount_factor: 0.99 - - env_roller: - name: vel.rl.env_roller.step_env_roller - - number_of_steps: 5 # How many environment steps go into a single batch - parallel_envs: 16 # How many environments to run in parallel - - shuffle_transitions: off # Required for RNN policies - - -optimizer: - name: vel.optimizers.rmsprop - lr: 7.0e-4 - alpha: 0.99 - epsilon: 1.0e-3 - - -commands: - train: - name: vel.rl.commands.rl_train_command - total_frames: 1.1e7 - batches_per_epoch: 100 - - record: - name: vel.rl.commands.record_movie_command - takes: 10 - videoname: 'pong_vid_{:04}.avi' - - evaluate: - name: vel.rl.commands.evaluate_env_command - parallel_envs: 16 # How many environments to run in parallel - - takes: 20 - - visdom: - name: vel.commands.vis_store_command diff --git a/examples-configs/rl/atari/a2c/qbert_a2c.yaml b/examples-configs/rl/atari/a2c/qbert_a2c.yaml deleted file mode 100644 index 32c97af6..00000000 --- a/examples-configs/rl/atari/a2c/qbert_a2c.yaml +++ /dev/null @@ -1,70 +0,0 @@ -name: 'qbert_a2c' - - -env: - name: vel.rl.env.classic_atari - game: 'QbertNoFrameskip-v4' - - -vec_env: - name: vel.rl.vecenv.shared_mem - frame_history: 4 # How many stacked frames go into a single observation - - -model: - name: vel.rl.models.stochastic_policy_model - - input_block: - name: vel.modules.input.image_to_tensor - - backbone: - name: vel.rl.models.backbone.nature_cnn - input_width: 84 - input_height: 84 - input_channels: 4 # The same as frame_history - rnn_type: 'lstm' - - -reinforcer: - name: vel.rl.reinforcers.on_policy_iteration_reinforcer - - algo: - name: vel.rl.algo.policy_gradient.a2c - entropy_coefficient: 0.01 - value_coefficient: 0.5 - max_grad_norm: 0.5 - discount_factor: 0.99 - - env_roller: - name: vel.rl.env_roller.step_env_roller - - number_of_steps: 5 # How many environment steps go into a single batch - parallel_envs: 16 # How many environments to run in parallel - - -optimizer: - name: vel.optimizers.rmsprop - lr: 7.0e-4 - alpha: 0.99 - epsilon: 1.0e-3 - - -commands: - train: - name: vel.rl.commands.rl_train_command - total_frames: 1.1e7 - batches_per_epoch: 100 - - record: - name: vel.rl.commands.record_movie_command - takes: 10 - videoname: 'qbert_vid_{:04}.avi' - - evaluate: - name: vel.rl.commands.evaluate_env_command - parallel_envs: 16 # How many environments to run in parallel - - takes: 20 - - visdom: - name: vel.commands.vis_store_command 
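The per-game A2C configurations deleted above and below are consolidated later in this patch into single parameterized configs. A minimal sketch of the shared pattern, assuming the !param tag accepts an inline default that can be overridden when a run is launched (the override mechanism itself is not shown in this patch):

env:
  name: vel.rl.env.classic_atari
  game: !param game = 'BreakoutNoFrameskip-v4'  # Breakout is the default; per-run override is assumed, not shown here

Keeping the game as a parameter removes the need for one near-identical YAML file per Atari title.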
diff --git a/examples-configs/rl/atari/a2c/space_invaders_a2c.yaml b/examples-configs/rl/atari/a2c/space_invaders_a2c.yaml deleted file mode 100644 index 6333f1d2..00000000 --- a/examples-configs/rl/atari/a2c/space_invaders_a2c.yaml +++ /dev/null @@ -1,69 +0,0 @@ -name: 'space_invaders_a2c' - - -env: - name: vel.rl.env.classic_atari - game: 'SpaceInvadersNoFrameskip-v4' - - -vec_env: - name: vel.rl.vecenv.shared_mem - frame_history: 4 # How many stacked frames go into a single observation - - -model: - name: vel.rl.models.stochastic_policy_model - - input_block: - name: vel.modules.input.image_to_tensor - - backbone: - name: vel.rl.models.backbone.nature_cnn - input_width: 84 - input_height: 84 - input_channels: 4 # The same as frame_history - - -reinforcer: - name: vel.rl.reinforcers.on_policy_iteration_reinforcer - - algo: - name: vel.rl.algo.policy_gradient.a2c - entropy_coefficient: 0.01 - value_coefficient: 0.5 - max_grad_norm: 0.5 - discount_factor: 0.99 - - env_roller: - name: vel.rl.env_roller.step_env_roller - - number_of_steps: 5 # How many environment steps go into a single batch - parallel_envs: 16 # How many environments to run in parallel - - -optimizer: - name: vel.optimizers.rmsprop - lr: 7.0e-4 - alpha: 0.99 - epsilon: 1.0e-3 - - -commands: - train: - name: vel.rl.commands.rl_train_command - total_frames: 1.1e7 - batches_per_epoch: 100 - - record: - name: vel.rl.commands.record_movie_command - takes: 10 - videoname: 'space_invaders_vid_{:04}.avi' - - evaluate: - name: vel.rl.commands.evaluate_env_command - parallel_envs: 16 # How many environments to run in parallel - - takes: 20 - - visdom: - name: vel.commands.vis_store_command diff --git a/examples-configs/rl/atari/acer/breakout_acer.yaml b/examples-configs/rl/atari/acer/breakout_acer.yaml deleted file mode 100644 index 5d4d787e..00000000 --- a/examples-configs/rl/atari/acer/breakout_acer.yaml +++ /dev/null @@ -1,82 +0,0 @@ -name: 'breakout_acer_notr' - - -env: - name: vel.rl.env.classic_atari - game: 'BreakoutNoFrameskip-v4' - - -vec_env: - name: vel.rl.vecenv.shared_mem - frame_history: 4 # How many stacked frames go into a single observation - - -model: - name: vel.rl.models.q_stochastic_policy_model - - input_block: - name: vel.modules.input.image_to_tensor - - backbone: - name: vel.rl.models.backbone.nature_cnn - input_width: 84 - input_height: 84 - input_channels: 4 # The same as frame_history - - -reinforcer: - name: vel.rl.reinforcers.buffered_mixed_policy_iteration_reinforcer - - env_roller: - name: vel.rl.env_roller.trajectory_replay_env_roller - - replay_buffer: - name: vel.rl.buffers.circular_replay_buffer - - buffer_initial_size: 1_000 # How many samples we need in the buffer before we start using replay buffer - buffer_capacity: 50_000 - - # Because env has a framestack already built-in, save memory by encoding only last frames in the replay buffer - frame_stack_compensation: true - frame_history: 4 # How many stacked frames go into a single observation - - algo: - name: vel.rl.algo.policy_gradient.acer - entropy_coefficient: 0.01 - q_coefficient: 0.5 - rho_cap: 10.0 - retrace_rho_cap: 1.0 - - max_grad_norm: 10.0 - discount_factor: 0.99 - - trust_region: false - - parallel_envs: 12 # How many environments to run in parallel - number_of_steps: 20 # How many environment steps go into a single batch - experience_replay: 4 - - -optimizer: - name: vel.optimizers.rmsprop - lr: 7.0e-4 - alpha: 0.99 -# epsilon: 1.0e-5 - epsilon: 1.0e-3 - - -commands: - train: - name: vel.rl.commands.rl_train_command - 
total_frames: 1.1e7 - batches_per_epoch: 30 - - record: - name: vel.rl.commands.record_movie_command - takes: 10 - videoname: 'breakout_vid_{:04}.avi' - - evaluate: - name: vel.rl.commands.evaluate_env_command - takes: 100 - parallel_envs: 12 # How many environments to run in parallel diff --git a/examples-configs/rl/atari/acer/breakout_acer_trust_region.yaml b/examples-configs/rl/atari/acer/breakout_acer_trust_region.yaml deleted file mode 100644 index 7fc824ab..00000000 --- a/examples-configs/rl/atari/acer/breakout_acer_trust_region.yaml +++ /dev/null @@ -1,83 +0,0 @@ -name: 'breakout_acer_trust_region' - - -env: - name: vel.rl.env.classic_atari - game: 'BreakoutNoFrameskip-v4' - - -vec_env: - name: vel.rl.vecenv.shared_mem - frame_history: 4 # How many stacked frames go into a single observation - - -model: - name: vel.rl.models.q_stochastic_policy_model - - input_block: - name: vel.modules.input.image_to_tensor - - backbone: - name: vel.rl.models.backbone.nature_cnn - input_width: 84 - input_height: 84 - input_channels: 4 # The same as frame_history - - -reinforcer: - name: vel.rl.reinforcers.buffered_mixed_policy_iteration_reinforcer - - env_roller: - name: vel.rl.env_roller.trajectory_replay_env_roller - - replay_buffer: - name: vel.rl.buffers.circular_replay_buffer - - buffer_initial_size: 1_000 # How many samples we need in the buffer before we start using replay buffer - buffer_capacity: 50_000 - - # Because env has a framestack already built-in, save memory by encoding only last frames in the replay buffer - frame_stack_compensation: true - frame_history: 4 # How many stacked frames go into a single observation - - algo: - name: vel.rl.algo.policy_gradient.acer - entropy_coefficient: 0.01 - q_coefficient: 0.5 - rho_cap: 10.0 - retrace_rho_cap: 1.0 - - max_grad_norm: 10.0 - discount_factor: 0.99 - - trust_region: true - trust_region_delta: 1.0 - - parallel_envs: 12 # How many environments to run in parallel - number_of_steps: 20 # How many environment steps go into a single batch - experience_replay: 4 - - -optimizer: - name: vel.optimizers.rmsprop - lr: 7.0e-4 - alpha: 0.99 - # epsilon: 1.0e-5 - epsilon: 1.0e-3 - - -commands: - train: - name: vel.rl.commands.rl_train_command - total_frames: 1.1e7 - batches_per_epoch: 30 - - record: - name: vel.rl.commands.record_movie_command - takes: 10 - videoname: 'breakout_vid_{:04}.avi' - - evaluate: - name: vel.rl.commands.evaluate_env_command - takes: 100 - parallel_envs: 12 # How many environments to run in parallel diff --git a/examples-configs/rl/atari/acer/seaquest_acer_trust_region.yaml b/examples-configs/rl/atari/acer/seaquest_acer_trust_region.yaml deleted file mode 100644 index 4fab32f0..00000000 --- a/examples-configs/rl/atari/acer/seaquest_acer_trust_region.yaml +++ /dev/null @@ -1,83 +0,0 @@ -name: 'seaquest_acer' - - -env: - name: vel.rl.env.classic_atari - game: 'SeaquestNoFrameskip-v4' - - -vec_env: - name: vel.rl.vecenv.shared_mem - frame_history: 4 # How many stacked frames go into a single observation - - -model: - name: vel.rl.models.q_stochastic_policy_model - - input_block: - name: vel.modules.input.image_to_tensor - - backbone: - name: vel.rl.models.backbone.nature_cnn - input_width: 84 - input_height: 84 - input_channels: 4 # The same as frame_history - - -reinforcer: - name: vel.rl.reinforcers.buffered_mixed_policy_iteration_reinforcer - - env_roller: - name: vel.rl.env_roller.trajectory_replay_env_roller - - replay_buffer: - name: vel.rl.buffers.circular_replay_buffer - - buffer_initial_size: 1_000 # How many 
samples we need in the buffer before we start using replay buffer - buffer_capacity: 50_000 - - # Because env has a framestack already built-in, save memory by encoding only last frames in the replay buffer - frame_stack_compensation: true - frame_history: 4 # How many stacked frames go into a single observation - - algo: - name: vel.rl.algo.policy_gradient.acer - entropy_coefficient: 0.01 - q_coefficient: 0.5 - rho_cap: 10.0 - retrace_rho_cap: 1.0 - - max_grad_norm: 10.0 - discount_factor: 0.99 - - trust_region: true - trust_region_delta: 1.0 - - parallel_envs: 12 # How many environments to run in parallel - number_of_steps: 20 # How many environment steps go into a single batch - experience_replay: 4 - - -optimizer: - name: vel.optimizers.rmsprop - lr: 7.0e-4 - alpha: 0.99 - # epsilon: 1.0e-5 - epsilon: 1.0e-3 - - -commands: - train: - name: vel.rl.commands.rl_train_command - total_frames: 1.1e7 - batches_per_epoch: 10 - - record: - name: vel.rl.commands.record_movie_command - takes: 10 - videoname: 'seaquest_vid_{:04}.avi' - - evaluate: - name: vel.rl.commands.evaluate_env_command - takes: 100 - parallel_envs: 12 # How many environments to run in parallel diff --git a/examples-configs/rl/atari/acer/space_invaders_acer.yaml b/examples-configs/rl/atari/acer/space_invaders_acer.yaml deleted file mode 100644 index b1dd8001..00000000 --- a/examples-configs/rl/atari/acer/space_invaders_acer.yaml +++ /dev/null @@ -1,82 +0,0 @@ -name: 'spaceinvaders_acer_notr' - - -env: - name: vel.rl.env.classic_atari - game: 'SpaceInvadersNoFrameskip-v4' - - -vec_env: - name: vel.rl.vecenv.shared_mem - frame_history: 4 # How many stacked frames go into a single observation - - -model: - name: vel.rl.models.q_stochastic_policy_model - - input_block: - name: vel.modules.input.image_to_tensor - - backbone: - name: vel.rl.models.backbone.nature_cnn - input_width: 84 - input_height: 84 - input_channels: 4 # The same as frame_history - - -reinforcer: - name: vel.rl.reinforcers.buffered_mixed_policy_iteration_reinforcer - - env_roller: - name: vel.rl.env_roller.trajectory_replay_env_roller - - replay_buffer: - name: vel.rl.buffers.circular_replay_buffer - - buffer_initial_size: 1_000 # How many samples we need in the buffer before we start using replay buffer - buffer_capacity: 50_000 - - # Because env has a framestack already built-in, save memory by encoding only last frames in the replay buffer - frame_stack_compensation: true - frame_history: 4 # How many stacked frames go into a single observation - - algo: - name: vel.rl.algo.policy_gradient.acer - entropy_coefficient: 0.01 - q_coefficient: 0.5 - rho_cap: 10.0 - retrace_rho_cap: 1.0 - - max_grad_norm: 10.0 - discount_factor: 0.99 - - trust_region: false - - parallel_envs: 12 # How many environments to run in parallel - number_of_steps: 20 # How many environment steps go into a single batch - experience_replay: 4 - - -optimizer: - name: vel.optimizers.rmsprop - lr: 7.0e-4 - alpha: 0.99 - # epsilon: 1.0e-5 - epsilon: 1.0e-3 - - -commands: - train: - name: vel.rl.commands.rl_train_command - total_frames: 1.1e7 - batches_per_epoch: 30 - - record: - name: vel.rl.commands.record_movie_command - takes: 10 - videoname: 'spaceinvaders_vid_{:04}.avi' - - evaluate: - name: vel.rl.commands.evaluate_env_command - takes: 100 - parallel_envs: 12 # How many environments to run in parallel diff --git a/examples-configs/rl/atari/acer/space_invaders_acer_trust_region.yaml b/examples-configs/rl/atari/acer/space_invaders_acer_trust_region.yaml deleted file mode 100644 index 
0564d229..00000000 --- a/examples-configs/rl/atari/acer/space_invaders_acer_trust_region.yaml +++ /dev/null @@ -1,83 +0,0 @@ -name: 'spaceinvaders_acer' - - -env: - name: vel.rl.env.classic_atari - game: 'SpaceInvadersNoFrameskip-v4' - - -vec_env: - name: vel.rl.vecenv.shared_mem - frame_history: 4 # How many stacked frames go into a single observation - - -model: - name: vel.rl.models.q_stochastic_policy_model - - input_block: - name: vel.modules.input.image_to_tensor - - backbone: - name: vel.rl.models.backbone.nature_cnn - input_width: 84 - input_height: 84 - input_channels: 4 # The same as frame_history - - -reinforcer: - name: vel.rl.reinforcers.buffered_mixed_policy_iteration_reinforcer - - env_roller: - name: vel.rl.env_roller.trajectory_replay_env_roller - - replay_buffer: - name: vel.rl.buffers.circular_replay_buffer - - buffer_initial_size: 1_000 # How many samples we need in the buffer before we start using replay buffer - buffer_capacity: 50_000 - - # Because env has a framestack already built-in, save memory by encoding only last frames in the replay buffer - frame_stack_compensation: true - frame_history: 4 # How many stacked frames go into a single observation - - algo: - name: vel.rl.algo.policy_gradient.acer - entropy_coefficient: 0.01 - q_coefficient: 0.5 - rho_cap: 10.0 - retrace_rho_cap: 1.0 - - max_grad_norm: 10.0 - discount_factor: 0.99 - - trust_region: true - trust_region_delta: 1.0 - - parallel_envs: 12 # How many environments to run in parallel - number_of_steps: 20 # How many environment steps go into a single batch - experience_replay: 4 - - -optimizer: - name: vel.optimizers.rmsprop - lr: 7.0e-4 - alpha: 0.99 - # epsilon: 1.0e-5 - epsilon: 1.0e-3 - - -commands: - train: - name: vel.rl.commands.rl_train_command - total_frames: 1.1e7 - batches_per_epoch: 10 - - record: - name: vel.rl.commands.record_movie_command - takes: 10 - videoname: 'spaceinvaders_vid_{:04}.avi' - - evaluate: - name: vel.rl.commands.evaluate_env_command - takes: 100 - parallel_envs: 12 # How many environments to run in parallel diff --git a/examples-configs/rl/atari/a2c/pong_a2c.yaml b/examples-configs/rl/atari/atari_a2c.yaml similarity index 65% rename from examples-configs/rl/atari/a2c/pong_a2c.yaml rename to examples-configs/rl/atari/atari_a2c.yaml index 8b15fb6b..1d15f2dd 100644 --- a/examples-configs/rl/atari/a2c/pong_a2c.yaml +++ b/examples-configs/rl/atari/atari_a2c.yaml @@ -1,9 +1,9 @@ -name: 'pong_a2c' +name: 'atari_a2c' env: name: vel.rl.env.classic_atari - game: 'PongNoFrameskip-v4' + game: !param game = 'BreakoutNoFrameskip-v4' vec_env: @@ -12,20 +12,21 @@ vec_env: model: - name: vel.rl.models.stochastic_policy_model + name: vel.rl.policy.stochastic_policy input_block: - name: vel.modules.input.image_to_tensor + name: vel.module.input.image_to_tensor backbone: - name: vel.rl.models.backbone.nature_cnn + name: vel.rl.backbone.nature_cnn + input_width: 84 input_height: 84 input_channels: 4 # The same as frame_history reinforcer: - name: vel.rl.reinforcers.on_policy_iteration_reinforcer + name: vel.rl.reinforcer.on_policy_iteration_reinforcer algo: name: vel.rl.algo.policy_gradient.a2c @@ -42,7 +43,7 @@ reinforcer: optimizer: - name: vel.optimizers.rmsprop + name: vel.optimizer.rmsprop lr: 7.0e-4 alpha: 0.99 epsilon: 1.0e-3 @@ -50,20 +51,20 @@ optimizer: commands: train: - name: vel.rl.commands.rl_train_command + name: vel.rl.command.rl_train_command total_frames: 1.1e7 batches_per_epoch: 100 record: - name: vel.rl.commands.record_movie_command + name: 
vel.rl.command.record_movie_command takes: 10 - videoname: 'pong_vid_{:04}.avi' + videoname: 'atari_vid_{:04}.avi' evaluate: - name: vel.rl.commands.evaluate_env_command + name: vel.rl.command.evaluate_env_command parallel_envs: 16 # How many environments to run in parallel takes: 20 visdom: - name: vel.commands.vis_store_command + name: vel.command.vis_store_command diff --git a/examples-configs/rl/atari/a2c/breakout_a2c_lstm.yaml b/examples-configs/rl/atari/atari_a2c_lstm.yaml similarity index 95% rename from examples-configs/rl/atari/a2c/breakout_a2c_lstm.yaml rename to examples-configs/rl/atari/atari_a2c_lstm.yaml index 8593be1c..36947571 100644 --- a/examples-configs/rl/atari/a2c/breakout_a2c_lstm.yaml +++ b/examples-configs/rl/atari/atari_a2c_lstm.yaml @@ -1,9 +1,9 @@ -name: 'breakout_a2c_lstm' +name: 'atari_a2c_lstm' env: name: vel.rl.env.classic_atari - game: 'BreakoutNoFrameskip-v4' + game: !param game = 'BreakoutNoFrameskip-v4' vec_env: diff --git a/examples-configs/rl/atari/a2c/breakout_a2c_tf_rmsprop.yaml b/examples-configs/rl/atari/atari_a2c_tf_rmsprop.yaml similarity index 94% rename from examples-configs/rl/atari/a2c/breakout_a2c_tf_rmsprop.yaml rename to examples-configs/rl/atari/atari_a2c_tf_rmsprop.yaml index a3acb3f2..3fa29e5b 100644 --- a/examples-configs/rl/atari/a2c/breakout_a2c_tf_rmsprop.yaml +++ b/examples-configs/rl/atari/atari_a2c_tf_rmsprop.yaml @@ -1,9 +1,9 @@ -name: 'breakout_a2c_tf_rmsprop' +name: 'atari_a2c_tf_rmsprop' env: name: vel.rl.env.classic_atari - game: 'BreakoutNoFrameskip-v4' + game: !param game = 'BreakoutNoFrameskip-v4' vec_env: diff --git a/examples-configs/rl/atari/acer/beam_rider_acer.yaml b/examples-configs/rl/atari/atari_acer.yaml similarity index 96% rename from examples-configs/rl/atari/acer/beam_rider_acer.yaml rename to examples-configs/rl/atari/atari_acer.yaml index 123c3c0b..c488883e 100644 --- a/examples-configs/rl/atari/acer/beam_rider_acer.yaml +++ b/examples-configs/rl/atari/atari_acer.yaml @@ -1,9 +1,9 @@ -name: 'beamrider_acer_notr' +name: 'atari_acer' env: name: vel.rl.env.classic_atari - game: 'BeamRiderNoFrameskip-v4' + game: !param game = 'BreakoutNoFrameskip-v4' vec_env: diff --git a/examples-configs/rl/atari/acer/beam_rider_acer_trust_region.yaml b/examples-configs/rl/atari/atari_acer_trust_region.yaml similarity index 96% rename from examples-configs/rl/atari/acer/beam_rider_acer_trust_region.yaml rename to examples-configs/rl/atari/atari_acer_trust_region.yaml index 3758d9ea..99bae873 100644 --- a/examples-configs/rl/atari/acer/beam_rider_acer_trust_region.yaml +++ b/examples-configs/rl/atari/atari_acer_trust_region.yaml @@ -1,9 +1,9 @@ -name: 'beamrider_acer' +name: 'atari_acer_trust_region' env: name: vel.rl.env.classic_atari - game: 'BeamRiderNoFrameskip-v4' + game: !param game = 'BreakoutNoFrameskip-v4' vec_env: diff --git a/examples-configs/rl/atari/ppo/breakout_ppo.yaml b/examples-configs/rl/atari/atari_ppo.yaml similarity index 70% rename from examples-configs/rl/atari/ppo/breakout_ppo.yaml rename to examples-configs/rl/atari/atari_ppo.yaml index 1a1c4e50..4850edba 100644 --- a/examples-configs/rl/atari/ppo/breakout_ppo.yaml +++ b/examples-configs/rl/atari/atari_ppo.yaml @@ -1,9 +1,9 @@ -name: 'breakout_ppo' +name: 'atari_ppo' env: name: vel.rl.env.classic_atari - game: 'BreakoutNoFrameskip-v4' + game: !param game = 'BreakoutNoFrameskip-v4' vec_env: @@ -12,20 +12,20 @@ vec_env: model: - name: vel.rl.models.stochastic_policy_model + name: vel.rl.policy.stochastic_policy input_block: - name: 
vel.modules.input.image_to_tensor + name: vel.module.input.image_to_tensor backbone: - name: vel.rl.models.backbone.nature_cnn + name: vel.rl.backbone.nature_cnn input_width: 84 input_height: 84 input_channels: 4 # The same as frame_history reinforcer: - name: vel.rl.reinforcers.on_policy_iteration_reinforcer + name: vel.rl.reinforcer.on_policy_iteration_reinforcer algo: name: vel.rl.algo.policy_gradient.ppo @@ -39,7 +39,7 @@ reinforcer: max_grad_norm: 0.5 # Gradient clipping parameter cliprange: - name: vel.schedules.linear + name: vel.schedule.linear initial_value: 0.1 final_value: 0.0 @@ -53,7 +53,7 @@ reinforcer: optimizer: - name: vel.optimizers.adam + name: vel.optimizer.adam lr: 2.5e-4 epsilon: 1.0e-5 @@ -64,17 +64,20 @@ scheduler: commands: train: - name: vel.rl.commands.rl_train_command + name: vel.rl.command.rl_train_command total_frames: 1.1e7 batches_per_epoch: 10 record: - name: vel.rl.commands.record_movie_command + name: vel.rl.command.record_movie_command takes: 10 - videoname: 'breakout_ppo_vid_{:04}.avi' + videoname: 'atari_ppo_vid_{:04}.avi' evaluate: - name: vel.rl.commands.evaluate_env_command + name: vel.rl.command.evaluate_env_command parallel_envs: 16 # How many environments to run in parallel takes: 20 + + visdom: + name: vel.command.vis_store_command diff --git a/examples-configs/rl/atari/ppo/breakout_ppo_gru.yaml b/examples-configs/rl/atari/atari_ppo_gru.yaml similarity index 74% rename from examples-configs/rl/atari/ppo/breakout_ppo_gru.yaml rename to examples-configs/rl/atari/atari_ppo_gru.yaml index 1a7aa669..0f8c2e6a 100644 --- a/examples-configs/rl/atari/ppo/breakout_ppo_gru.yaml +++ b/examples-configs/rl/atari/atari_ppo_gru.yaml @@ -1,9 +1,9 @@ -name: 'breakout_ppo_gru' +name: 'atari_ppo_gru' env: name: vel.rl.env.classic_atari - game: 'BreakoutNoFrameskip-v4' + game: !param game = 'BreakoutNoFrameskip-v4' vec_env: @@ -11,13 +11,13 @@ vec_env: model: - name: vel.rl.models.stochastic_policy_rnn_model + name: vel.rl.policy.stochastic_rnn_policy input_block: - name: vel.modules.input.image_to_tensor + name: vel.module.input.image_to_tensor backbone: - name: vel.rl.models.backbone.nature_cnn_rnn + name: vel.rl.backbone.nature_cnn_rnn rnn_type: 'gru' hidden_units: 512 @@ -27,7 +27,7 @@ model: reinforcer: - name: vel.rl.reinforcers.on_policy_iteration_reinforcer + name: vel.rl.reinforcer.on_policy_iteration_reinforcer algo: name: vel.rl.algo.policy_gradient.ppo @@ -41,7 +41,7 @@ reinforcer: max_grad_norm: 0.5 # Gradient clipping parameter cliprange: - name: vel.schedules.linear + name: vel.schedule.linear initial_value: 0.1 final_value: 0.0 @@ -57,7 +57,7 @@ reinforcer: optimizer: - name: vel.optimizers.adam + name: vel.optimizer.adam lr: 2.5e-4 epsilon: 1.0e-5 @@ -68,17 +68,17 @@ scheduler: commands: train: - name: vel.rl.commands.rl_train_command + name: vel.rl.command.rl_train_command total_frames: 1.1e7 batches_per_epoch: 10 record: - name: vel.rl.commands.record_movie_command + name: vel.rl.command.record_movie_command takes: 10 videoname: 'breakout_ppo_gru_vid_{:04}.avi' evaluate: - name: vel.rl.commands.evaluate_env_command + name: vel.rl.command.evaluate_env_command parallel_envs: 16 # How many environments to run in parallel takes: 20 diff --git a/examples-configs/rl/atari/trpo/breakout_trpo.yaml b/examples-configs/rl/atari/atari_trpo.yaml similarity index 96% rename from examples-configs/rl/atari/trpo/breakout_trpo.yaml rename to examples-configs/rl/atari/atari_trpo.yaml index 9c29af38..e54e6d07 100644 --- 
a/examples-configs/rl/atari/trpo/breakout_trpo.yaml +++ b/examples-configs/rl/atari/atari_trpo.yaml @@ -1,8 +1,9 @@ -name: 'breakout_trpo' +name: 'atari_trpo' + env: name: vel.rl.env.classic_atari - game: 'BreakoutNoFrameskip-v4' + game: !param game = 'BreakoutNoFrameskip-v4' vec_env: diff --git a/examples-configs/rl/atari/dqn/breakout_ddqn.yaml b/examples-configs/rl/atari/dqn/atari_ddqn.yaml similarity index 96% rename from examples-configs/rl/atari/dqn/breakout_ddqn.yaml rename to examples-configs/rl/atari/dqn/atari_ddqn.yaml index fb6fb2bf..667ce429 100644 --- a/examples-configs/rl/atari/dqn/breakout_ddqn.yaml +++ b/examples-configs/rl/atari/dqn/atari_ddqn.yaml @@ -1,9 +1,9 @@ -name: 'breakout_ddqn' +name: 'atari_ddqn' env: name: vel.rl.env.classic_atari - game: 'BreakoutNoFrameskip-v4' + game: !param game = 'BreakoutNoFrameskip-v4' vec_env: diff --git a/examples-configs/rl/atari/dqn/breakout_dqn_distributional.yaml b/examples-configs/rl/atari/dqn/atari_dqn_distributional.yaml similarity index 96% rename from examples-configs/rl/atari/dqn/breakout_dqn_distributional.yaml rename to examples-configs/rl/atari/dqn/atari_dqn_distributional.yaml index a0c6e219..b605a75d 100644 --- a/examples-configs/rl/atari/dqn/breakout_dqn_distributional.yaml +++ b/examples-configs/rl/atari/dqn/atari_dqn_distributional.yaml @@ -1,9 +1,9 @@ -name: 'breakout_dqn_distributional' +name: 'atari_dqn_distributional' env: name: vel.rl.env.classic_atari - game: 'BreakoutNoFrameskip-v4' + game: !param game = 'BreakoutNoFrameskip-v4' vec_env: diff --git a/examples-configs/rl/atari/dqn/breakout_dqn_raw.yaml b/examples-configs/rl/atari/dqn/atari_dqn_raw.yaml similarity index 96% rename from examples-configs/rl/atari/dqn/breakout_dqn_raw.yaml rename to examples-configs/rl/atari/dqn/atari_dqn_raw.yaml index 5422f454..31e81b00 100644 --- a/examples-configs/rl/atari/dqn/breakout_dqn_raw.yaml +++ b/examples-configs/rl/atari/dqn/atari_dqn_raw.yaml @@ -1,9 +1,9 @@ -name: 'breakout_dqn_raw' +name: 'atari_dqn_raw' env: name: vel.rl.env.classic_atari - game: 'BreakoutNoFrameskip-v4' + game: !param game = 'BreakoutNoFrameskip-v4' vec_env: diff --git a/examples-configs/rl/atari/dqn/breakout_dueling_ddqn.yaml b/examples-configs/rl/atari/dqn/atari_dueling_ddqn.yaml similarity index 96% rename from examples-configs/rl/atari/dqn/breakout_dueling_ddqn.yaml rename to examples-configs/rl/atari/dqn/atari_dueling_ddqn.yaml index fb6088e0..a5a225a9 100644 --- a/examples-configs/rl/atari/dqn/breakout_dueling_ddqn.yaml +++ b/examples-configs/rl/atari/dqn/atari_dueling_ddqn.yaml @@ -1,9 +1,9 @@ -name: 'breakout_dueling_ddqn' +name: 'atari_dueling_ddqn' env: name: vel.rl.env.classic_atari - game: 'BreakoutNoFrameskip-v4' + game: !param game = 'BreakoutNoFrameskip-v4' vec_env: diff --git a/examples-configs/rl/atari/dqn/breakout_dueling_ddqn_prioritized.yaml b/examples-configs/rl/atari/dqn/atari_dueling_ddqn_prioritized.yaml similarity index 96% rename from examples-configs/rl/atari/dqn/breakout_dueling_ddqn_prioritized.yaml rename to examples-configs/rl/atari/dqn/atari_dueling_ddqn_prioritized.yaml index 017a0c6e..99127352 100644 --- a/examples-configs/rl/atari/dqn/breakout_dueling_ddqn_prioritized.yaml +++ b/examples-configs/rl/atari/dqn/atari_dueling_ddqn_prioritized.yaml @@ -1,9 +1,9 @@ -name: 'breakout_dueling_ddqn_prioritized' +name: 'atari_dueling_ddqn_prioritized' env: name: vel.rl.env.classic_atari - game: 'BreakoutNoFrameskip-v4' + game: !param game = 'BreakoutNoFrameskip-v4' vec_env: diff --git 
a/examples-configs/rl/atari/dqn/seaquest_dqn_distributional.yaml b/examples-configs/rl/atari/dqn/seaquest_dqn_distributional.yaml deleted file mode 100644 index 9d068b5c..00000000 --- a/examples-configs/rl/atari/dqn/seaquest_dqn_distributional.yaml +++ /dev/null @@ -1,90 +0,0 @@ -name: 'seaquest_dqn_distributional' - - -env: - name: vel.rl.env.classic_atari - game: 'SeaquestNoFrameskip-v4' - - -vec_env: - name: vel.rl.vecenv.dummy - frame_history: 4 # How many stacked frames go into a single observation - - -model: - name: vel.rl.models.distributional_q_model - - atoms: 51 # 51 bins for Distributional DQN - vmin: -10.0 - vmax: 10.0 - - input_block: - name: vel.modules.input.image_to_tensor - - backbone: - name: vel.rl.models.backbone.nature_cnn - input_width: 84 - input_height: 84 - input_channels: 4 # The same as frame_history - - -reinforcer: - name: vel.rl.reinforcers.buffered_off_policy_iteration_reinforcer - - env_roller: - name: vel.rl.env_roller.transition_replay_env_roller - - replay_buffer: - name: vel.rl.buffers.circular_replay_buffer - - buffer_initial_size: 30_000 # How many samples we need in the buffer before we start using replay buffer - buffer_capacity: 250_000 - - # Because env has a framestack already built-in, save memory by encoding only last frames in the replay buffer - frame_stack_compensation: true - frame_history: 4 # How many stacked frames go into a single observation - - action_noise: - name: vel.rl.modules.noise.eps_greedy - - epsilon: - name: vel.schedules.linear_and_constant - end_of_interpolation: 0.1 - initial_value: 1.0 - final_value: 0.1 - - algo: - name: vel.rl.algo.distributional_dqn - - target_update_frequency: 10_000 # After how many batches to update the target network - max_grad_norm: 0.5 - - discount_factor: 0.99 - - rollout_steps: 4 # How many environment steps (per env) to perform per batch of training - training_steps: 32 # How many environment steps (per env) to perform per training round - parallel_envs: 1 # Roll out only one env in parallel, just like in DeepMind paper - - -optimizer: - name: vel.optimizers.rmsprop - lr: 2.5e-4 - alpha: 0.95 - momentum: 0.95 - epsilon: 1.0e-1 - - -commands: - train: - name: vel.rl.commands.rl_train_command - total_frames: 5.0e7 # 11M - batches_per_epoch: 2500 - - record: - name: vel.rl.commands.record_movie_command - takes: 10 - videoname: 'breakout_vid_{:04}.avi' - - evaluate: - name: vel.rl.commands.evaluate_env_command - takes: 100 diff --git a/examples-configs/rl/atari/dqn/seaquest_dqn_raw.yaml b/examples-configs/rl/atari/dqn/seaquest_dqn_raw.yaml deleted file mode 100644 index a6e19031..00000000 --- a/examples-configs/rl/atari/dqn/seaquest_dqn_raw.yaml +++ /dev/null @@ -1,86 +0,0 @@ -name: 'seaquest_dqn_raw' - - -env: - name: vel.rl.env.classic_atari - game: 'SeaquestNoFrameskip-v4' - - -vec_env: - name: vel.rl.vecenv.dummy - frame_history: 4 # How many stacked frames go into a single observation - - -model: - name: vel.rl.models.q_model - - input_block: - name: vel.modules.input.image_to_tensor - - backbone: - name: vel.rl.models.backbone.nature_cnn - input_width: 84 - input_height: 84 - input_channels: 4 # The same as frame_history - - -reinforcer: - name: vel.rl.reinforcers.buffered_off_policy_iteration_reinforcer - - env_roller: - name: vel.rl.env_roller.transition_replay_env_roller - - replay_buffer: - name: vel.rl.buffers.circular_replay_buffer - - buffer_initial_size: 30_000 # How many samples we need in the buffer before we start using replay buffer - buffer_capacity: 250_000 - - # Because env 
has a framestack already built-in, save memory by encoding only last frames in the replay buffer - frame_stack_compensation: true - frame_history: 4 # How many stacked frames go into a single observation - - action_noise: - name: vel.rl.modules.noise.eps_greedy - - epsilon: - name: vel.schedules.linear_and_constant - end_of_interpolation: 0.1 - initial_value: 1.0 - final_value: 0.1 - - algo: - name: vel.rl.algo.dqn - - target_update_frequency: 10_000 # After how many batches to update the target network - max_grad_norm: 0.5 - - discount_factor: 0.99 - - rollout_steps: 4 # How many environment steps (per env) to perform per batch of training - training_steps: 32 # How many environment steps (per env) to perform per training round - parallel_envs: 1 # Roll out only one env in parallel, just like in DeepMind paper - - -optimizer: - name: vel.optimizers.rmsprop - lr: 2.5e-4 - alpha: 0.95 - momentum: 0.95 - epsilon: 1.0e-1 - - -commands: - train: - name: vel.rl.commands.rl_train_command - total_frames: 1.1e7 # 11M - batches_per_epoch: 2500 - - record: - name: vel.rl.commands.record_movie_command - takes: 10 - videoname: 'seaquest_vid_{:04}.avi' - - evaluate: - name: vel.rl.commands.evaluate_env_command - takes: 100 diff --git a/examples-configs/rl/atari/dqn_rainbow_param/asterix_rp_dqn_raw.yaml b/examples-configs/rl/atari/dqn_rainbow_param/asterix_rp_dqn_raw.yaml deleted file mode 100644 index 4717e900..00000000 --- a/examples-configs/rl/atari/dqn_rainbow_param/asterix_rp_dqn_raw.yaml +++ /dev/null @@ -1,89 +0,0 @@ -name: 'asterix_rp_dqn_raw' - - -env: - name: vel.rl.env.classic_atari - game: 'AsterixNoFrameskip-v4' - - settings: - max_episode_frames: 108_000 - - -vec_env: - name: vel.rl.vecenv.dummy - frame_history: 4 # How many stacked frames go into a single observation - - -model: - name: vel.rl.models.q_model - - input_block: - name: vel.modules.input.image_to_tensor - - backbone: - name: vel.rl.models.backbone.nature_cnn - input_width: 84 - input_height: 84 - input_channels: 4 # The same as frame_history - - -reinforcer: - name: vel.rl.reinforcers.buffered_off_policy_iteration_reinforcer - - env_roller: - name: vel.rl.env_roller.transition_replay_env_roller - - replay_buffer: - name: vel.rl.buffers.circular_replay_buffer - - buffer_initial_size: 80_000 # How many samples we need in the buffer before we start using replay buffer - buffer_capacity: 1_000_000 - - # Because env has a framestack already built-in, save memory by encoding only last frames in the replay buffer - frame_stack_compensation: true - frame_history: 4 # How many stacked frames go into a single observation - - action_noise: - name: vel.rl.modules.noise.eps_greedy - - epsilon: - name: vel.schedules.linear_and_constant - end_of_interpolation: 0.1 - initial_value: 1.0 - final_value: 0.1 - - algo: - name: vel.rl.algo.dqn - - target_update_frequency: 32_000 # After how many batches to update the target network - max_grad_norm: 0.5 - - discount_factor: 0.99 - - rollout_steps: 4 # How many environment steps (per env) to perform per batch of training - training_steps: 32 # How many environment steps (per env) to perform per training round - parallel_envs: 1 # Roll out only one env in parallel, just like in DeepMind paper - - -optimizer: - name: vel.optimizers.adam - lr: 6.25e-05 - epsilon: 1.5e-4 - - -commands: - train: - name: vel.rl.commands.rl_train_command - total_frames: 5.0e7 # 50M - batches_per_epoch: 2500 - - record: - name: vel.rl.commands.record_movie_command - takes: 10 - videoname: 'asterix_vid_{:04}.avi' - fps: 15 - 
- evaluate: - name: vel.rl.commands.evaluate_env_command - parallel_envs: 12 - takes: 20 diff --git a/examples-configs/rl/atari/rainbow/breakout_rainbow.yaml b/examples-configs/rl/atari/dqn_rainbow_param/atari_rainbow.yaml similarity index 97% rename from examples-configs/rl/atari/rainbow/breakout_rainbow.yaml rename to examples-configs/rl/atari/dqn_rainbow_param/atari_rainbow.yaml index 1e0cbeec..af99acd8 100644 --- a/examples-configs/rl/atari/rainbow/breakout_rainbow.yaml +++ b/examples-configs/rl/atari/dqn_rainbow_param/atari_rainbow.yaml @@ -1,9 +1,9 @@ -name: 'breakout_rainbow' +name: 'atari_rainbow' env: name: vel.rl.env.classic_atari - game: 'BreakoutNoFrameskip-v4' + game: !param game = 'BreakoutNoFrameskip-v4' settings: max_episode_frames: 108_000 diff --git a/examples-configs/rl/atari/dqn_rainbow_param/asterix_rp_dqn_distributional.yaml b/examples-configs/rl/atari/dqn_rainbow_param/atari_rp_dqn_distributional.yaml similarity index 96% rename from examples-configs/rl/atari/dqn_rainbow_param/asterix_rp_dqn_distributional.yaml rename to examples-configs/rl/atari/dqn_rainbow_param/atari_rp_dqn_distributional.yaml index 432cb64e..57e4fe91 100644 --- a/examples-configs/rl/atari/dqn_rainbow_param/asterix_rp_dqn_distributional.yaml +++ b/examples-configs/rl/atari/dqn_rainbow_param/atari_rp_dqn_distributional.yaml @@ -1,9 +1,9 @@ -name: 'asterix_dqn_distributional' +name: 'atari_dqn_distributional' env: name: vel.rl.env.classic_atari - game: 'AsterixNoFrameskip-v4' + game: !param game = 'BreakoutNoFrameskip-v4' settings: max_episode_frames: 108_000 diff --git a/examples-configs/rl/atari/dqn_rainbow_param/asteroids_rp_dqn_noisynet.yaml b/examples-configs/rl/atari/dqn_rainbow_param/atari_rp_dqn_noisynet.yaml similarity index 96% rename from examples-configs/rl/atari/dqn_rainbow_param/asteroids_rp_dqn_noisynet.yaml rename to examples-configs/rl/atari/dqn_rainbow_param/atari_rp_dqn_noisynet.yaml index b71afb46..e8174023 100644 --- a/examples-configs/rl/atari/dqn_rainbow_param/asteroids_rp_dqn_noisynet.yaml +++ b/examples-configs/rl/atari/dqn_rainbow_param/atari_rp_dqn_noisynet.yaml @@ -1,10 +1,9 @@ -name: 'asteroids_rp_dqn_noisynet' +name: 'atari_rp_dqn_noisynet' env: name: vel.rl.env.classic_atari - game: 'AsteroidsNoFrameskip-v4' - + game: !param game = 'BreakoutNoFrameskip-v4' settings: max_episode_frames: 108_000 diff --git a/examples-configs/rl/atari/dqn_rainbow_param/atlantis_rp_dqn_nstep.yaml b/examples-configs/rl/atari/dqn_rainbow_param/atari_rp_dqn_nstep.yaml similarity index 96% rename from examples-configs/rl/atari/dqn_rainbow_param/atlantis_rp_dqn_nstep.yaml rename to examples-configs/rl/atari/dqn_rainbow_param/atari_rp_dqn_nstep.yaml index d6558654..89629ac7 100644 --- a/examples-configs/rl/atari/dqn_rainbow_param/atlantis_rp_dqn_nstep.yaml +++ b/examples-configs/rl/atari/dqn_rainbow_param/atari_rp_dqn_nstep.yaml @@ -1,10 +1,9 @@ -name: 'atlantis_rp_dqn_nstep' +name: 'atari_rp_dqn_nstep' env: name: vel.rl.env.classic_atari - game: 'AtlantisNoFrameskip-v4' - + game: !param game = 'BreakoutNoFrameskip-v4' settings: max_episode_frames: 108_000 diff --git a/examples-configs/rl/atari/dqn_rainbow_param/asteroids_rp_dqn_raw.yaml b/examples-configs/rl/atari/dqn_rainbow_param/atari_rp_dqn_raw.yaml similarity index 96% rename from examples-configs/rl/atari/dqn_rainbow_param/asteroids_rp_dqn_raw.yaml rename to examples-configs/rl/atari/dqn_rainbow_param/atari_rp_dqn_raw.yaml index 9622d200..5786002b 100644 --- a/examples-configs/rl/atari/dqn_rainbow_param/asteroids_rp_dqn_raw.yaml +++ 
b/examples-configs/rl/atari/dqn_rainbow_param/atari_rp_dqn_raw.yaml @@ -1,10 +1,9 @@ -name: 'asteroids_rp_dqn_raw' +name: 'atari_rp_dqn_raw' env: name: vel.rl.env.classic_atari - game: 'AsteroidsNoFrameskip-v4' - + game: !param game = 'BreakoutNoFrameskip-v4' settings: max_episode_frames: 108_000 diff --git a/examples-configs/rl/atari/dqn_rainbow_param/atlantis_rp_dqn_raw.yaml b/examples-configs/rl/atari/dqn_rainbow_param/atlantis_rp_dqn_raw.yaml deleted file mode 100644 index d30461e2..00000000 --- a/examples-configs/rl/atari/dqn_rainbow_param/atlantis_rp_dqn_raw.yaml +++ /dev/null @@ -1,88 +0,0 @@ -name: 'atlantis_rp_dqn_raw' - - -env: - name: vel.rl.env.classic_atari - game: 'AtlantisNoFrameskip-v4' - - settings: - max_episode_frames: 108_000 - - -vec_env: - name: vel.rl.vecenv.dummy - frame_history: 4 # How many stacked frames go into a single observation - - -model: - name: vel.rl.models.q_model - - input_block: - name: vel.modules.input.image_to_tensor - - backbone: - name: vel.rl.models.backbone.nature_cnn - input_width: 84 - input_height: 84 - input_channels: 4 # The same as frame_history - - -reinforcer: - name: vel.rl.reinforcers.buffered_off_policy_iteration_reinforcer - - env_roller: - name: vel.rl.env_roller.transition_replay_env_roller - - replay_buffer: - name: vel.rl.buffers.circular_replay_buffer - - buffer_initial_size: 80_000 # How many samples we need in the buffer before we start using replay buffer - buffer_capacity: 1_000_000 - - # Because env has a framestack already built-in, save memory by encoding only last frames in the replay buffer - frame_stack_compensation: true - frame_history: 4 # How many stacked frames go into a single observation - - action_noise: - name: vel.rl.modules.noise.eps_greedy - - epsilon: - name: vel.schedules.linear_and_constant - end_of_interpolation: 0.1 - initial_value: 1.0 - final_value: 0.1 - - algo: - name: vel.rl.algo.dqn - - target_update_frequency: 32_000 # After how many batches to update the target network - max_grad_norm: 0.5 - - discount_factor: 0.99 - - rollout_steps: 4 # How many environment steps (per env) to perform per batch of training - training_steps: 32 # How many environment steps (per env) to perform per training round - parallel_envs: 1 # Roll out only one env in parallel, just like in DeepMind paper - - -optimizer: - name: vel.optimizers.adam - lr: 6.25e-05 - epsilon: 1.5e-4 - - -commands: - train: - name: vel.rl.commands.rl_train_command - total_frames: 5.0e7 # 50M - batches_per_epoch: 2500 - - record: - name: vel.rl.commands.record_movie_command - takes: 10 - videoname: 'atlantis_vid_{:04}.avi' - - evaluate: - name: vel.rl.commands.evaluate_env_command - parallel_envs: 12 - takes: 100 diff --git a/examples-configs/rl/atari/ppo/enduro_ppo.yaml b/examples-configs/rl/atari/ppo/enduro_ppo.yaml deleted file mode 100644 index afc6d1cc..00000000 --- a/examples-configs/rl/atari/ppo/enduro_ppo.yaml +++ /dev/null @@ -1,85 +0,0 @@ -name: 'enduro_ppo' - -env: - name: vel.rl.env.classic_atari - game: 'EnduroNoFrameskip-v4' - - -vec_env: - name: vel.rl.vecenv.shared_mem - frame_history: 4 # How many stacked frames go into a single observation - - -model: - name: vel.rl.models.stochastic_policy_model - - input_block: - name: vel.modules.input.image_to_tensor - - backbone: - name: vel.rl.models.backbone.nature_cnn - input_width: 84 - input_height: 84 - input_channels: 4 # The same as frame_history - - -reinforcer: - name: vel.rl.reinforcers.on_policy_iteration_reinforcer - - algo: - name: vel.rl.algo.policy_gradient.ppo - - 
entropy_coefficient: 0.01 - value_coefficient: 0.5 - - discount_factor: 0.99 # Discount factor for the rewards - gae_lambda: 0.95 # Generalized Advantage Estimator Lambda parameter - - max_grad_norm: 0.5 # Gradient clipping parameter - - cliprange: - name: vel.schedules.linear - initial_value: 0.1 - final_value: 0.0 - - env_roller: - name: vel.rl.env_roller.step_env_roller - - parallel_envs: 8 # How many environments to run in parallel - number_of_steps: 128 # How many environment steps go into a single batch - batch_size: 256 # How many samples can go into the model once - experience_replay: 4 # How many times to replay the experience - - discount_factor: 0.99 # Discount factor for the rewards - - -optimizer: - name: vel.optimizers.adam - lr: 2.5e-4 - epsilon: 1.0e-5 - - -scheduler: - name: vel.scheduler.linear_batch_scaler - - -commands: - train: - name: vel.rl.commands.rl_train_command - total_frames: 1.1e7 - batches_per_epoch: 10 - - record: - name: vel.rl.commands.record_movie_command - takes: 10 - videoname: 'enduro_ppo_vid_{:04}.avi' - - evaluate: - name: vel.rl.commands.evaluate_env_command - parallel_envs: 16 # How many environments to run in parallel - - takes: 20 - - enjoy: - name: vel.rl.commands.enjoy - fps: 15 diff --git a/examples-configs/rl/atari/ppo/qbert_ppo.yaml b/examples-configs/rl/atari/ppo/qbert_ppo.yaml deleted file mode 100644 index b4e012b0..00000000 --- a/examples-configs/rl/atari/ppo/qbert_ppo.yaml +++ /dev/null @@ -1,81 +0,0 @@ -name: 'qbert_ppo_simple' - -env: - name: vel.rl.env.classic_atari - game: 'QbertNoFrameskip-v4' - - -vec_env: - name: vel.rl.vecenv.shared_mem - frame_history: 4 # How many stacked frames go into a single observation - - -model: - name: vel.rl.models.stochastic_policy_model - - input_block: - name: vel.modules.input.image_to_tensor - - backbone: - name: vel.rl.models.backbone.nature_cnn - input_width: 84 - input_height: 84 - input_channels: 4 # The same as frame_history - - -reinforcer: - name: vel.rl.reinforcers.on_policy_iteration_reinforcer - - algo: - name: vel.rl.algo.policy_gradient.ppo - - entropy_coefficient: 0.01 - value_coefficient: 0.5 - - discount_factor: 0.99 # Discount factor for the rewards - gae_lambda: 0.95 # Generalized Advantage Estimator Lambda parameter - - max_grad_norm: 0.5 # Gradient clipping parameter - - cliprange: - name: vel.schedules.linear - initial_value: 0.1 - final_value: 0.0 - - env_roller: - name: vel.rl.env_roller.step_env_roller - - parallel_envs: 8 # How many environments to run in parallel - number_of_steps: 128 # How many environment steps go into a single batch - batch_size: 256 # How many samples can go into the model once - experience_replay: 4 # How many times to replay the experience - - discount_factor: 0.99 # Discount factor for the rewards - - -optimizer: - name: vel.optimizers.adam - lr: 2.5e-4 - epsilon: 1.0e-5 - - -scheduler: - name: vel.scheduler.linear_batch_scaler - - -commands: - train: - name: vel.rl.commands.rl_train_command - total_frames: 1.1e7 - batches_per_epoch: 10 - - record: - name: vel.rl.commands.record_movie_command - takes: 10 - videoname: 'qbert_ppo_vid_{:04}.avi' - - evaluate: - name: vel.rl.commands.evaluate_env_command - parallel_envs: 16 # How many environments to run in parallel - - takes: 20 diff --git a/examples-scripts/rl/atari/a2c/breakout_a2c.py b/examples-scripts/rl/atari/a2c/breakout_a2c.py index 1f855788..4cb9560a 100644 --- a/examples-scripts/rl/atari/a2c/breakout_a2c.py +++ b/examples-scripts/rl/atari/a2c/breakout_a2c.py @@ -8,12 +8,12 @@ from 
vel.rl.env.classic_atari import ClassicAtariEnv from vel.rl.vecenv.subproc import SubprocVecEnvWrapper -from vel.modules.input.image_to_tensor import ImageToTensorFactory -from vel.rl.models.stochastic_policy_model import StochasticPolicyModelFactory -from vel.rl.models.backbone.nature_cnn import NatureCnnFactory +from vel.module.input.image_to_tensor import ImageToTensorFactory +from vel.rl.policy.stochastic_policy import StochasticPolicy +from vel.rl.backbone.nature_cnn import NatureCnnFactory -from vel.rl.reinforcers.on_policy_iteration_reinforcer import ( +from vel.rl.reinforcer.on_policy_iteration_reinforcer import ( OnPolicyIterationReinforcer, OnPolicyIterationReinforcerSettings ) @@ -39,7 +39,7 @@ def breakout_a2c(): # Again, use a helper to create a model # But because model is owned by the reinforcer, model should not be accessed using this variable # but from reinforcer.model property - model = StochasticPolicyModelFactory( + model = StochasticPolicy( input_block=ImageToTensorFactory(), backbone=NatureCnnFactory(input_width=84, input_height=84, input_channels=4) ).instantiate(action_space=vec_env.action_space) diff --git a/examples-scripts/rl/atari/a2c/breakout_a2c_evaluate.py b/examples-scripts/rl/atari/a2c/breakout_a2c_evaluate.py index 2912d46e..3c31f6ab 100644 --- a/examples-scripts/rl/atari/a2c/breakout_a2c_evaluate.py +++ b/examples-scripts/rl/atari/a2c/breakout_a2c_evaluate.py @@ -51,7 +51,7 @@ def record_take(model, env_instance, device): while True: observation_array = np.expand_dims(np.array(observation), axis=0) observation_tensor = torch.from_numpy(observation_array).to(device) - actions = model.step(observation_tensor, argmax_sampling=True)['actions'] + actions = model.step(observation_tensor, deterministic=True)['actions'] observation, reward, done, epinfo = env_instance.step(actions.item()) diff --git a/examples-scripts/rl/mujoco/ddpg/half_cheetah_ddpg.py b/examples-scripts/rl/mujoco/ddpg/half_cheetah_ddpg.py index 9c0a679b..49900c0e 100644 --- a/examples-scripts/rl/mujoco/ddpg/half_cheetah_ddpg.py +++ b/examples-scripts/rl/mujoco/ddpg/half_cheetah_ddpg.py @@ -11,7 +11,7 @@ from vel.util.random import set_seed from vel.rl.env.mujoco import MujocoEnv from vel.rl.model.deterministic_policy_model import DeterministicPolicyModelFactory -from vel.rl.model.backbone.mlp import MLPFactory +from vel.rl.backbone.mlp import MLPFactory from vel.rl.reinforcer.buffered_off_policy_iteration_reinforcer import ( BufferedOffPolicyIterationReinforcer, BufferedOffPolicyIterationReinforcerSettings ) diff --git a/vel/api/model.py b/vel/api/model.py index 9164442f..be09907b 100644 --- a/vel/api/model.py +++ b/vel/api/model.py @@ -58,6 +58,10 @@ def is_stateful(self) -> bool: """ If the model has a state that needs to be fed between individual observations """ return False + def zero_state(self, batch_size): + """ Potential state for the model """ + return None + class SupervisedModel(Model): """ Model for a supervised learning problem """ diff --git a/vel/model/vision/cifar_resnet_v1.py b/vel/model/vision/cifar_resnet_v1.py index 2a19ffa8..ab8fac25 100644 --- a/vel/model/vision/cifar_resnet_v1.py +++ b/vel/model/vision/cifar_resnet_v1.py @@ -7,7 +7,7 @@ import torch.nn.functional as F from vel.api import LossFunctionModel, ModelFactory -from vel.modules.resnet_v1 import Bottleneck, BasicBlock +from vel.module.resnet_v1 import Bottleneck, BasicBlock class ResNetV1(LossFunctionModel): diff --git a/vel/model/vision/cifar_resnet_v2.py b/vel/model/vision/cifar_resnet_v2.py index 
2d44ab01..66e96fb6 100644 --- a/vel/model/vision/cifar_resnet_v2.py +++ b/vel/model/vision/cifar_resnet_v2.py @@ -7,7 +7,7 @@ import torch.nn.functional as F from vel.api import LossFunctionModel, ModelFactory -from vel.modules.resnet_v2 import Bottleneck, BasicBlock +from vel.module.resnet_v2 import Bottleneck, BasicBlock class ResNetV2(LossFunctionModel): diff --git a/vel/module/input/one_hot_encoding.py b/vel/module/input/one_hot_encoding.py index 1e351721..eaee642c 100644 --- a/vel/module/input/one_hot_encoding.py +++ b/vel/module/input/one_hot_encoding.py @@ -1,5 +1,5 @@ from vel.api import LinearBackboneModel, ModelFactory -from vel.modules.layers import OneHotEncode +from vel.module.layers import OneHotEncode class OneHotEncodingInput(LinearBackboneModel): diff --git a/vel/module/rnn_cell.py b/vel/module/rnn_cell.py index 5b62a046..5ce58867 100644 --- a/vel/module/rnn_cell.py +++ b/vel/module/rnn_cell.py @@ -46,6 +46,10 @@ def state_dim(self) -> int: else: return self.hidden_size + def zero_state(self, batch_size): + """ Potential state for the model """ + return torch.zeros(batch_size, self.state_dim) + def forward(self, input_data, state): if self.rnn_type == 'lstm': hidden_state, cell_state = torch.split(state, self.hidden_size, 1) diff --git a/vel/rl/algo/policy_gradient/trpo.py b/vel/rl/algo/policy_gradient/trpo.py index d97d1bf3..2922e128 100644 --- a/vel/rl/algo/policy_gradient/trpo.py +++ b/vel/rl/algo/policy_gradient/trpo.py @@ -84,7 +84,7 @@ def process_rollout(self, batch_info, rollout: Rollout): return rollout - def optimizer_step(self, batch_info, device, model, rollout): + def optimize(self, batch_info, device, model, rollout): """ Single optimization step for a model """ rollout = rollout.to_transitions() diff --git a/vel/rl/api/__init__.py b/vel/rl/api/__init__.py index a48ee85a..4e80755c 100644 --- a/vel/rl/api/__init__.py +++ b/vel/rl/api/__init__.py @@ -2,7 +2,7 @@ from .env_base import EnvFactory, VecEnvFactory from .env_roller import EnvRollerBase, ReplayEnvRollerBase, EnvRollerFactoryBase, ReplayEnvRollerFactoryBase from .evaluator import Evaluator -from .model import RlModel, RlRnnModel +from .policy import Policy from .reinforcer_base import ReinforcerBase, ReinforcerFactory from .replay_buffer import ReplayBuffer, ReplayBufferFactory from .rollout import Rollout, Trajectories, Transitions diff --git a/vel/rl/api/algo_base.py b/vel/rl/api/algo_base.py index d9e6c8c4..305e2624 100644 --- a/vel/rl/api/algo_base.py +++ b/vel/rl/api/algo_base.py @@ -25,7 +25,7 @@ def process_rollout(self, batch_info, rollout): """ Process rollout for ALGO before any chunking/shuffling """ return rollout - def optimizer_step(self, batch_info, device, model, rollout): + def optimize(self, batch_info, device, model, rollout): """ Single optimization step for a model """ raise NotImplementedError @@ -48,7 +48,7 @@ def post_optimization_step(self, batch_info, device, model, rollout): """ Steps to take after optimization has been done""" pass - def optimizer_step(self, batch_info, device, model, rollout): + def optimize(self, batch_info, device, model, rollout): """ Single optimization step for a model """ batch_info.optimizer.zero_grad() diff --git a/vel/rl/api/env_roller.py b/vel/rl/api/env_roller.py index 3457104d..f868fe90 100644 --- a/vel/rl/api/env_roller.py +++ b/vel/rl/api/env_roller.py @@ -1,20 +1,19 @@ import typing -import gym -from vel.rl.api.rollout import Rollout -from vel.api import BatchInfo, Model +from vel.api import BatchInfo from vel.openai.baselines.common.vec_env 
import VecEnv +from vel.rl.api.rollout import Rollout class EnvRollerBase: """ Class generating environment rollouts """ @property - def environment(self) -> typing.Union[gym.Env, VecEnv]: + def environment(self) -> VecEnv: """ Reference to environment being evaluated """ raise NotImplementedError - def rollout(self, batch_info: BatchInfo, model: Model, number_of_steps: int) -> Rollout: + def rollout(self, batch_info: BatchInfo, number_of_steps: int) -> Rollout: """ Roll-out the environment and return it """ raise NotImplementedError @@ -27,7 +26,7 @@ def metrics(self) -> list: class ReplayEnvRollerBase(EnvRollerBase): """ Class generating environment rollouts with experience replay """ - def sample(self, batch_info: BatchInfo, model: Model, number_of_steps: int) -> Rollout: + def sample(self, batch_info: BatchInfo, number_of_steps: int) -> Rollout: """ Sample experience from replay buffer and return a batch """ raise NotImplementedError @@ -47,7 +46,7 @@ def update(self, rollout, batch_info): class EnvRollerFactoryBase: """ Factory for env rollers """ - def instantiate(self, environment, device) -> EnvRollerBase: + def instantiate(self, environment, policy, device) -> EnvRollerBase: """ Instantiate env roller """ raise NotImplementedError @@ -55,6 +54,6 @@ def instantiate(self, environment, device) -> EnvRollerBase: class ReplayEnvRollerFactoryBase(EnvRollerFactoryBase): """ Factory for env rollers """ - def instantiate(self, environment, device) -> ReplayEnvRollerBase: + def instantiate(self, environment, policy, device) -> ReplayEnvRollerBase: """ Instantiate env roller """ raise NotImplementedError diff --git a/vel/rl/api/model.py b/vel/rl/api/model.py deleted file mode 100644 index fb5a691c..00000000 --- a/vel/rl/api/model.py +++ /dev/null @@ -1,50 +0,0 @@ -import torch - -from vel.api import Model - -from .rollout import Rollout -from .evaluator import Evaluator - - -class RlModel(Model): - """ Reinforcement learning model """ - - def step(self, observations) -> dict: - """ - Evaluate environment on given observations, return actions and potentially some extra information - in a dictionary. - """ - raise NotImplementedError - - def evaluate(self, rollout: Rollout) -> Evaluator: - """ Evaluate model on a rollout """ - raise NotImplementedError - - -class RlRnnModel(Model): - """ Reinforcement learning recurrent model """ - - @property - def is_stateful(self) -> bool: - """ If the network is recurrent and needs to be fed previous state """ - return True - - def step(self, observations, state) -> dict: - """ - Evaluate environment on given observations, return actions and potentially some extra information - in a dictionary. - """ - raise NotImplementedError - - @property - def state_dim(self) -> int: - """ Dimension of model state """ - raise NotImplementedError - - def zero_state(self, batch_size): - """ Initial state of the network """ - return torch.zeros(batch_size, self.state_dim) - - def evaluate(self, rollout: Rollout) -> Evaluator: - """ Evaluate model on a rollout """ - raise NotImplementedError diff --git a/vel/rl/api/policy.py b/vel/rl/api/policy.py new file mode 100644 index 00000000..34082364 --- /dev/null +++ b/vel/rl/api/policy.py @@ -0,0 +1,22 @@ +import torch +from vel.api import Model + + +class Policy(Model): + """ Base class for reinforcement learning policies """ + + def act(self, observation, state=None, deterministic=False) -> dict: + """ Make an action based on the observation from the environment. 
""" + raise NotImplementedError + + def value(self, observation, state=None) -> torch.tensor: + """ Return the expected reward from current state """ + return self.act(observation=observation, state=state)['value'] + + def reset_state(self, state, dones): + """ Reset the state after the episode has been terminated """ + raise NotImplementedError + + def evaluate(self, rollout) -> object: + """ Return an evaluator object evaluating given rollout that may be used for gradient computations etc. """ + raise NotImplementedError diff --git a/vel/rl/api/rollout.py b/vel/rl/api/rollout.py index 4acf8cbb..4e7695d7 100644 --- a/vel/rl/api/rollout.py +++ b/vel/rl/api/rollout.py @@ -176,7 +176,7 @@ def frames(self): """ Number of frames in rollout """ return self.num_steps * self.num_envs - def to_device(self, device, non_blocking=True): + def to_device(self, device, non_blocking=False): """ Move a rollout to a selected device """ return Trajectories( num_steps=self.num_steps, diff --git a/vel/rl/model/__init__.py b/vel/rl/backbone/__init__.py similarity index 100% rename from vel/rl/model/__init__.py rename to vel/rl/backbone/__init__.py diff --git a/vel/rl/model/backbone/double_nature_cnn.py b/vel/rl/backbone/double_nature_cnn.py similarity index 100% rename from vel/rl/model/backbone/double_nature_cnn.py rename to vel/rl/backbone/double_nature_cnn.py diff --git a/vel/rl/model/backbone/double_noisy_nature_cnn.py b/vel/rl/backbone/double_noisy_nature_cnn.py similarity index 100% rename from vel/rl/model/backbone/double_noisy_nature_cnn.py rename to vel/rl/backbone/double_noisy_nature_cnn.py diff --git a/vel/rl/model/backbone/lstm.py b/vel/rl/backbone/lstm.py similarity index 100% rename from vel/rl/model/backbone/lstm.py rename to vel/rl/backbone/lstm.py diff --git a/vel/rl/model/backbone/mlp.py b/vel/rl/backbone/mlp.py similarity index 100% rename from vel/rl/model/backbone/mlp.py rename to vel/rl/backbone/mlp.py diff --git a/vel/rl/model/backbone/nature_cnn.py b/vel/rl/backbone/nature_cnn.py similarity index 100% rename from vel/rl/model/backbone/nature_cnn.py rename to vel/rl/backbone/nature_cnn.py diff --git a/vel/rl/model/backbone/nature_cnn_rnn.py b/vel/rl/backbone/nature_cnn_rnn.py similarity index 83% rename from vel/rl/model/backbone/nature_cnn_rnn.py rename to vel/rl/backbone/nature_cnn_rnn.py index 9662a444..8888da6e 100644 --- a/vel/rl/model/backbone/nature_cnn_rnn.py +++ b/vel/rl/backbone/nature_cnn_rnn.py @@ -1,5 +1,5 @@ from vel.api import LinearBackboneModel, ModelFactory -from vel.rl.model.backbone.nature_cnn import NatureCnn +from vel.rl.backbone.nature_cnn import NatureCnn from vel.module.rnn_cell import RnnCell @@ -31,6 +31,15 @@ def state_dim(self) -> int: """ Initial state of the network """ return self.rnn_cell.state_dim + @property + def is_stateful(self) -> bool: + """ If the model has a state that needs to be fed between individual observations """ + return True + + def zero_state(self, batch_size): + """ Potential state for the model """ + return self.rnn_cell.zero_state(batch_size) + def forward(self, input_image, state): cnn_output = self.nature_cnn(input_image) hidden_state, new_state = self.rnn_cell(cnn_output, state) diff --git a/vel/rl/model/backbone/nature_cnn_small.py b/vel/rl/backbone/nature_cnn_small.py similarity index 100% rename from vel/rl/model/backbone/nature_cnn_small.py rename to vel/rl/backbone/nature_cnn_small.py diff --git a/vel/rl/model/backbone/noisy_nature_cnn.py b/vel/rl/backbone/noisy_nature_cnn.py similarity index 100% rename from 
vel/rl/model/backbone/noisy_nature_cnn.py rename to vel/rl/backbone/noisy_nature_cnn.py diff --git a/vel/rl/env_roller/step_env_roller.py b/vel/rl/env_roller/step_env_roller.py index a25cecfe..c4ec4700 100644 --- a/vel/rl/env_roller/step_env_roller.py +++ b/vel/rl/env_roller/step_env_roller.py @@ -1,8 +1,10 @@ import torch import numpy as np -from vel.api import BatchInfo, Model -from vel.rl.api import Trajectories, Rollout, EnvRollerBase, EnvRollerFactoryBase +from vel.api import BatchInfo +from vel.openai.baselines.common.vec_env import VecEnv +from vel.rl.api import Trajectories, Rollout, EnvRollerBase, EnvRollerFactoryBase, Policy +from vel.rl.util.actor import PolicyActor from vel.util.tensor_accumulator import TensorAccumulator @@ -11,15 +13,14 @@ class StepEnvRoller(EnvRollerBase): Class calculating env rollouts. """ - def __init__(self, environment, device): + def __init__(self, environment: VecEnv, policy: Policy, device: torch.device): self._environment = environment self.device = device # Initial observation - kept on CPU self.last_observation = torch.from_numpy(self.environment.reset()).clone() - # Relevant for RNN policies - kept on DEVICE - self.hidden_state = None + self.actor = PolicyActor(self.environment.num_envs, policy, device) @property def environment(self): @@ -27,26 +28,17 @@ def environment(self): return self._environment @torch.no_grad() - def rollout(self, batch_info: BatchInfo, model: Model, number_of_steps: int) -> Rollout: + def rollout(self, batch_info: BatchInfo, number_of_steps: int) -> Rollout: """ Calculate env rollout """ accumulator = TensorAccumulator() episode_information = [] # List of dictionaries with episode information - if self.hidden_state is None and model.is_stateful: - self.hidden_state = model.zero_state(self.last_observation.size(0)).to(self.device) - - # Remember rollout initial state, we'll use that for training as well - initial_hidden_state = self.hidden_state - for step_idx in range(number_of_steps): - if model.is_stateful: - step = model.step(self.last_observation.to(self.device), state=self.hidden_state) - self.hidden_state = step['state'] - else: - step = model.step(self.last_observation.to(self.device)) + step = self.actor.act(self.last_observation.to(self.device)) # Add step to the tensor accumulator for name, tensor in step.items(): + # Take not that here we convert all the tensors to CPU accumulator.add(name, tensor.cpu()) accumulator.add('observations', self.last_observation) @@ -57,22 +49,16 @@ def rollout(self, batch_info: BatchInfo, model: Model, number_of_steps: int) -> # Done is flagged true when the episode has ended AND the frame we see is already a first frame from the # next episode dones_tensor = torch.from_numpy(new_dones.astype(np.float32)).clone() - accumulator.add('dones', dones_tensor) self.last_observation = torch.from_numpy(new_obs).clone() + self.actor.reset_states(dones_tensor) - if model.is_stateful: - # Zero out state in environments that have finished - self.hidden_state = self.hidden_state * (1.0 - dones_tensor.unsqueeze(-1)).to(self.device) - + accumulator.add('dones', dones_tensor) accumulator.add('rewards', torch.from_numpy(new_rewards.astype(np.float32)).clone()) episode_information.append(new_infos) - if model.is_stateful: - final_values = model.value(self.last_observation.to(self.device), state=self.hidden_state).cpu() - else: - final_values = model.value(self.last_observation.to(self.device)).cpu() + final_values = self.actor.value(self.last_observation.to(self.device)).cpu() accumulated_tensors = 
accumulator.result() @@ -82,7 +68,6 @@ def rollout(self, batch_info: BatchInfo, model: Model, number_of_steps: int) -> environment_information=episode_information, transition_tensors=accumulated_tensors, rollout_tensors={ - 'initial_hidden_state': initial_hidden_state.cpu() if initial_hidden_state is not None else None, 'final_values': final_values } ) @@ -93,9 +78,10 @@ class StepEnvRollerFactory(EnvRollerFactoryBase): def __init__(self): pass - def instantiate(self, environment, device): + def instantiate(self, environment, policy, device): return StepEnvRoller( environment=environment, + policy=policy, device=device, ) diff --git a/vel/rl/module/action_head.py b/vel/rl/module/stochastic_action_head.py similarity index 96% rename from vel/rl/module/action_head.py rename to vel/rl/module/stochastic_action_head.py index 8cd6b6ba..2d54cbab 100644 --- a/vel/rl/module/action_head.py +++ b/vel/rl/module/stochastic_action_head.py @@ -32,12 +32,12 @@ def forward(self, input_data): return torch.stack([means, log_std_tile], dim=-1) - def sample(self, params, argmax_sampling=False): + def sample(self, params, deterministic=False): """ Sample from a probability space of all actions """ means = params[:, :, 0] log_std = params[:, :, 1] - if argmax_sampling: + if deterministic: return means else: return torch.randn_like(means) * torch.exp(log_std) + means @@ -105,11 +105,12 @@ def logprob(self, actions, action_logits): neg_log_prob = F.nll_loss(action_logits, actions, reduction='none') return -neg_log_prob - def sample(self, logits, argmax_sampling=False): + def sample(self, logits, deterministic=False): """ Sample from a probability space of all actions """ - if argmax_sampling: + if deterministic: return torch.argmax(logits, dim=-1) else: + # Gumbel-softmax trick u = torch.rand_like(logits) return torch.argmax(logits - torch.log(-torch.log(u)), dim=-1) @@ -133,7 +134,7 @@ def kl_divergence(self, logits_q, logits_p): return (torch.exp(logits_q) * (logits_q - logits_p)).sum(1, keepdim=True) -class ActionHead(nn.Module): +class StochasticActionHead(nn.Module): """ Network head for action determination. 
Returns probability distribution parametrization """ diff --git a/vel/rl/model/backbone/__init__.py b/vel/rl/policy/__init__.py similarity index 100% rename from vel/rl/model/backbone/__init__.py rename to vel/rl/policy/__init__.py diff --git a/vel/rl/policy/purgatory/__init__.py b/vel/rl/policy/purgatory/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/vel/rl/model/deterministic_policy_model.py b/vel/rl/policy/purgatory/deterministic_policy.py similarity index 100% rename from vel/rl/model/deterministic_policy_model.py rename to vel/rl/policy/purgatory/deterministic_policy.py diff --git a/vel/rl/model/q_distributional_model.py b/vel/rl/policy/purgatory/q_distributional_policy.py similarity index 100% rename from vel/rl/model/q_distributional_model.py rename to vel/rl/policy/purgatory/q_distributional_policy.py diff --git a/vel/rl/model/q_dueling_model.py b/vel/rl/policy/purgatory/q_dueling_policy.py similarity index 100% rename from vel/rl/model/q_dueling_model.py rename to vel/rl/policy/purgatory/q_dueling_policy.py diff --git a/vel/rl/model/q_model.py b/vel/rl/policy/purgatory/q_model.py similarity index 100% rename from vel/rl/model/q_model.py rename to vel/rl/policy/purgatory/q_model.py diff --git a/vel/rl/model/q_noisy_model.py b/vel/rl/policy/purgatory/q_noisy_model.py similarity index 100% rename from vel/rl/model/q_noisy_model.py rename to vel/rl/policy/purgatory/q_noisy_model.py diff --git a/vel/rl/model/q_rainbow_model.py b/vel/rl/policy/purgatory/q_rainbow_model.py similarity index 100% rename from vel/rl/model/q_rainbow_model.py rename to vel/rl/policy/purgatory/q_rainbow_model.py diff --git a/vel/rl/model/q_stochastic_policy_model.py b/vel/rl/policy/purgatory/q_stochastic_policy_model.py similarity index 94% rename from vel/rl/model/q_stochastic_policy_model.py rename to vel/rl/policy/purgatory/q_stochastic_policy_model.py index 5cf97893..c489980d 100644 --- a/vel/rl/model/q_stochastic_policy_model.py +++ b/vel/rl/policy/purgatory/q_stochastic_policy_model.py @@ -5,7 +5,7 @@ from vel.api import LinearBackboneModel, Model, ModelFactory, BackboneModel from vel.module.input.identity import IdentityFactory from vel.rl.api import Rollout, Evaluator -from vel.rl.module.action_head import ActionHead +from vel.rl.module.action_head import StochasticActionHead from vel.rl.module.q_head import QHead @@ -43,7 +43,7 @@ def __init__(self, input_block: BackboneModel, backbone: LinearBackboneModel, ac self.input_block = input_block self.backbone = backbone - self.action_head = ActionHead( + self.action_head = StochasticActionHead( input_dim=self.backbone.output_dim, action_space=action_space ) @@ -71,10 +71,10 @@ def forward(self, observations): return policy_params, q - def step(self, observation, argmax_sampling=False): + def step(self, observation, deterministic=False): """ Select actions based on model's output """ policy_params, q = self(observation) - actions = self.action_head.sample(policy_params, argmax_sampling=argmax_sampling) + actions = self.action_head.sample(policy_params, deterministic=deterministic) # log probability - we can do that, because we support only discrete action spaces logprobs = self.action_head.logprob(actions, policy_params) diff --git a/vel/rl/model/stochastic_policy_model.py b/vel/rl/policy/purgatory/stochastic_policy.py similarity index 94% rename from vel/rl/model/stochastic_policy_model.py rename to vel/rl/policy/purgatory/stochastic_policy.py index cee084b6..1788ffc6 100644 --- a/vel/rl/model/stochastic_policy_model.py +++ 
b/vel/rl/policy/purgatory/stochastic_policy.py @@ -4,7 +4,7 @@ from vel.api import LinearBackboneModel, ModelFactory, BackboneModel from vel.module.input.identity import IdentityFactory from vel.rl.api import Rollout, Evaluator, RlModel -from vel.rl.module.action_head import ActionHead +from vel.rl.module.action_head import StochasticActionHead from vel.rl.module.value_head import ValueHead @@ -43,7 +43,7 @@ def __init__(self, input_block: BackboneModel, backbone: LinearBackboneModel, ac self.input_block = input_block self.backbone = backbone - self.action_head = ActionHead( + self.action_head = StochasticActionHead( action_space=action_space, input_dim=self.backbone.output_dim ) @@ -67,10 +67,10 @@ def forward(self, observations): return action_output, value_output - def step(self, observation, argmax_sampling=False): + def step(self, observation, deterministic=False): """ Select actions based on model's output """ action_pd_params, value_output = self(observation) - actions = self.action_head.sample(action_pd_params, argmax_sampling=argmax_sampling) + actions = self.action_head.sample(action_pd_params, deterministic=deterministic) # log likelihood of selected action logprobs = self.action_head.logprob(actions, action_pd_params) diff --git a/vel/rl/model/stochastic_policy_model_separate.py b/vel/rl/policy/purgatory/stochastic_policy_model_separate.py similarity index 94% rename from vel/rl/model/stochastic_policy_model_separate.py rename to vel/rl/policy/purgatory/stochastic_policy_model_separate.py index 7612fde3..3044459e 100644 --- a/vel/rl/model/stochastic_policy_model_separate.py +++ b/vel/rl/policy/purgatory/stochastic_policy_model_separate.py @@ -5,7 +5,7 @@ from vel.api import LinearBackboneModel, ModelFactory, BackboneModel from vel.module.input.identity import IdentityFactory from vel.rl.api import Rollout, RlModel, Evaluator -from vel.rl.module.action_head import ActionHead +from vel.rl.module.action_head import StochasticActionHead from vel.rl.module.value_head import ValueHead from vel.rl.model.stochastic_policy_model import StochasticPolicyEvaluator @@ -24,7 +24,7 @@ def __init__(self, input_block: BackboneModel, self.policy_backbone = policy_backbone self.value_backbone = value_backbone - self.action_head = ActionHead( + self.action_head = StochasticActionHead( action_space=action_space, input_dim=self.policy_backbone.output_dim ) @@ -53,10 +53,10 @@ def forward(self, observations): return action_output, value_output - def step(self, observation, argmax_sampling=False): + def step(self, observation, deterministic=False): """ Select actions based on model's output """ policy_params, values = self(observation) - actions = self.action_head.sample(policy_params, argmax_sampling=argmax_sampling) + actions = self.action_head.sample(policy_params, deterministic=deterministic) # log likelihood of selected action logprobs = self.action_head.logprob(actions, policy_params) diff --git a/vel/rl/model/stochastic_policy_rnn_model.py b/vel/rl/policy/purgatory/stochastic_rnn_policy.py similarity index 95% rename from vel/rl/model/stochastic_policy_rnn_model.py rename to vel/rl/policy/purgatory/stochastic_rnn_policy.py index 7147a381..25551144 100644 --- a/vel/rl/model/stochastic_policy_rnn_model.py +++ b/vel/rl/policy/purgatory/stochastic_rnn_policy.py @@ -5,7 +5,7 @@ from vel.api import LinearBackboneModel, ModelFactory, BackboneModel from vel.module.input.identity import IdentityFactory from vel.rl.api import Rollout, Trajectories, Evaluator, RlRnnModel -from vel.rl.module.action_head 
import ActionHead +from vel.rl.module.action_head import StochasticActionHead from vel.rl.module.value_head import ValueHead @@ -61,7 +61,7 @@ def __init__(self, input_block: BackboneModel, backbone: LinearBackboneModel, self.input_block = input_block self.backbone = backbone - self.action_head = ActionHead( + self.action_head = StochasticActionHead( action_space=action_space, input_dim=self.backbone.output_dim ) @@ -91,10 +91,10 @@ def forward(self, observations, state): return action_output, value_output, new_state - def step(self, observations, state, argmax_sampling=False): + def step(self, observations, state, deterministic=False): """ Select actions based on model's output """ action_pd_params, value_output, new_state = self(observations, state) - actions = self.action_head.sample(action_pd_params, argmax_sampling=argmax_sampling) + actions = self.action_head.sample(action_pd_params, deterministic=deterministic) # log likelihood of selected action logprobs = self.action_head.logprob(actions, action_pd_params) diff --git a/vel/rl/policy/stochastic_policy.py b/vel/rl/policy/stochastic_policy.py new file mode 100644 index 00000000..6ecabc12 --- /dev/null +++ b/vel/rl/policy/stochastic_policy.py @@ -0,0 +1,121 @@ +import gym +import torch +import typing + +from vel.api import LinearBackboneModel, ModelFactory, BackboneModel +from vel.module.input.identity import IdentityFactory +from vel.rl.api import Rollout, Evaluator, Policy +from vel.rl.module.stochastic_action_head import StochasticActionHead +from vel.rl.module.value_head import ValueHead + + +class StochasticPolicyEvaluator(Evaluator): + """ Evaluator for a policy gradient model """ + + def __init__(self, model: 'StochasticPolicy', rollout: Rollout): + super().__init__(rollout) + + self.model = model + + pd_params, estimated_values = model(self.rollout.batch_tensor('observations')) + + self.provide('model:pd_params', pd_params) + self.provide('model:values', estimated_values) + + @Evaluator.provides('model:action:logprobs') + def model_action_logprobs(self): + actions = self.get('rollout:actions') + pd_params = self.get('model:pd_params') + return self.model.action_head.logprob(actions, pd_params) + + @Evaluator.provides('model:entropy') + def model_entropy(self): + pd_params = self.get('model:pd_params') + return self.model.action_head.entropy(pd_params) + + +class StochasticPolicy(Policy): + """ + Most generic policy gradient model class with a set of common actor-critic heads that share a single backbone + """ + + def __init__(self, input_block: BackboneModel, backbone: LinearBackboneModel, action_space: gym.Space): + super().__init__() + + self.input_block = input_block + self.backbone = backbone + + assert not self.backbone.is_stateful, "Backbone shouldn't have state" + + self.action_head = StochasticActionHead( + action_space=action_space, + input_dim=self.backbone.output_dim + ) + + self.value_head = ValueHead( + input_dim=self.backbone.output_dim + ) + + def reset_weights(self): + """ Initialize properly model weights """ + self.input_block.reset_weights() + self.backbone.reset_weights() + self.action_head.reset_weights() + self.value_head.reset_weights() + + def forward(self, observation): + """ Calculate model outputs """ + input_data = self.input_block(observation) + + base_output = self.backbone(input_data) + + action_output = self.action_head(base_output) + value_output = self.value_head(base_output) + + return action_output, value_output + + def act(self, observation, state=None, deterministic=False): + """ Select 
actions based on model's output """ + action_pd_params, value_output = self(observation) + actions = self.action_head.sample(action_pd_params, deterministic=deterministic) + + # log likelihood of selected action + logprobs = self.action_head.logprob(actions, action_pd_params) + + return { + 'actions': actions, + 'values': value_output, + 'action:logprobs': logprobs + } + + def value(self, observation, state=None) -> torch.tensor: + """ Calculate value only - small optimization """ + input_data = self.input_block(observation) + base_output = self.backbone(input_data) + return self.value_head(base_output) + + def evaluate(self, rollout: Rollout) -> Evaluator: + """ Evaluate model on a rollout """ + return StochasticPolicyEvaluator(self, rollout) + + +class StochasticPolicyFactory(ModelFactory): + """ Factory class for policy gradient models """ + def __init__(self, input_block: IdentityFactory, backbone: ModelFactory): + self.backbone = backbone + self.input_block = input_block + + def instantiate(self, **extra_args): + """ Instantiate the model """ + input_block = self.input_block.instantiate() + backbone = self.backbone.instantiate(**extra_args) + + return StochasticPolicy(input_block, backbone, extra_args['action_space']) + + +def create(backbone: ModelFactory, input_block: typing.Optional[ModelFactory] = None): + """ Vel factory function """ + if input_block is None: + input_block = IdentityFactory() + + return StochasticPolicyFactory(input_block=input_block, backbone=backbone) diff --git a/vel/rl/policy/stochastic_rnn_policy.py b/vel/rl/policy/stochastic_rnn_policy.py new file mode 100644 index 00000000..a1a87f2e --- /dev/null +++ b/vel/rl/policy/stochastic_rnn_policy.py @@ -0,0 +1,156 @@ +import gym +import torch +import typing + +from vel.api import LinearBackboneModel, ModelFactory, BackboneModel +from vel.module.input.identity import IdentityFactory +from vel.rl.api import Rollout, Trajectories, Evaluator, Policy +from vel.rl.module.stochastic_action_head import StochasticActionHead +from vel.rl.module.value_head import ValueHead + + +class StochasticPolicyRnnEvaluator(Evaluator): + """ Evaluate recurrent model from initial state """ + + def __init__(self, model: 'StochasticRnnPolicy', rollout: Rollout): + assert isinstance(rollout, Trajectories), "For an RNN model, we must evaluate trajectories" + super().__init__(rollout) + + self.model = model + + observation_trajectories = rollout.transition_tensors['observations'] + hidden_state = rollout.transition_tensors['state'][0] # Initial hidden state + + action_accumulator = [] + value_accumulator = [] + + # Evaluate recurrent network step by step + for i in range(observation_trajectories.size(0)): + action_output, value_output, hidden_state = model(observation_trajectories[i], hidden_state) + action_accumulator.append(action_output) + value_accumulator.append(value_output) + + policy_params = torch.cat(action_accumulator, dim=0) + estimated_values = torch.cat(value_accumulator, dim=0) + + self.provide('model:policy_params', policy_params) + self.provide('model:values', estimated_values) + + @Evaluator.provides('model:action:logprobs') + def model_action_logprobs(self): + actions = self.get('rollout:actions') + policy_params = self.get('model:policy_params') + return self.model.action_head.logprob(actions, policy_params) + + @Evaluator.provides('model:entropy') + def model_entropy(self): + policy_params = self.get('model:policy_params') + return self.model.action_head.entropy(policy_params) + + +class StochasticRnnPolicy(Policy): + """ 
+ Most generic policy gradient model class with a set of common actor-critic heads that share a single backbone + RNN version + """ + + def __init__(self, input_block: BackboneModel, backbone: LinearBackboneModel, + action_space: gym.Space): + super().__init__() + + self.input_block = input_block + self.backbone = backbone + + assert self.backbone.is_stateful, "Must have a stateful backbone" + + self.action_head = StochasticActionHead( + action_space=action_space, + input_dim=self.backbone.output_dim + ) + self.value_head = ValueHead(input_dim=self.backbone.output_dim) + + assert self.backbone.is_stateful, "Backbone must be a recurrent model" + + @property + def is_stateful(self) -> bool: + """ If the model has a state that needs to be fed between individual observations """ + return True + + def reset_weights(self): + """ Initialize properly model weights """ + self.input_block.reset_weights() + self.backbone.reset_weights() + self.action_head.reset_weights() + self.value_head.reset_weights() + + def forward(self, observations, state): + """ Calculate model outputs """ + input_data = self.input_block(observations) + base_output, new_state = self.backbone(input_data, state=state) + + action_output = self.action_head(base_output) + value_output = self.value_head(base_output) + + return action_output, value_output, new_state + + def act(self, observation, state=None, deterministic=False) -> dict: + """ Select actions based on model's output """ + action_pd_params, value_output, new_state = self(observation, state) + actions = self.action_head.sample(action_pd_params, deterministic=deterministic) + + # log likelihood of selected action + logprobs = self.action_head.logprob(actions, action_pd_params) + + return { + 'actions': actions, + 'values': value_output, + 'action:logprobs': logprobs, + 'state': new_state + } + + def evaluate(self, rollout: Rollout) -> Evaluator: + """ Evaluate model on a rollout """ + return StochasticPolicyRnnEvaluator(self, rollout) + + def value(self, observation, state=None): + """ Calculate only value head for given state """ + input_data = self.input_block(observation) + + base_output, new_state = self.backbone(input_data, state) + value_output = self.value_head(base_output) + + return value_output + + def reset_state(self, state, dones): + """ Reset the state after the episode has been terminated """ + if (dones > 0).any().item(): + zero_state = self.backbone.zero_state(dones.shape[0]).to(state.device) + dones_expanded = dones.unsqueeze(-1) + return state * (1 - dones_expanded) + zero_state * dones_expanded + else: + return state + + +class StochasticRnnPolicyFactory(ModelFactory): + """ Factory class for policy gradient models """ + def __init__(self, input_block: ModelFactory, backbone: ModelFactory): + self.input_block = input_block + self.backbone = backbone + + def instantiate(self, **extra_args): + """ Instantiate the model """ + input_block = self.input_block.instantiate() + backbone = self.backbone.instantiate(**extra_args) + + return StochasticRnnPolicy(input_block, backbone, extra_args['action_space']) + + +def create(backbone: ModelFactory, input_block: typing.Optional[ModelFactory] = None): + """ Vel factory function """ + if input_block is None: + input_block = IdentityFactory() + + return StochasticRnnPolicyFactory( + input_block=input_block, + backbone=backbone + ) diff --git a/vel/rl/reinforcer/buffered_mixed_policy_iteration_reinforcer.py b/vel/rl/reinforcer/buffered_mixed_policy_iteration_reinforcer.py index 8f1888a6..c2d97e31 100644 --- 
a/vel/rl/reinforcer/buffered_mixed_policy_iteration_reinforcer.py +++ b/vel/rl/reinforcer/buffered_mixed_policy_iteration_reinforcer.py @@ -115,7 +115,7 @@ def on_policy_train_batch(self, batch_info: BatchInfo): rollout = self.env_roller.rollout(batch_info, self.model, self.settings.number_of_steps).to_device(self.device) - batch_result = self.algo.optimizer_step( + batch_result = self.algo.optimize( batch_info=batch_info, device=self.device, model=self.model, @@ -132,7 +132,7 @@ def off_policy_train_batch(self, batch_info: BatchInfo): rollout = self.env_roller.sample(batch_info, self.model, self.settings.number_of_steps).to_device(self.device) - batch_result = self.algo.optimizer_step( + batch_result = self.algo.optimize( batch_info=batch_info, device=self.device, model=self.model, diff --git a/vel/rl/reinforcer/buffered_off_policy_iteration_reinforcer.py b/vel/rl/reinforcer/buffered_off_policy_iteration_reinforcer.py index d3ce3349..c2ec6fe8 100644 --- a/vel/rl/reinforcer/buffered_off_policy_iteration_reinforcer.py +++ b/vel/rl/reinforcer/buffered_off_policy_iteration_reinforcer.py @@ -146,7 +146,7 @@ def train_on_replay_memory(self, batch_info): for i in range(self.settings.training_rounds): sampled_rollout = self.env_roller.sample(batch_info, self.model, self.settings.training_steps) - batch_result = self.algo.optimizer_step( + batch_result = self.algo.optimize( batch_info=batch_info, device=self.device, model=self.model, diff --git a/vel/rl/reinforcer/on_policy_iteration_reinforcer.py b/vel/rl/reinforcer/on_policy_iteration_reinforcer.py index 46c7d450..d889c681 100644 --- a/vel/rl/reinforcer/on_policy_iteration_reinforcer.py +++ b/vel/rl/reinforcer/on_policy_iteration_reinforcer.py @@ -5,7 +5,10 @@ import tqdm from vel.api import Model, ModelFactory, TrainingInfo, EpochInfo, BatchInfo -from vel.rl.api import ReinforcerBase, ReinforcerFactory, VecEnvFactory, EnvRollerFactoryBase, EnvRollerBase, AlgoBase +from vel.rl.api import ( + ReinforcerBase, ReinforcerFactory, VecEnvFactory, EnvRollerFactoryBase, EnvRollerBase, AlgoBase, + Policy +) from vel.rl.metrics import ( FPSMetric, EpisodeLengthMetric, EpisodeRewardMetricQuantile, EpisodeRewardMetric, FramesMetric @@ -32,16 +35,16 @@ class OnPolicyIterationReinforcer(ReinforcerBase): A reinforcer that calculates on-policy environment rollouts and uses them to train policy directly. May split the sample into multiple batches and may replay batches a few times. 
""" - def __init__(self, device: torch.device, settings: OnPolicyIterationReinforcerSettings, model: Model, + def __init__(self, device: torch.device, settings: OnPolicyIterationReinforcerSettings, policy: Policy, algo: AlgoBase, env_roller: EnvRollerBase) -> None: self.device = device self.settings = settings - self._trained_model = model.to(self.device) - self.env_roller = env_roller self.algo = algo + self._trained_model = policy.to(self.device) + def metrics(self) -> list: """ List of metrics to track for this learning process """ my_metrics = [ @@ -53,7 +56,7 @@ def metrics(self) -> list: EpisodeLengthMetric("episode_length"), ] - return my_metrics + self.algo.metrics() + self.env_roller.metrics() + return my_metrics + self.algo.metrics() + self.env_roller.metrics() + self.model.metrics() @property def model(self) -> Model: @@ -102,7 +105,7 @@ def train_batch(self, batch_info: BatchInfo) -> None: # Calculate environment rollout on the evaluation version of the model self.model.train() - rollout = self.env_roller.rollout(batch_info, self.model, self.settings.number_of_steps) + rollout = self.env_roller.rollout(batch_info, self.settings.number_of_steps) # Process rollout by the 'algo' (e.g. perform the advantage estimation) rollout = self.algo.process_rollout(batch_info, rollout) @@ -124,7 +127,7 @@ def train_batch(self, batch_info: BatchInfo) -> None: for i in range(experience_replay_count): # We may potentially need to split rollout into multiple batches if self.settings.batch_size >= rollout.frames(): - batch_result = self.algo.optimizer_step( + batch_result = self.algo.optimize( batch_info=batch_info, device=self.device, model=self.model, @@ -135,7 +138,7 @@ def train_batch(self, batch_info: BatchInfo) -> None: else: # Rollout too big, need to split in batches for batch_rollout in rollout.shuffled_batches(self.settings.batch_size): - batch_result = self.algo.optimizer_step( + batch_result = self.algo.optimize( batch_info=batch_info, device=self.device, model=self.model, @@ -166,10 +169,9 @@ def __init__(self, settings, parallel_envs: int, env_factory: VecEnvFactory, mod def instantiate(self, device: torch.device) -> ReinforcerBase: env = self.env_factory.instantiate(parallel_envs=self.parallel_envs, seed=self.seed) - env_roller = self.env_roller_factory.instantiate(environment=env, device=device) - model = self.model_factory.instantiate(action_space=env.action_space) - - return OnPolicyIterationReinforcer(device, self.settings, model, self.algo, env_roller) + policy = self.model_factory.instantiate(action_space=env.action_space) + env_roller = self.env_roller_factory.instantiate(environment=env, policy=policy, device=device) + return OnPolicyIterationReinforcer(device, self.settings, policy, self.algo, env_roller) def create(model_config, model, vec_env, algo, env_roller, parallel_envs, number_of_steps, diff --git a/vel/rl/test/test_integration.py b/vel/rl/test/test_integration.py index 2f51b419..b0433ac4 100644 --- a/vel/rl/test/test_integration.py +++ b/vel/rl/test/test_integration.py @@ -27,8 +27,8 @@ from vel.rl.model.deterministic_policy_model import DeterministicPolicyModelFactory from vel.rl.model.stochastic_policy_model_separate import StochasticPolicyModelSeparateFactory -from vel.rl.model.backbone.nature_cnn import NatureCnnFactory -from vel.rl.model.backbone.mlp import MLPFactory +from vel.rl.backbone.nature_cnn import NatureCnnFactory +from vel.rl.backbone.mlp import MLPFactory from vel.rl.reinforcer.on_policy_iteration_reinforcer import ( OnPolicyIterationReinforcer, 
OnPolicyIterationReinforcerSettings diff --git a/vel/rl/util/actor.py b/vel/rl/util/actor.py new file mode 100644 index 00000000..a395bb53 --- /dev/null +++ b/vel/rl/util/actor.py @@ -0,0 +1,36 @@ +import torch + +from vel.rl.api import Policy +from vel.util.tensor_util import to_device + + +class PolicyActor: + """ Evaluates policy on a fixed set of environments. Additionally tracks the state """ + + def __init__(self, num_envs: int, policy: Policy, device: torch.device): + self.num_envs = num_envs + self.policy = policy + self.device = device + self.state = to_device(self.policy.zero_state(num_envs), self.device) + + def act(self, observation, advance_state=True, deterministic=False): + """ Return result of a policy on a given input """ + result = self.policy.act(observation, state=self.state, deterministic=deterministic) + + if self.policy.is_stateful and advance_state: + self.state = result['state'] + + return result + + def reset_states(self, dones): + """ Reset states given dones """ + if not self.policy.is_stateful: + return + + dones = dones.to(self.device) + + self.state = self.policy.reset_state(self.state, dones) + + def value(self, observation): + """ Return value for provided observations """ + return self.policy.value(observation, state=self.state) diff --git a/vel/storage/streaming/tensorboard.py b/vel/storage/streaming/tensorboard.py new file mode 100644 index 00000000..ef90b267 --- /dev/null +++ b/vel/storage/streaming/tensorboard.py @@ -0,0 +1,40 @@ +import os +import shutil + +from vel.api import ModelConfig, Callback, TrainingInfo +from torch.utils.tensorboard import SummaryWriter + + +class TensorboardStreaming(Callback): + """ Stream results to tensorboard """ + + def __init__(self, model_config: ModelConfig): + self.model_config = model_config + self.logdir = self.model_config.output_dir('tensorboard', self.model_config.run_name) + + def on_train_begin(self, training_info: TrainingInfo) -> None: + """ Potentially cleanup previous runs """ + if training_info.start_epoch_idx == 0: + if os.path.exists(self.logdir): + shutil.rmtree(self.logdir) + + def on_epoch_end(self, epoch_info): + """ Push data to tensorboard on push """ + summary_writer = SummaryWriter(log_dir=self.logdir) + + for key, value in epoch_info.result.items(): + if key == 'epoch_idx': + continue + + summary_writer.add_scalar( + tag=key, + scalar_value=value, + global_step=epoch_info.global_epoch_idx, + ) + + summary_writer.close() + + +def create(model_config): + """ Vel factory function """ + return TensorboardStreaming(model_config) diff --git a/vel/util/tensor_util.py b/vel/util/tensor_util.py index 06e4dd7c..b0683099 100644 --- a/vel/util/tensor_util.py +++ b/vel/util/tensor_util.py @@ -16,3 +16,17 @@ def merge_first_two_dims(tensor): batch_size = shape[0] * shape[1] new_shape = tuple([batch_size] + list(shape[2:])) return tensor.view(new_shape) + + +def to_device(tensor, device: torch.device): + """ Convert tensor-like object to given PyTorch device """ + if tensor is None: + return tensor + elif isinstance(tensor, torch.Tensor): + return tensor.to(device) + elif isinstance(tensor, dict): + return {k: to_device(v, device) for k, v in tensor.items()} + elif isinstance(tensor, list): + return [to_device(v, device) for v in tensor] + else: + raise NotImplementedError From 1b8251a8c4b7ee25fe93c595bb342943ac1ad631 Mon Sep 17 00:00:00 2001 From: Jerry Tworek Date: Sat, 15 Jun 2019 21:29:48 -0700 Subject: [PATCH 044/162] Fixing configuration files. 
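As a side note to the utilities introduced just above: the new `to_device` helper in `vel/util/tensor_util.py` recursively walks dicts and lists, so nested rollout-style structures can be moved to a device in a single call. A minimal usage sketch (the dictionary keys below are made up purely for illustration):

    import torch

    from vel.util.tensor_util import to_device

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # A made-up nested structure; the keys are illustrative only
    batch = {
        'observations': torch.zeros(8, 4, 84, 84),
        'extras': [torch.zeros(8), {'dones': torch.zeros(8)}],
    }

    # Every tensor inside the dict/list is moved to the target device; None values
    # pass through and unsupported container types raise NotImplementedError
    batch_on_device = to_device(batch, device)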
--- README.md | 12 +++++------- examples-configs/rl/atari/atari_a2c_lstm.yaml | 2 +- examples-configs/rl/atari/atari_a2c_tf_rmsprop.yaml | 2 +- examples-configs/rl/atari/atari_acer.yaml | 2 +- .../rl/atari/atari_acer_trust_region.yaml | 2 +- examples-configs/rl/atari/atari_ppo_gru.yaml | 2 +- examples-configs/rl/atari/atari_trpo.yaml | 2 +- .../rl/atari/dqn/atari_dqn_distributional.yaml | 2 +- examples-configs/rl/atari/dqn/atari_dqn_raw.yaml | 2 +- .../rl/atari/dqn/atari_dueling_ddqn_prioritized.yaml | 2 +- .../rl/atari/dqn_rainbow_param/atari_rainbow.yaml | 2 +- .../atari_rp_dqn_distributional.yaml | 2 +- .../dqn_rainbow_param/atari_rp_dqn_noisynet.yaml | 2 +- .../atari/dqn_rainbow_param/atari_rp_dqn_nstep.yaml | 2 +- .../rl/atari/dqn_rainbow_param/atari_rp_dqn_raw.yaml | 2 +- 15 files changed, 19 insertions(+), 21 deletions(-) diff --git a/README.md b/README.md index be618544..fd5d7c42 100644 --- a/README.md +++ b/README.md @@ -34,7 +34,7 @@ into a structure that is designed to be reused rather than copied over. As a goal, it should be enough to write a config file that wires existing components together and defines their hyperparameters for most common applications. -If that's not the case few bits of custom glue code should do the job. +If that's not the case, a few bits of custom glue code should do the job. This repository is still in an early stage of that journey but it will grow @@ -55,16 +55,14 @@ pip install -e . ``` from the repository root directory. -This project requires Python at least 3.6 and PyTorch 1.0. +This project requires at least Python 3.6 and PyTorch 1.1. If you want to run YAML config examples, you'll also need a **project configuration file** `.velproject.yaml`. An example is included in this repository. -Default project configuration writes -metrics to MongoDB instance open on localhost port 27017 and Visdom instance -on localhost port 8097. +The default project configuration writes logs to the tensorboard directory `output/tensorboard` +under the main directory. Outputs to visdom and mongodb are also implemented. -If you don't want to run these services, there is included -another example file `.velproject.dummy.yaml` +If you don't want any logging, another example file `.velproject.dummy.yaml` is included that writes training progress to the standard output only. To use it, just rename it to `.velproject.yaml`.
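The `output/tensorboard` directory mentioned above is produced by the `TensorboardStreaming` callback added earlier in this series; once a run has written a few epochs, the logs can be inspected with the standard TensorBoard CLI (`tensorboard --logdir output/tensorboard`). A rough sketch of what the callback does, assuming `model_config` and `epoch_info` are the usual objects the framework passes in:

    from vel.storage.streaming.tensorboard import TensorboardStreaming

    callback = TensorboardStreaming(model_config)

    # The training loop invokes this at the end of every epoch; each entry of
    # epoch_info.result is written as a scalar under the run's log directory
    callback.on_epoch_end(epoch_info)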
diff --git a/examples-configs/rl/atari/atari_a2c_lstm.yaml b/examples-configs/rl/atari/atari_a2c_lstm.yaml index 36947571..f83f5c50 100644 --- a/examples-configs/rl/atari/atari_a2c_lstm.yaml +++ b/examples-configs/rl/atari/atari_a2c_lstm.yaml @@ -60,7 +60,7 @@ commands: record: name: vel.rl.commands.record_movie_command takes: 10 - videoname: 'breakout_vid_{:04}.avi' + videoname: 'atari_vid_{:04}.avi' evaluate: name: vel.rl.commands.evaluate_env_command diff --git a/examples-configs/rl/atari/atari_a2c_tf_rmsprop.yaml b/examples-configs/rl/atari/atari_a2c_tf_rmsprop.yaml index 3fa29e5b..2786a6a4 100644 --- a/examples-configs/rl/atari/atari_a2c_tf_rmsprop.yaml +++ b/examples-configs/rl/atari/atari_a2c_tf_rmsprop.yaml @@ -57,7 +57,7 @@ commands: record: name: vel.rl.commands.record_movie_command takes: 10 - videoname: 'breakout_vid_{:04}.avi' + videoname: 'atari_vid_{:04}.avi' evaluate: name: vel.rl.commands.evaluate_env_command diff --git a/examples-configs/rl/atari/atari_acer.yaml b/examples-configs/rl/atari/atari_acer.yaml index c488883e..52e47b84 100644 --- a/examples-configs/rl/atari/atari_acer.yaml +++ b/examples-configs/rl/atari/atari_acer.yaml @@ -74,7 +74,7 @@ commands: record: name: vel.rl.commands.record_movie_command takes: 10 - videoname: 'beamrider_vid_{:04}.avi' + videoname: 'atari_vid_{:04}.avi' evaluate: name: vel.rl.commands.evaluate_env_command diff --git a/examples-configs/rl/atari/atari_acer_trust_region.yaml b/examples-configs/rl/atari/atari_acer_trust_region.yaml index 99bae873..810a6a51 100644 --- a/examples-configs/rl/atari/atari_acer_trust_region.yaml +++ b/examples-configs/rl/atari/atari_acer_trust_region.yaml @@ -75,7 +75,7 @@ commands: record: name: vel.rl.commands.record_movie_command takes: 10 - videoname: 'beamrider_vid_{:04}.avi' + videoname: 'atari_vid_{:04}.avi' evaluate: name: vel.rl.commands.evaluate_env_command diff --git a/examples-configs/rl/atari/atari_ppo_gru.yaml b/examples-configs/rl/atari/atari_ppo_gru.yaml index 0f8c2e6a..43244de9 100644 --- a/examples-configs/rl/atari/atari_ppo_gru.yaml +++ b/examples-configs/rl/atari/atari_ppo_gru.yaml @@ -75,7 +75,7 @@ commands: record: name: vel.rl.command.record_movie_command takes: 10 - videoname: 'breakout_ppo_gru_vid_{:04}.avi' + videoname: 'atari_ppo_gru_vid_{:04}.avi' evaluate: name: vel.rl.command.evaluate_env_command diff --git a/examples-configs/rl/atari/atari_trpo.yaml b/examples-configs/rl/atari/atari_trpo.yaml index e54e6d07..6b363274 100644 --- a/examples-configs/rl/atari/atari_trpo.yaml +++ b/examples-configs/rl/atari/atari_trpo.yaml @@ -78,7 +78,7 @@ commands: record: name: vel.rl.commands.record_movie_command takes: 10 - videoname: 'breakout_trpo_vid_{:04}.avi' + videoname: 'atari_trpo_vid_{:04}.avi' frame_history: 4 sample_args: argmax_sampling: true diff --git a/examples-configs/rl/atari/dqn/atari_dqn_distributional.yaml b/examples-configs/rl/atari/dqn/atari_dqn_distributional.yaml index b605a75d..dd5b62f3 100644 --- a/examples-configs/rl/atari/dqn/atari_dqn_distributional.yaml +++ b/examples-configs/rl/atari/dqn/atari_dqn_distributional.yaml @@ -83,7 +83,7 @@ commands: record: name: vel.rl.commands.record_movie_command takes: 10 - videoname: 'breakout_vid_{:04}.avi' + videoname: 'atari_vid_{:04}.avi' evaluate: name: vel.rl.commands.evaluate_env_command diff --git a/examples-configs/rl/atari/dqn/atari_dqn_raw.yaml b/examples-configs/rl/atari/dqn/atari_dqn_raw.yaml index 31e81b00..a32427bd 100644 --- a/examples-configs/rl/atari/dqn/atari_dqn_raw.yaml +++ 
b/examples-configs/rl/atari/dqn/atari_dqn_raw.yaml @@ -79,7 +79,7 @@ commands: record: name: vel.rl.commands.record_movie_command takes: 10 - videoname: 'breakout_vid_{:04}.avi' + videoname: 'atari_vid_{:04}.avi' evaluate: name: vel.rl.commands.evaluate_env_command diff --git a/examples-configs/rl/atari/dqn/atari_dueling_ddqn_prioritized.yaml b/examples-configs/rl/atari/dqn/atari_dueling_ddqn_prioritized.yaml index 99127352..3084f15a 100644 --- a/examples-configs/rl/atari/dqn/atari_dueling_ddqn_prioritized.yaml +++ b/examples-configs/rl/atari/dqn/atari_dueling_ddqn_prioritized.yaml @@ -89,7 +89,7 @@ commands: record: name: vel.rl.commands.record_movie_command takes: 10 - videoname: 'breakout_vid_{:04}.avi' + videoname: 'atari_vid_{:04}.avi' evaluate: name: vel.rl.commands.evaluate_env_command diff --git a/examples-configs/rl/atari/dqn_rainbow_param/atari_rainbow.yaml b/examples-configs/rl/atari/dqn_rainbow_param/atari_rainbow.yaml index af99acd8..9e8a92cb 100644 --- a/examples-configs/rl/atari/dqn_rainbow_param/atari_rainbow.yaml +++ b/examples-configs/rl/atari/dqn_rainbow_param/atari_rainbow.yaml @@ -93,7 +93,7 @@ commands: record: name: vel.rl.commands.record_movie_command takes: 10 - videoname: 'breakout_rainbow_vid_{:04}.avi' + videoname: 'atari_rainbow_vid_{:04}.avi' fps: 15 evaluate: diff --git a/examples-configs/rl/atari/dqn_rainbow_param/atari_rp_dqn_distributional.yaml b/examples-configs/rl/atari/dqn_rainbow_param/atari_rp_dqn_distributional.yaml index 57e4fe91..20fabbd6 100644 --- a/examples-configs/rl/atari/dqn_rainbow_param/atari_rp_dqn_distributional.yaml +++ b/examples-configs/rl/atari/dqn_rainbow_param/atari_rp_dqn_distributional.yaml @@ -84,7 +84,7 @@ commands: name: vel.rl.commands.record_movie_command takes: 10 fps: 15 - videoname: 'asterix_vid_{:04}.avi' + videoname: 'atari_vid_{:04}.avi' evaluate: name: vel.rl.commands.evaluate_env_command diff --git a/examples-configs/rl/atari/dqn_rainbow_param/atari_rp_dqn_noisynet.yaml b/examples-configs/rl/atari/dqn_rainbow_param/atari_rp_dqn_noisynet.yaml index e8174023..822e3085 100644 --- a/examples-configs/rl/atari/dqn_rainbow_param/atari_rp_dqn_noisynet.yaml +++ b/examples-configs/rl/atari/dqn_rainbow_param/atari_rp_dqn_noisynet.yaml @@ -77,7 +77,7 @@ commands: record: name: vel.rl.commands.record_movie_command takes: 10 - videoname: 'asterix_vid_{:04}.avi' + videoname: 'atari_vid_{:04}.avi' fps: 15 evaluate: diff --git a/examples-configs/rl/atari/dqn_rainbow_param/atari_rp_dqn_nstep.yaml b/examples-configs/rl/atari/dqn_rainbow_param/atari_rp_dqn_nstep.yaml index 89629ac7..af118e3d 100644 --- a/examples-configs/rl/atari/dqn_rainbow_param/atari_rp_dqn_nstep.yaml +++ b/examples-configs/rl/atari/dqn_rainbow_param/atari_rp_dqn_nstep.yaml @@ -83,7 +83,7 @@ commands: record: name: vel.rl.commands.record_movie_command takes: 10 - videoname: 'atlantis_vid_{:04}.avi' + videoname: 'atari_vid_{:04}.avi' evaluate: name: vel.rl.commands.evaluate_env_command diff --git a/examples-configs/rl/atari/dqn_rainbow_param/atari_rp_dqn_raw.yaml b/examples-configs/rl/atari/dqn_rainbow_param/atari_rp_dqn_raw.yaml index 5786002b..8e7272b8 100644 --- a/examples-configs/rl/atari/dqn_rainbow_param/atari_rp_dqn_raw.yaml +++ b/examples-configs/rl/atari/dqn_rainbow_param/atari_rp_dqn_raw.yaml @@ -79,7 +79,7 @@ commands: record: name: vel.rl.commands.record_movie_command takes: 10 - videoname: 'asterix_vid_{:04}.avi' + videoname: 'atari_vid_{:04}.avi' fps: 15 evaluate: From 30e654e5c8dc99553bb3b014932f268371b933b6 Mon Sep 17 00:00:00 2001 From: Jerry Tworek 
Date: Thu, 20 Jun 2019 17:30:46 -0700 Subject: [PATCH 045/162] Added a few extra input modules. --- vel/module/input/flatten.py | 28 ++++++++++++++++++++++++++++ vel/module/input/sequence.py | 25 +++++++++++++++++++++++++ 2 files changed, 53 insertions(+) create mode 100644 vel/module/input/flatten.py create mode 100644 vel/module/input/sequence.py diff --git a/vel/module/input/flatten.py b/vel/module/input/flatten.py new file mode 100644 index 00000000..d32b06f0 --- /dev/null +++ b/vel/module/input/flatten.py @@ -0,0 +1,28 @@ +from vel.module.layers import Flatten + + +from vel.api import ModelFactory, BackboneModel + + +class FlattenInput(BackboneModel): + """ Flatten input """ + def __init__(self): + super().__init__() + self.model = Flatten() + + def forward(self, input_data): + return self.model(input_data) + + +def create(): + """ Vel factory function """ + def instantiate(**_): + return Flatten() + + return ModelFactory.generic(instantiate) + + +# Scripting interface +FlattenInputFactory = create + + diff --git a/vel/module/input/sequence.py b/vel/module/input/sequence.py new file mode 100644 index 00000000..43d41ad1 --- /dev/null +++ b/vel/module/input/sequence.py @@ -0,0 +1,25 @@ +import torch.nn as nn + +from vel.api import ModelFactory, BackboneModel + + +class SequenceInput(BackboneModel): + """ Sequence input """ + def __init__(self, modules): + super().__init__() + self.model = nn.Sequential(*modules) + + def forward(self, input_data): + return self.model(input_data) + + +def create(modules): + """ Vel factory function """ + def instantiate(**_): + return SequenceInput([f.instantiate() for f in modules]) + + return ModelFactory.generic(instantiate) + + +# Scripting interface +SequenceInputFactory = create From 54352e9727e6934319135f4c1318cc7308b69051 Mon Sep 17 00:00:00 2001 From: Jerry Tworek Date: Thu, 20 Jun 2019 17:34:00 -0700 Subject: [PATCH 046/162] Remove blank line. --- vel/module/input/flatten.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/vel/module/input/flatten.py b/vel/module/input/flatten.py index d32b06f0..0972616d 100644 --- a/vel/module/input/flatten.py +++ b/vel/module/input/flatten.py @@ -24,5 +24,3 @@ def instantiate(**_): # Scripting interface FlattenInputFactory = create From 99106a6098ae844700311418e9f85f8685ddda39 Mon Sep 17 00:00:00 2001 From: Jerry Tworek Date: Thu, 20 Jun 2019 17:41:05 -0700 Subject: [PATCH 047/162] Fixed replay env rollers a bit.
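The replay rollers below now receive the policy up front and drive it through the `PolicyActor` wrapper instead of calling `model.step(...)` directly. A rough sketch of the pattern, with `env`, `policy` and `dones` standing in for objects constructed elsewhere via the usual vel factories:

    import torch

    from vel.rl.util.actor import PolicyActor

    device = torch.device('cpu')
    actor = PolicyActor(num_envs=env.num_envs, policy=policy, device=device)

    obs = torch.from_numpy(env.reset()).clone().to(device)

    step = actor.act(obs)        # dict with 'actions', 'values', 'action:logprobs', ...
    values = actor.value(obs)    # value estimates only
    actor.reset_states(dones)    # no-op for policies without recurrent state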
--- .../trajectory_replay_env_roller.py | 24 +++++++++++-------- .../transition_replay_env_roller.py | 24 +++++++++++-------- vel/rl/module/test/test_action_head.py | 2 +- vel/rl/util/actor.py | 5 ++++ 4 files changed, 34 insertions(+), 21 deletions(-) diff --git a/vel/rl/env_roller/trajectory_replay_env_roller.py b/vel/rl/env_roller/trajectory_replay_env_roller.py index 38d1f9aa..e0407a05 100644 --- a/vel/rl/env_roller/trajectory_replay_env_roller.py +++ b/vel/rl/env_roller/trajectory_replay_env_roller.py @@ -2,9 +2,11 @@ import numpy as np from vel.api import BatchInfo +from vel.openai.baselines.common.vec_env import VecEnv from vel.rl.api import ( - Trajectories, Rollout, ReplayEnvRollerBase, ReplayEnvRollerFactoryBase, ReplayBuffer, ReplayBufferFactory, RlModel + Trajectories, Rollout, ReplayEnvRollerBase, ReplayEnvRollerFactoryBase, ReplayBuffer, ReplayBufferFactory, Policy ) +from vel.rl.util.actor import PolicyActor from vel.util.tensor_accumulator import TensorAccumulator @@ -15,11 +17,14 @@ class TrajectoryReplayEnvRoller(ReplayEnvRollerBase): Samples trajectories from the replay buffer (consecutive series of frames) """ - def __init__(self, environment, device, replay_buffer: ReplayBuffer): + def __init__(self, environment: VecEnv, policy: Policy, device: torch.device, replay_buffer: ReplayBuffer): self._environment = environment self.device = device self.replay_buffer = replay_buffer + self.actor = PolicyActor(self.environment.num_envs, policy, device) + assert not self.actor.is_stateful, "Does not support stateful policies" + # Initial observation self.last_observation_cpu = torch.from_numpy(self.environment.reset()).clone() self.last_observation = self.last_observation_cpu.to(self.device) @@ -30,15 +35,13 @@ def environment(self): return self._environment @torch.no_grad() - def rollout(self, batch_info: BatchInfo, model: RlModel, number_of_steps: int) -> Rollout: + def rollout(self, batch_info: BatchInfo, number_of_steps: int) -> Rollout: """ Calculate env rollout """ - assert not model.is_stateful, "Replay env roller does not support recurrent models" - accumulator = TensorAccumulator() episode_information = [] # List of dictionaries with episode information for step_idx in range(number_of_steps): - step = model.step(self.last_observation) + step = self.actor.act(self.last_observation) replay_extra_information = {} @@ -84,17 +87,17 @@ def rollout(self, batch_info: BatchInfo, model: RlModel, number_of_steps: int) - environment_information=episode_information, transition_tensors=accumulated_tensors, rollout_tensors={ - 'final_values': model.value(self.last_observation).cpu() + 'final_values': self.actor.value(self.last_observation).cpu() } ) - def sample(self, batch_info: BatchInfo, model: RlModel, number_of_steps: int) -> Rollout: + def sample(self, batch_info: BatchInfo, number_of_steps: int) -> Rollout: """ Sample experience from replay buffer and return a batch """ # Sample trajectories rollout = self.replay_buffer.sample_trajectories(rollout_length=number_of_steps, batch_info=batch_info) last_observations = rollout.transition_tensors['observations_next'][-1].to(self.device) - final_values = model.value(last_observations).cpu() + final_values = self.actor.value(last_observations).cpu() # Add 'final_values' to the rollout rollout.rollout_tensors['final_values'] = final_values @@ -116,11 +119,12 @@ class TrajectoryReplayEnvRollerFactory(ReplayEnvRollerFactoryBase): def __init__(self, replay_buffer_factory: ReplayBufferFactory): self.replay_buffer_factory = 
replay_buffer_factory - def instantiate(self, environment, device): + def instantiate(self, environment, policy, device): replay_buffer = self.replay_buffer_factory.instantiate(environment) return TrajectoryReplayEnvRoller( environment=environment, + policy=policy, device=device, replay_buffer=replay_buffer ) diff --git a/vel/rl/env_roller/transition_replay_env_roller.py b/vel/rl/env_roller/transition_replay_env_roller.py index d0ed933b..25c1541a 100644 --- a/vel/rl/env_roller/transition_replay_env_roller.py +++ b/vel/rl/env_roller/transition_replay_env_roller.py @@ -4,10 +4,12 @@ import numpy as np from vel.api import BatchInfo, ModelFactory +from vel.openai.baselines.common.vec_env import VecEnv from vel.openai.baselines.common.running_mean_std import RunningMeanStd from vel.rl.api import ( - Trajectories, Rollout, ReplayEnvRollerBase, ReplayEnvRollerFactoryBase, RlModel, ReplayBuffer, ReplayBufferFactory + Trajectories, Rollout, ReplayEnvRollerBase, ReplayEnvRollerFactoryBase, ReplayBuffer, ReplayBufferFactory, Policy ) +from vel.rl.util.actor import PolicyActor from vel.util.tensor_accumulator import TensorAccumulator @@ -18,9 +20,9 @@ class TransitionReplayEnvRoller(ReplayEnvRollerBase): Samples transitions from the replay buffer (individual frame transitions) """ - def __init__(self, environment, device, replay_buffer: ReplayBuffer, discount_factor: typing.Optional[float] = None, - normalize_returns: bool = False, forward_steps: int = 1, - action_noise: typing.Optional[nn.Module] = None): + def __init__(self, environment: VecEnv, policy: Policy, device: torch.device, replay_buffer: ReplayBuffer, + discount_factor: typing.Optional[float] = None, normalize_returns: bool = False, + forward_steps: int = 1, action_noise: typing.Optional[nn.Module] = None): self._environment = environment self.device = device self.replay_buffer = replay_buffer @@ -29,6 +31,9 @@ def __init__(self, environment, device, replay_buffer: ReplayBuffer, discount_fa self.discount_factor = discount_factor self.action_noise = action_noise.to(self.device) if action_noise is not None else None + self.actor = PolicyActor(self.environment.num_envs, policy, device) + assert not self.actor.is_stateful, "Does not support stateful policies" + if self.normalize_returns: assert self.discount_factor is not None, \ "TransitionReplayEnvRoller must have a discount factor defined if normalize_returns is turned on" @@ -53,15 +58,13 @@ def environment(self): return self._environment @torch.no_grad() - def rollout(self, batch_info: BatchInfo, model: RlModel, number_of_steps: int) -> Rollout: + def rollout(self, batch_info: BatchInfo, number_of_steps: int) -> Rollout: """ Calculate env rollout """ - assert not model.is_stateful, "Replay env roller does not support stateful models" - accumulator = TensorAccumulator() episode_information = [] # List of dictionaries with episode information for step_idx in range(number_of_steps): - step = model.step(self.last_observation) + step = self.actor.act(self.last_observation) if self.action_noise is not None: step['actions'] = self.action_noise(step['actions'], batch_info=batch_info) @@ -124,7 +127,7 @@ def rollout(self, batch_info: BatchInfo, model: RlModel, number_of_steps: int) - rollout_tensors={} ).to_transitions() - def sample(self, batch_info: BatchInfo, model: RlModel, number_of_steps: int) -> Rollout: + def sample(self, batch_info: BatchInfo, number_of_steps: int) -> Rollout: """ Sample experience from replay buffer and return a batch """ if self.forward_steps > 1: transitions = 
self.replay_buffer.sample_forward_transitions( @@ -166,7 +169,7 @@ def __init__(self, replay_buffer_factory: ReplayBufferFactory, discount_factor: self.discount_factor = discount_factor self.action_noise_factory = action_noise - def instantiate(self, environment, device): + def instantiate(self, environment, policy, device): replay_buffer = self.replay_buffer_factory.instantiate(environment) if self.action_noise_factory is None: @@ -176,6 +179,7 @@ def instantiate(self, environment, device): return TransitionReplayEnvRoller( environment=environment, + policy=policy, device=device, replay_buffer=replay_buffer, discount_factor=self.discount_factor, diff --git a/vel/rl/module/test/test_action_head.py b/vel/rl/module/test/test_action_head.py index 6dc22e06..b0364e5c 100644 --- a/vel/rl/module/test/test_action_head.py +++ b/vel/rl/module/test/test_action_head.py @@ -7,7 +7,7 @@ import torch.nn.functional as F import torch.distributions as d -from vel.rl.module.action_head import DiagGaussianActionHead, CategoricalActionHead +from vel.rl.module.stochastic_action_head import DiagGaussianActionHead, CategoricalActionHead def test_sample_diag_gaussian(): diff --git a/vel/rl/util/actor.py b/vel/rl/util/actor.py index a395bb53..43fc4b80 100644 --- a/vel/rl/util/actor.py +++ b/vel/rl/util/actor.py @@ -34,3 +34,8 @@ def reset_states(self, dones): def value(self, observation): """ Return value for provided observations """ return self.policy.value(observation, state=self.state) + + @property + def is_stateful(self) -> bool: + """ If the model has a state that needs to be fed between individual observations """ + return self.policy.is_stateful From 772f6bcd9e3d81798c3b6a9e0152eda07a7f6774 Mon Sep 17 00:00:00 2001 From: Jerry Tworek Date: Thu, 20 Jun 2019 17:47:04 -0700 Subject: [PATCH 048/162] Fixed integration tests a bit for the time being. 
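The integration tests below switch from the old model factories to the new policy classes; the policy is now built first and shared between the env roller and the reinforcer. Roughly, using the names from the diff (`vec_env`, `device` and `algo` are assumed to be set up exactly as in the test, and imports are as in the test file):

    policy = StochasticPolicyFactory(
        input_block=ImageToTensorFactory(),
        backbone=NatureCnnFactory(input_width=84, input_height=84, input_channels=4),
    ).instantiate(action_space=vec_env.action_space)

    env_roller = StepEnvRoller(environment=vec_env, policy=policy, device=device)

    reinforcer = OnPolicyIterationReinforcer(
        device=device,
        settings=OnPolicyIterationReinforcerSettings(batch_size=256, number_of_steps=5),
        policy=policy,
        algo=algo,  # e.g. A2CPolicyGradient(...), constructed exactly as before
        env_roller=env_roller,
    )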
--- vel/rl/test/test_integration.py | 942 ++++++++++++++++---------------- vel/rl/util/actor.py | 2 +- 2 files changed, 473 insertions(+), 471 deletions(-) diff --git a/vel/rl/test/test_integration.py b/vel/rl/test/test_integration.py index b0433ac4..912debf1 100644 --- a/vel/rl/test/test_integration.py +++ b/vel/rl/test/test_integration.py @@ -21,11 +21,11 @@ from vel.rl.vecenv.subproc import SubprocVecEnvWrapper from vel.rl.vecenv.dummy import DummyVecEnvWrapper -from vel.rl.model.stochastic_policy_model import StochasticPolicyModelFactory -from vel.rl.model.q_stochastic_policy_model import QStochasticPolicyModelFactory -from vel.rl.model.q_model import QModelFactory -from vel.rl.model.deterministic_policy_model import DeterministicPolicyModelFactory -from vel.rl.model.stochastic_policy_model_separate import StochasticPolicyModelSeparateFactory +from vel.rl.policy.stochastic_policy import StochasticPolicyFactory +# from vel.rl.model.q_stochastic_policy_model import QStochasticPolicyModelFactory +# from vel.rl.model.q_model import QModelFactory +# from vel.rl.model.deterministic_policy_model import DeterministicPolicyModelFactory +# from vel.rl.model.stochastic_policy_model_separate import StochasticPolicyModelSeparateFactory from vel.rl.backbone.nature_cnn import NatureCnnFactory from vel.rl.backbone.mlp import MLPFactory @@ -73,7 +73,7 @@ def test_a2c_breakout(): # Again, use a helper to create a model # But because model is owned by the reinforcer, model should not be accessed using this variable # but from reinforcer.model property - model = StochasticPolicyModelFactory( + policy = StochasticPolicyFactory( input_block=ImageToTensorFactory(), backbone=NatureCnnFactory(input_width=84, input_height=84, input_channels=4) ).instantiate(action_space=vec_env.action_space) @@ -85,7 +85,7 @@ def test_a2c_breakout(): batch_size=256, number_of_steps=5 ), - model=model, + policy=policy, algo=A2CPolicyGradient( entropy_coefficient=0.01, value_coefficient=0.5, @@ -94,6 +94,7 @@ def test_a2c_breakout(): ), env_roller=StepEnvRoller( environment=vec_env, + policy=policy, device=CPU_DEVICE ) ) @@ -150,7 +151,7 @@ def test_ppo_breakout(): # Again, use a helper to create a model # But because model is owned by the reinforcer, model should not be accessed using this variable # but from reinforcer.model property - model = StochasticPolicyModelFactory( + policy = StochasticPolicyFactory( input_block=ImageToTensorFactory(), backbone=NatureCnnFactory(input_width=84, input_height=84, input_channels=4) ).instantiate(action_space=vec_env.action_space) @@ -163,7 +164,7 @@ def test_ppo_breakout(): batch_size=4, experience_replay=2, ), - model=model, + policy=policy, algo=PpoPolicyGradient( entropy_coefficient=0.01, value_coefficient=0.5, @@ -174,6 +175,7 @@ def test_ppo_breakout(): ), env_roller=StepEnvRoller( environment=vec_env, + policy=policy, device=device, ) ) @@ -214,464 +216,464 @@ def test_ppo_breakout(): training_info.on_train_end() -def test_dqn_breakout(): - """ - Simple 1 iteration of DQN breakout - """ - device = torch.device('cpu') - seed = 1001 - - # Set random seed in python std lib, numpy and pytorch - set_seed(seed) - - # Only single environment for DQN - vec_env = DummyVecEnvWrapper( - ClassicAtariEnv('BreakoutNoFrameskip-v4'), frame_history=4 - ).instantiate(parallel_envs=1, seed=seed) - - # Again, use a helper to create a model - # But because model is owned by the reinforcer, model should not be accessed using this variable - # but from reinforcer.model property - model_factory = 
QModelFactory( - input_block=ImageToTensorFactory(), - backbone=NatureCnnFactory(input_width=84, input_height=84, input_channels=4) - ) - - # Reinforcer - an object managing the learning process - reinforcer = BufferedOffPolicyIterationReinforcer( - device=device, - settings=BufferedOffPolicyIterationReinforcerSettings( - rollout_steps=4, - training_steps=1, - ), - environment=vec_env, - algo=DeepQLearning( - model_factory=model_factory, - double_dqn=False, - target_update_frequency=10_000, - discount_factor=0.99, - max_grad_norm=0.5 - ), - model=model_factory.instantiate(action_space=vec_env.action_space), - env_roller=TransitionReplayEnvRoller( - environment=vec_env, - device=device, - replay_buffer=CircularReplayBuffer( - buffer_capacity=100, - buffer_initial_size=100, - num_envs=vec_env.num_envs, - observation_space=vec_env.observation_space, - action_space=vec_env.action_space, - frame_stack_compensation=True, - frame_history=4 - ), - action_noise=EpsGreedy( - epsilon=LinearAndConstantSchedule( - initial_value=1.0, final_value=0.1, end_of_interpolation=0.1 - ), - environment=vec_env - ) - ) - ) - - # Model optimizer - optimizer = optim.RMSprop(reinforcer.model.parameters(), lr=2.5e-4, alpha=0.95, momentum=0.95, eps=1e-3) - - # Overall information store for training information - training_info = TrainingInfo( - metrics=[ - EpisodeRewardMetric('episode_rewards'), # Calculate average reward from episode - ], - callbacks=[ - FrameTracker(100_000) - ] # Print live metrics every epoch to standard output - ) - - # A bit of training initialization bookkeeping... - training_info.initialize() - reinforcer.initialize_training(training_info) - training_info.on_train_begin() - - # Let's make 100 batches per epoch to average metrics nicely - num_epochs = 1 - - # Normal handrolled training loop - for i in range(1, num_epochs+1): - epoch_info = EpochInfo( - training_info=training_info, - global_epoch_idx=i, - batches_per_epoch=1, - optimizer=optimizer - ) - - reinforcer.train_epoch(epoch_info, interactive=False) - - training_info.on_train_end() - - -def test_prioritized_dqn_breakout(): - """ - Simple 1 iteration of DQN prioritized replay breakout - """ - device = torch.device('cpu') - seed = 1001 - - # Set random seed in python std lib, numpy and pytorch - set_seed(seed) - - # Only single environment for DQN - vec_env = DummyVecEnvWrapper( - ClassicAtariEnv('BreakoutNoFrameskip-v4'), frame_history=4 - ).instantiate(parallel_envs=1, seed=seed) - - # Again, use a helper to create a model - # But because model is owned by the reinforcer, model should not be accessed using this variable - # but from reinforcer.model property - model_factory = QModelFactory( - input_block=ImageToTensorFactory(), - backbone=NatureCnnFactory(input_width=84, input_height=84, input_channels=4) - ) - - # Reinforcer - an object managing the learning process - reinforcer = BufferedOffPolicyIterationReinforcer( - device=device, - settings=BufferedOffPolicyIterationReinforcerSettings( - rollout_steps=4, - training_steps=1, - ), - environment=vec_env, - algo=DeepQLearning( - model_factory=model_factory, - double_dqn=False, - target_update_frequency=10_000, - discount_factor=0.99, - max_grad_norm=0.5 - ), - model=model_factory.instantiate(action_space=vec_env.action_space), - env_roller=TransitionReplayEnvRoller( - environment=vec_env, - device=device, - replay_buffer=PrioritizedCircularReplayBuffer( - buffer_capacity=100, - buffer_initial_size=100, - num_envs=vec_env.num_envs, - observation_space=vec_env.observation_space, - 
action_space=vec_env.action_space, - priority_exponent=0.6, - priority_weight=LinearSchedule( - initial_value=0.4, - final_value=1.0 - ), - priority_epsilon=1.0e-6, - frame_stack_compensation=True, - frame_history=4 - ), - action_noise=EpsGreedy( - epsilon=LinearAndConstantSchedule( - initial_value=1.0, final_value=0.1, end_of_interpolation=0.1 - ), - environment=vec_env - ) - ) - ) - - # Model optimizer - optimizer = optim.RMSprop(reinforcer.model.parameters(), lr=2.5e-4, alpha=0.95, momentum=0.95, eps=1e-3) - - # Overall information store for training information - training_info = TrainingInfo( - metrics=[ - EpisodeRewardMetric('episode_rewards'), # Calculate average reward from episode - ], - callbacks=[ - FrameTracker(100_000) - ] # Print live metrics every epoch to standard output - ) - - # A bit of training initialization bookkeeping... - training_info.initialize() - reinforcer.initialize_training(training_info) - training_info.on_train_begin() - - # Let's make 100 batches per epoch to average metrics nicely - num_epochs = 1 - - # Normal handrolled training loop - for i in range(1, num_epochs+1): - epoch_info = EpochInfo( - training_info=training_info, - global_epoch_idx=i, - batches_per_epoch=1, - optimizer=optimizer - ) - - reinforcer.train_epoch(epoch_info, interactive=False) - - training_info.on_train_end() - - -def test_ddpg_bipedal_walker(): - """ - 1 iteration of DDPG bipedal walker environment - """ - device = torch.device('cpu') - seed = 1001 - - # Set random seed in python std lib, numpy and pytorch - set_seed(seed) - - # Only single environment for DDPG - - vec_env = DummyVecEnvWrapper( - MujocoEnv('BipedalWalker-v2') - ).instantiate(parallel_envs=1, seed=seed) - - # Again, use a helper to create a model - # But because model is owned by the reinforcer, model should not be accessed using this variable - # but from reinforcer.model property - model_factory = DeterministicPolicyModelFactory( - input_block=NormalizeObservationsFactory(input_shape=24), - policy_backbone=MLPFactory(input_length=24, hidden_layers=[64, 64], normalization='layer'), - value_backbone=MLPFactory(input_length=28, hidden_layers=[64, 64], normalization='layer') - ) - - # Reinforcer - an object managing the learning process - reinforcer = BufferedOffPolicyIterationReinforcer( - device=device, - settings=BufferedOffPolicyIterationReinforcerSettings( - rollout_steps=4, - training_steps=1, - ), - environment=vec_env, - algo=DeepDeterministicPolicyGradient( - model_factory=model_factory, - tau=0.01, - discount_factor=0.99, - max_grad_norm=0.5 - ), - model=model_factory.instantiate(action_space=vec_env.action_space), - env_roller=TransitionReplayEnvRoller( - environment=vec_env, - device=device, - action_noise=OuNoise(std_dev=0.2, environment=vec_env), - replay_buffer=CircularReplayBuffer( - buffer_capacity=100, - buffer_initial_size=100, - num_envs=vec_env.num_envs, - observation_space=vec_env.observation_space, - action_space=vec_env.action_space - ), - normalize_returns=True, - discount_factor=0.99 - ), - ) - - # Model optimizer - optimizer = optim.Adam(reinforcer.model.parameters(), lr=2.5e-4, eps=1e-4) - - # Overall information store for training information - training_info = TrainingInfo( - metrics=[ - EpisodeRewardMetric('episode_rewards'), # Calculate average reward from episode - ], - callbacks=[ - FrameTracker(100_000) - ] # Print live metrics every epoch to standard output - ) - - # A bit of training initialization bookkeeping... 
- training_info.initialize() - reinforcer.initialize_training(training_info) - training_info.on_train_begin() - - # Let's make 100 batches per epoch to average metrics nicely - num_epochs = 1 - - # Normal handrolled training loop - for i in range(1, num_epochs+1): - epoch_info = EpochInfo( - training_info=training_info, - global_epoch_idx=i, - batches_per_epoch=1, - optimizer=optimizer - ) - - reinforcer.train_epoch(epoch_info, interactive=False) - - training_info.on_train_end() - - -def test_trpo_bipedal_walker(): - """ - 1 iteration of TRPO on bipedal walker - """ - device = torch.device('cpu') - seed = 1001 - - # Set random seed in python std lib, numpy and pytorch - set_seed(seed) - - vec_env = DummyVecEnvWrapper( - MujocoEnv('BipedalWalker-v2', normalize_returns=True), - ).instantiate(parallel_envs=8, seed=seed) - - # Again, use a helper to create a model - # But because model is owned by the reinforcer, model should not be accessed using this variable - # but from reinforcer.model property - model_factory = StochasticPolicyModelSeparateFactory( - input_block=NormalizeObservationsFactory(input_shape=24), - policy_backbone=MLPFactory(input_length=24, hidden_layers=[32, 32]), - value_backbone=MLPFactory(input_length=24, hidden_layers=[32]) - ) - - # Reinforcer - an object managing the learning process - reinforcer = OnPolicyIterationReinforcer( - device=device, - settings=OnPolicyIterationReinforcerSettings( - number_of_steps=12, - ), - model=model_factory.instantiate(action_space=vec_env.action_space), - algo=TrpoPolicyGradient( - max_kl=0.01, - cg_iters=10, - line_search_iters=10, - improvement_acceptance_ratio=0.1, - cg_damping=0.1, - vf_iters=5, - entropy_coef=0.0, - discount_factor=0.99, - max_grad_norm=0.5, - gae_lambda=1.0 - ), - env_roller=StepEnvRoller( - environment=vec_env, - device=device, - ) - ) - - # Model optimizer - optimizer = optim.Adam(reinforcer.model.parameters(), lr=1.0e-3, eps=1e-4) - - # Overall information store for training information - training_info = TrainingInfo( - metrics=[ - EpisodeRewardMetric('episode_rewards'), # Calculate average reward from episode - ], - callbacks=[ - FrameTracker(100_000) - ] # Print live metrics every epoch to standard output - ) - - # A bit of training initialization bookkeeping... 
- training_info.initialize() - reinforcer.initialize_training(training_info) - training_info.on_train_begin() - - # Let's make 100 batches per epoch to average metrics nicely - num_epochs = 1 - - # Normal handrolled training loop - for i in range(1, num_epochs+1): - epoch_info = EpochInfo( - training_info=training_info, - global_epoch_idx=i, - batches_per_epoch=1, - optimizer=optimizer - ) - - reinforcer.train_epoch(epoch_info, interactive=False) - - training_info.on_train_end() - - -def test_acer_breakout(): - """ - 1 iteration of ACER on breakout environment - """ - device = torch.device('cpu') - seed = 1001 - - # Set random seed in python std lib, numpy and pytorch - set_seed(seed) - - # Create 16 environments evaluated in parallel in sub processess with all usual DeepMind wrappers - # These are just helper functions for that - vec_env = SubprocVecEnvWrapper( - ClassicAtariEnv('BreakoutNoFrameskip-v4'), frame_history=4 - ).instantiate(parallel_envs=16, seed=seed) - - # Again, use a helper to create a model - # But because model is owned by the reinforcer, model should not be accessed using this variable - # but from reinforcer.model property - model_factory = QStochasticPolicyModelFactory( - input_block=ImageToTensorFactory(), - backbone=NatureCnnFactory(input_width=84, input_height=84, input_channels=4) - ) - - # Reinforcer - an object managing the learning process - reinforcer = BufferedMixedPolicyIterationReinforcer( - device=device, - settings=BufferedMixedPolicyIterationReinforcerSettings( - experience_replay=2, - number_of_steps=12, - stochastic_experience_replay=False - ), - model=model_factory.instantiate(action_space=vec_env.action_space), - env=vec_env, - algo=AcerPolicyGradient( - model_factory=model_factory, - entropy_coefficient=0.01, - q_coefficient=0.5, - rho_cap=10.0, - retrace_rho_cap=1.0, - trust_region=True, - trust_region_delta=1.0, - discount_factor=0.99, - max_grad_norm=10.0, - ), - env_roller=TrajectoryReplayEnvRoller( - environment=vec_env, - device=device, - replay_buffer=CircularReplayBuffer( - buffer_capacity=100, - buffer_initial_size=100, - num_envs=vec_env.num_envs, - action_space=vec_env.action_space, - observation_space=vec_env.observation_space, - frame_stack_compensation=True, - frame_history=4, - ) - ), - ) - - # Model optimizer - optimizer = optim.RMSprop(reinforcer.model.parameters(), lr=7.0e-4, eps=1e-3, alpha=0.99) - - # Overall information store for training information - training_info = TrainingInfo( - metrics=[ - EpisodeRewardMetric('episode_rewards'), # Calculate average reward from episode - ], - callbacks=[] # Print live metrics every epoch to standard output - ) - - # A bit of training initialization bookkeeping... 
- training_info.initialize() - reinforcer.initialize_training(training_info) - training_info.on_train_begin() - - # Let's make 100 batches per epoch to average metrics nicely - num_epochs = 1 - - # Normal handrolled training loop - for i in range(1, num_epochs+1): - epoch_info = EpochInfo( - training_info=training_info, - global_epoch_idx=i, - batches_per_epoch=1, - optimizer=optimizer - ) - - reinforcer.train_epoch(epoch_info, interactive=False) - - training_info.on_train_end() +# def test_dqn_breakout(): +# """ +# Simple 1 iteration of DQN breakout +# """ +# device = torch.device('cpu') +# seed = 1001 +# +# # Set random seed in python std lib, numpy and pytorch +# set_seed(seed) +# +# # Only single environment for DQN +# vec_env = DummyVecEnvWrapper( +# ClassicAtariEnv('BreakoutNoFrameskip-v4'), frame_history=4 +# ).instantiate(parallel_envs=1, seed=seed) +# +# # Again, use a helper to create a model +# # But because model is owned by the reinforcer, model should not be accessed using this variable +# # but from reinforcer.model property +# model_factory = QModelFactory( +# input_block=ImageToTensorFactory(), +# backbone=NatureCnnFactory(input_width=84, input_height=84, input_channels=4) +# ) +# +# # Reinforcer - an object managing the learning process +# reinforcer = BufferedOffPolicyIterationReinforcer( +# device=device, +# settings=BufferedOffPolicyIterationReinforcerSettings( +# rollout_steps=4, +# training_steps=1, +# ), +# environment=vec_env, +# algo=DeepQLearning( +# model_factory=model_factory, +# double_dqn=False, +# target_update_frequency=10_000, +# discount_factor=0.99, +# max_grad_norm=0.5 +# ), +# model=model_factory.instantiate(action_space=vec_env.action_space), +# env_roller=TransitionReplayEnvRoller( +# environment=vec_env, +# device=device, +# replay_buffer=CircularReplayBuffer( +# buffer_capacity=100, +# buffer_initial_size=100, +# num_envs=vec_env.num_envs, +# observation_space=vec_env.observation_space, +# action_space=vec_env.action_space, +# frame_stack_compensation=True, +# frame_history=4 +# ), +# action_noise=EpsGreedy( +# epsilon=LinearAndConstantSchedule( +# initial_value=1.0, final_value=0.1, end_of_interpolation=0.1 +# ), +# environment=vec_env +# ) +# ) +# ) +# +# # Model optimizer +# optimizer = optim.RMSprop(reinforcer.model.parameters(), lr=2.5e-4, alpha=0.95, momentum=0.95, eps=1e-3) +# +# # Overall information store for training information +# training_info = TrainingInfo( +# metrics=[ +# EpisodeRewardMetric('episode_rewards'), # Calculate average reward from episode +# ], +# callbacks=[ +# FrameTracker(100_000) +# ] # Print live metrics every epoch to standard output +# ) +# +# # A bit of training initialization bookkeeping... 
+# training_info.initialize() +# reinforcer.initialize_training(training_info) +# training_info.on_train_begin() +# +# # Let's make 100 batches per epoch to average metrics nicely +# num_epochs = 1 +# +# # Normal handrolled training loop +# for i in range(1, num_epochs+1): +# epoch_info = EpochInfo( +# training_info=training_info, +# global_epoch_idx=i, +# batches_per_epoch=1, +# optimizer=optimizer +# ) +# +# reinforcer.train_epoch(epoch_info, interactive=False) +# +# training_info.on_train_end() +# +# +# def test_prioritized_dqn_breakout(): +# """ +# Simple 1 iteration of DQN prioritized replay breakout +# """ +# device = torch.device('cpu') +# seed = 1001 +# +# # Set random seed in python std lib, numpy and pytorch +# set_seed(seed) +# +# # Only single environment for DQN +# vec_env = DummyVecEnvWrapper( +# ClassicAtariEnv('BreakoutNoFrameskip-v4'), frame_history=4 +# ).instantiate(parallel_envs=1, seed=seed) +# +# # Again, use a helper to create a model +# # But because model is owned by the reinforcer, model should not be accessed using this variable +# # but from reinforcer.model property +# model_factory = QModelFactory( +# input_block=ImageToTensorFactory(), +# backbone=NatureCnnFactory(input_width=84, input_height=84, input_channels=4) +# ) +# +# # Reinforcer - an object managing the learning process +# reinforcer = BufferedOffPolicyIterationReinforcer( +# device=device, +# settings=BufferedOffPolicyIterationReinforcerSettings( +# rollout_steps=4, +# training_steps=1, +# ), +# environment=vec_env, +# algo=DeepQLearning( +# model_factory=model_factory, +# double_dqn=False, +# target_update_frequency=10_000, +# discount_factor=0.99, +# max_grad_norm=0.5 +# ), +# model=model_factory.instantiate(action_space=vec_env.action_space), +# env_roller=TransitionReplayEnvRoller( +# environment=vec_env, +# device=device, +# replay_buffer=PrioritizedCircularReplayBuffer( +# buffer_capacity=100, +# buffer_initial_size=100, +# num_envs=vec_env.num_envs, +# observation_space=vec_env.observation_space, +# action_space=vec_env.action_space, +# priority_exponent=0.6, +# priority_weight=LinearSchedule( +# initial_value=0.4, +# final_value=1.0 +# ), +# priority_epsilon=1.0e-6, +# frame_stack_compensation=True, +# frame_history=4 +# ), +# action_noise=EpsGreedy( +# epsilon=LinearAndConstantSchedule( +# initial_value=1.0, final_value=0.1, end_of_interpolation=0.1 +# ), +# environment=vec_env +# ) +# ) +# ) +# +# # Model optimizer +# optimizer = optim.RMSprop(reinforcer.model.parameters(), lr=2.5e-4, alpha=0.95, momentum=0.95, eps=1e-3) +# +# # Overall information store for training information +# training_info = TrainingInfo( +# metrics=[ +# EpisodeRewardMetric('episode_rewards'), # Calculate average reward from episode +# ], +# callbacks=[ +# FrameTracker(100_000) +# ] # Print live metrics every epoch to standard output +# ) +# +# # A bit of training initialization bookkeeping... 
+# training_info.initialize() +# reinforcer.initialize_training(training_info) +# training_info.on_train_begin() +# +# # Let's make 100 batches per epoch to average metrics nicely +# num_epochs = 1 +# +# # Normal handrolled training loop +# for i in range(1, num_epochs+1): +# epoch_info = EpochInfo( +# training_info=training_info, +# global_epoch_idx=i, +# batches_per_epoch=1, +# optimizer=optimizer +# ) +# +# reinforcer.train_epoch(epoch_info, interactive=False) +# +# training_info.on_train_end() +# +# +# def test_ddpg_bipedal_walker(): +# """ +# 1 iteration of DDPG bipedal walker environment +# """ +# device = torch.device('cpu') +# seed = 1001 +# +# # Set random seed in python std lib, numpy and pytorch +# set_seed(seed) +# +# # Only single environment for DDPG +# +# vec_env = DummyVecEnvWrapper( +# MujocoEnv('BipedalWalker-v2') +# ).instantiate(parallel_envs=1, seed=seed) +# +# # Again, use a helper to create a model +# # But because model is owned by the reinforcer, model should not be accessed using this variable +# # but from reinforcer.model property +# model_factory = DeterministicPolicyModelFactory( +# input_block=NormalizeObservationsFactory(input_shape=24), +# policy_backbone=MLPFactory(input_length=24, hidden_layers=[64, 64], normalization='layer'), +# value_backbone=MLPFactory(input_length=28, hidden_layers=[64, 64], normalization='layer') +# ) +# +# # Reinforcer - an object managing the learning process +# reinforcer = BufferedOffPolicyIterationReinforcer( +# device=device, +# settings=BufferedOffPolicyIterationReinforcerSettings( +# rollout_steps=4, +# training_steps=1, +# ), +# environment=vec_env, +# algo=DeepDeterministicPolicyGradient( +# model_factory=model_factory, +# tau=0.01, +# discount_factor=0.99, +# max_grad_norm=0.5 +# ), +# model=model_factory.instantiate(action_space=vec_env.action_space), +# env_roller=TransitionReplayEnvRoller( +# environment=vec_env, +# device=device, +# action_noise=OuNoise(std_dev=0.2, environment=vec_env), +# replay_buffer=CircularReplayBuffer( +# buffer_capacity=100, +# buffer_initial_size=100, +# num_envs=vec_env.num_envs, +# observation_space=vec_env.observation_space, +# action_space=vec_env.action_space +# ), +# normalize_returns=True, +# discount_factor=0.99 +# ), +# ) +# +# # Model optimizer +# optimizer = optim.Adam(reinforcer.model.parameters(), lr=2.5e-4, eps=1e-4) +# +# # Overall information store for training information +# training_info = TrainingInfo( +# metrics=[ +# EpisodeRewardMetric('episode_rewards'), # Calculate average reward from episode +# ], +# callbacks=[ +# FrameTracker(100_000) +# ] # Print live metrics every epoch to standard output +# ) +# +# # A bit of training initialization bookkeeping... 
+# training_info.initialize() +# reinforcer.initialize_training(training_info) +# training_info.on_train_begin() +# +# # Let's make 100 batches per epoch to average metrics nicely +# num_epochs = 1 +# +# # Normal handrolled training loop +# for i in range(1, num_epochs+1): +# epoch_info = EpochInfo( +# training_info=training_info, +# global_epoch_idx=i, +# batches_per_epoch=1, +# optimizer=optimizer +# ) +# +# reinforcer.train_epoch(epoch_info, interactive=False) +# +# training_info.on_train_end() +# +# +# def test_trpo_bipedal_walker(): +# """ +# 1 iteration of TRPO on bipedal walker +# """ +# device = torch.device('cpu') +# seed = 1001 +# +# # Set random seed in python std lib, numpy and pytorch +# set_seed(seed) +# +# vec_env = DummyVecEnvWrapper( +# MujocoEnv('BipedalWalker-v2', normalize_returns=True), +# ).instantiate(parallel_envs=8, seed=seed) +# +# # Again, use a helper to create a model +# # But because model is owned by the reinforcer, model should not be accessed using this variable +# # but from reinforcer.model property +# model_factory = StochasticPolicyModelSeparateFactory( +# input_block=NormalizeObservationsFactory(input_shape=24), +# policy_backbone=MLPFactory(input_length=24, hidden_layers=[32, 32]), +# value_backbone=MLPFactory(input_length=24, hidden_layers=[32]) +# ) +# +# # Reinforcer - an object managing the learning process +# reinforcer = OnPolicyIterationReinforcer( +# device=device, +# settings=OnPolicyIterationReinforcerSettings( +# number_of_steps=12, +# ), +# model=model_factory.instantiate(action_space=vec_env.action_space), +# algo=TrpoPolicyGradient( +# max_kl=0.01, +# cg_iters=10, +# line_search_iters=10, +# improvement_acceptance_ratio=0.1, +# cg_damping=0.1, +# vf_iters=5, +# entropy_coef=0.0, +# discount_factor=0.99, +# max_grad_norm=0.5, +# gae_lambda=1.0 +# ), +# env_roller=StepEnvRoller( +# environment=vec_env, +# device=device, +# ) +# ) +# +# # Model optimizer +# optimizer = optim.Adam(reinforcer.model.parameters(), lr=1.0e-3, eps=1e-4) +# +# # Overall information store for training information +# training_info = TrainingInfo( +# metrics=[ +# EpisodeRewardMetric('episode_rewards'), # Calculate average reward from episode +# ], +# callbacks=[ +# FrameTracker(100_000) +# ] # Print live metrics every epoch to standard output +# ) +# +# # A bit of training initialization bookkeeping... 
+# training_info.initialize() +# reinforcer.initialize_training(training_info) +# training_info.on_train_begin() +# +# # Let's make 100 batches per epoch to average metrics nicely +# num_epochs = 1 +# +# # Normal handrolled training loop +# for i in range(1, num_epochs+1): +# epoch_info = EpochInfo( +# training_info=training_info, +# global_epoch_idx=i, +# batches_per_epoch=1, +# optimizer=optimizer +# ) +# +# reinforcer.train_epoch(epoch_info, interactive=False) +# +# training_info.on_train_end() +# +# +# def test_acer_breakout(): +# """ +# 1 iteration of ACER on breakout environment +# """ +# device = torch.device('cpu') +# seed = 1001 +# +# # Set random seed in python std lib, numpy and pytorch +# set_seed(seed) +# +# # Create 16 environments evaluated in parallel in sub processess with all usual DeepMind wrappers +# # These are just helper functions for that +# vec_env = SubprocVecEnvWrapper( +# ClassicAtariEnv('BreakoutNoFrameskip-v4'), frame_history=4 +# ).instantiate(parallel_envs=16, seed=seed) +# +# # Again, use a helper to create a model +# # But because model is owned by the reinforcer, model should not be accessed using this variable +# # but from reinforcer.model property +# model_factory = QStochasticPolicyModelFactory( +# input_block=ImageToTensorFactory(), +# backbone=NatureCnnFactory(input_width=84, input_height=84, input_channels=4) +# ) +# +# # Reinforcer - an object managing the learning process +# reinforcer = BufferedMixedPolicyIterationReinforcer( +# device=device, +# settings=BufferedMixedPolicyIterationReinforcerSettings( +# experience_replay=2, +# number_of_steps=12, +# stochastic_experience_replay=False +# ), +# model=model_factory.instantiate(action_space=vec_env.action_space), +# env=vec_env, +# algo=AcerPolicyGradient( +# model_factory=model_factory, +# entropy_coefficient=0.01, +# q_coefficient=0.5, +# rho_cap=10.0, +# retrace_rho_cap=1.0, +# trust_region=True, +# trust_region_delta=1.0, +# discount_factor=0.99, +# max_grad_norm=10.0, +# ), +# env_roller=TrajectoryReplayEnvRoller( +# environment=vec_env, +# device=device, +# replay_buffer=CircularReplayBuffer( +# buffer_capacity=100, +# buffer_initial_size=100, +# num_envs=vec_env.num_envs, +# action_space=vec_env.action_space, +# observation_space=vec_env.observation_space, +# frame_stack_compensation=True, +# frame_history=4, +# ) +# ), +# ) +# +# # Model optimizer +# optimizer = optim.RMSprop(reinforcer.model.parameters(), lr=7.0e-4, eps=1e-3, alpha=0.99) +# +# # Overall information store for training information +# training_info = TrainingInfo( +# metrics=[ +# EpisodeRewardMetric('episode_rewards'), # Calculate average reward from episode +# ], +# callbacks=[] # Print live metrics every epoch to standard output +# ) +# +# # A bit of training initialization bookkeeping... 
+# training_info.initialize() +# reinforcer.initialize_training(training_info) +# training_info.on_train_begin() +# +# # Let's make 100 batches per epoch to average metrics nicely +# num_epochs = 1 +# +# # Normal handrolled training loop +# for i in range(1, num_epochs+1): +# epoch_info = EpochInfo( +# training_info=training_info, +# global_epoch_idx=i, +# batches_per_epoch=1, +# optimizer=optimizer +# ) +# +# reinforcer.train_epoch(epoch_info, interactive=False) +# +# training_info.on_train_end() diff --git a/vel/rl/util/actor.py b/vel/rl/util/actor.py index 43fc4b80..a858c4a7 100644 --- a/vel/rl/util/actor.py +++ b/vel/rl/util/actor.py @@ -9,7 +9,7 @@ class PolicyActor: def __init__(self, num_envs: int, policy: Policy, device: torch.device): self.num_envs = num_envs - self.policy = policy + self.policy = policy.to(device) self.device = device self.state = to_device(self.policy.zero_state(num_envs), self.device) From fe2443e9e9466a80c67d3bb80686ee9009627945 Mon Sep 17 00:00:00 2001 From: Jerry Tworek Date: Thu, 20 Jun 2019 19:31:05 -0700 Subject: [PATCH 049/162] Implemented some useful new backbones. --- vel/rl/backbone/lstm.py | 20 ------------ vel/rl/backbone/mlp_rnn.py | 62 ++++++++++++++++++++++++++++++++++++++ vel/rl/backbone/rnn.py | 50 ++++++++++++++++++++++++++++++ 3 files changed, 112 insertions(+), 20 deletions(-) delete mode 100644 vel/rl/backbone/lstm.py create mode 100644 vel/rl/backbone/mlp_rnn.py create mode 100644 vel/rl/backbone/rnn.py diff --git a/vel/rl/backbone/lstm.py b/vel/rl/backbone/lstm.py deleted file mode 100644 index 50356d07..00000000 --- a/vel/rl/backbone/lstm.py +++ /dev/null @@ -1,20 +0,0 @@ -from vel.api import LinearBackboneModel - - -class LstmBackbone(LinearBackboneModel): - """ - Simple 'LSTM' model backbone - """ - - def __init__(self, input_size, hidden_units): - super().__init__() - - self.input_size = input_size - self.hidden_units = hidden_units - - def forward(self, input_data, masks, state): - raise NotImplementedError - - def initial_state(self): - """ Initial state of the network """ - raise NotImplementedError diff --git a/vel/rl/backbone/mlp_rnn.py b/vel/rl/backbone/mlp_rnn.py new file mode 100644 index 00000000..d5229d7f --- /dev/null +++ b/vel/rl/backbone/mlp_rnn.py @@ -0,0 +1,62 @@ +import typing + +from vel.api import LinearBackboneModel, ModelFactory +from vel.rl.backbone.mlp import MLP +from vel.rl.backbone.rnn import RNN + + +class MlpRnn(LinearBackboneModel): + """ MLP followed by an RNN - another simple policy backbone """ + + def __init__(self, input_length: int, mlp_layers: typing.List[int], rnn_units: int, rnn_type: str = 'lstm', + mlp_activation: str = 'tanh', mlp_normalization: typing.Optional[str] = None): + super().__init__() + + self.mlp = MLP( + input_length=input_length, hidden_layers=mlp_layers, activation=mlp_activation, + normalization=mlp_normalization + ) + + self.rnn = RNN(input_length=self.mlp.output_dim, hidden_units=rnn_units, rnn_type=rnn_type) + + @property + def output_dim(self) -> int: + return self.rnn.output_dim + + @property + def state_dim(self) -> int: + """ Initial state of the network """ + return self.rnn.state_dim + + @property + def is_stateful(self) -> bool: + """ If the model has a state that needs to be fed between individual observations """ + return True + + def zero_state(self, batch_size): + """ Potential state for the model """ + return self.rnn.zero_state(batch_size) + + def forward(self, input_data, state): + mlp_output = self.mlp(input_data) + hidden_state, new_state = self.rnn(mlp_output, 
state) + return hidden_state, new_state + + +def create(input_length: int, mlp_layers: typing.List[int], rnn_units: int, rnn_type: str = 'lstm', + mlp_activation: str = 'tanh', mlp_normalization: typing.Optional[str] = None): + """ Vel factory function """ + def instantiate(**_): + return MlpRnn( + input_length=input_length, + mlp_layers=mlp_layers, + rnn_units=rnn_units, + rnn_type=rnn_type, + mlp_activation=mlp_activation, + mlp_normalization=mlp_normalization + ) + + return ModelFactory.generic(instantiate) + + +MlpRnnFactory = create diff --git a/vel/rl/backbone/rnn.py b/vel/rl/backbone/rnn.py new file mode 100644 index 00000000..973345d4 --- /dev/null +++ b/vel/rl/backbone/rnn.py @@ -0,0 +1,50 @@ +from vel.api import LinearBackboneModel, ModelFactory +from vel.module.rnn_cell import RnnCell + + +class RNN(LinearBackboneModel): + """ Simple recurrent model backbone """ + + def __init__(self, input_length: int, hidden_units: int, rnn_type: str = 'lstm'): + super().__init__() + + self.input_length = input_length + self.hidden_units = hidden_units + + self.rnn_cell = RnnCell(input_size=input_length, hidden_size=self.hidden_units, rnn_type=rnn_type) + + @property + def output_dim(self) -> int: + return self.rnn_cell.output_dim + + @property + def state_dim(self) -> int: + """ Initial state of the network """ + return self.rnn_cell.state_dim + + @property + def is_stateful(self) -> bool: + """ If the model has a state that needs to be fed between individual observations """ + return True + + def zero_state(self, batch_size): + """ Potential state for the model """ + return self.rnn_cell.zero_state(batch_size) + + def forward(self, input_data, state): + hidden_state, new_state = self.rnn_cell(input_data, state) + return hidden_state, new_state + + +def create(input_length: int, hidden_units: int, rnn_type: str = 'lstm'): + """ Vel factory function """ + def instantiate(**_): + return RNN( + input_length=input_length, + hidden_units=hidden_units, + rnn_type=rnn_type + ) + return ModelFactory.generic(instantiate) + + +RNNFactory = create From ecd5c8535b35beef1278e6c11410fd51c515eed9 Mon Sep 17 00:00:00 2001 From: Jerry Tworek Date: Thu, 20 Jun 2019 19:42:40 -0700 Subject: [PATCH 050/162] Added potential output directory override. --- vel/api/model_config.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/vel/api/model_config.py b/vel/api/model_config.py index 23488916..4426dd81 100644 --- a/vel/api/model_config.py +++ b/vel/api/model_config.py @@ -112,6 +112,12 @@ def __init__(self, filename: str, configuration: dict, run_number: int, project_ del self.contents['commands'] self.provider = Provider(self._prepare_environment(), {'model_config': self}, parameters=parameters) + + if self.provider.has_name('output_directory'): + self.output_directory_name = self.provider.get("output_directory") + else: + self.output_directory_name = 'output' + self._model_name = self.provider.get("name") def _prepare_environment(self) -> dict: @@ -153,7 +159,7 @@ def project_data_dir(self, *args) -> str: def output_dir(self, *args) -> str: """ Directory where to store output """ - return os.path.join(self.project_dir, 'output', *args) + return os.path.join(self.project_dir, self.output_directory_name, *args) def project_top_dir(self, *args) -> str: """ Project top-level directory """ From 6de2f6c387828cc0e8ac998c695097f74ef351cc Mon Sep 17 00:00:00 2001 From: Million Integrals Date: Sat, 22 Jun 2019 11:27:30 -0700 Subject: [PATCH 051/162] Updated requirements. 
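A tensorboard extra backed by tb-nightly is added, and the all extra is extended to
cover the dev tooling, so optional pieces can be installed selectively, e.g.
(illustrative usage, assuming a local editable install):

    pip install -e .[tensorboard]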
--- setup.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 8a8fa5b9..0fd152f6 100644 --- a/setup.py +++ b/setup.py @@ -41,12 +41,16 @@ ], extras_require={ 'visdom': ['visdom'], + 'tensorboard': ['tb-nightly'], 'mongo': ['pymongo', 'dnspython'], 'gym': ['gym[atari,box2d,classic_control]'], 'mujoco': ['gym[mujoco,robotics]'], 'dev': ['pytest', 'ipython', 'jupyter', 'pip-tools', 'flake8', 'pytest-xdist'], 'text': ['spacy'], - 'all': ['visdom', 'pymongo', 'dnspython', 'gym[all]', 'pytest', 'spacy', 'ipython', 'jupyter'] + 'all': [ + 'visdom', 'pymongo', 'dnspython', 'gym[all]', 'pytest', 'spacy', 'ipython', 'jupyter', + 'pip-tools', 'flake8', 'pytest-xdist', 'tb-nightly' + ] }, tests_require=[ 'pytest' From 6d6679f7b350a30f13eca772956eda2607ecb02d Mon Sep 17 00:00:00 2001 From: Million Integrals Date: Sat, 22 Jun 2019 11:27:40 -0700 Subject: [PATCH 052/162] New version of some dependencies. --- requirements.txt | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/requirements.txt b/requirements.txt index e09fcbe8..a0cce232 100644 --- a/requirements.txt +++ b/requirements.txt @@ -4,17 +4,17 @@ # # pip-compile # -atari-py==0.1.15 # via gym +atari-py==0.2.0 # via gym atomicwrites==1.3.0 # via pytest attrs==19.1.0 box2d-py==2.3.8 # via gym -certifi==2019.3.9 # via requests +certifi==2019.6.16 # via requests chardet==3.0.4 # via requests cloudpickle==1.2.1 cycler==0.10.0 # via matplotlib dnspython==1.16.0 future==0.17.1 # via pyglet -gym[atari,box2d,classic_control]==0.12.5 +gym[atari,box2d,classic_control]==0.13.0 idna==2.8 # via requests importlib-metadata==0.18 # via pluggy, pytest joblib==0.13.2 # via scikit-learn @@ -45,9 +45,12 @@ torchfile==0.1.0 # via visdom torchtext==0.3.1 torchvision==0.3.0 tornado==6.0.2 # via visdom -tqdm==4.32.1 +tqdm==4.32.2 urllib3==1.25.3 # via requests visdom==0.1.8.8 wcwidth==0.1.7 # via pytest websocket-client==0.56.0 # via visdom zipp==0.5.1 # via importlib-metadata + +# The following packages are considered to be unsafe in a requirements file: +# setuptools==41.0.1 # via kiwisolver From 5c048541b0dfcc5e5e54b738da7317894b29550d Mon Sep 17 00:00:00 2001 From: Million Integrals Date: Sun, 23 Jun 2019 17:52:35 -0700 Subject: [PATCH 053/162] Large refactoring - work in progress. 
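First pass of the data-pipeline reorganization: Augmentation is replaced by
Transformation, datapoints become dicts of named fields instead of (x, y) tuples,
DataFlow and the new Loader move under vel.data, sources are simplified into a plain
train/validation/test container, and the Learner becomes vel.train.Trainer. A minimal
sketch of a custom transformation against the new ScopedTransformation base class
added in vel/api/transformation.py (the Scale class itself is hypothetical and only
illustrates the interface):

    class Scale(ScopedTransformation):
        """ Hypothetical example: rescale selected fields of a datapoint """

        def __init__(self, factor, scope=None, tags=None):
            super().__init__(scope, tags)
            self.factor = factor

        def transform(self, value):
            # Applied to every field listed in `scope` (defaults to ['x'])
            return value * self.factor

        def denormalization_transform(self, value):
            # Inverse of transform, used e.g. when visualizing augmented samples
            return value / self.factor

Existing augmentations such as Normalize and RandomHorizontalFlip are switched from
Augmentation to the new Transformation base in the same change.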
--- ...cnn_autoencoder.yaml => mnist_cnn_ae.yaml} | 0 .../classification/mnist/mnist_cnn_01.yaml | 23 +- vel/api/__init__.py | 8 +- vel/api/augmentation.py | 19 -- vel/api/dataflow.py | 42 --- vel/api/model.py | 18 +- vel/api/source.py | 280 +++++++++--------- vel/api/train_phase.py | 5 +- vel/api/transformation.py | 43 +++ vel/augmentation/to_tensor.py | 26 -- vel/{augmentation => calc}/__init__.py | 0 vel/{math => calc}/function.py | 0 vel/{math => calc}/process.py | 0 vel/command/augvis_command.py | 20 +- vel/command/train_command.py | 16 +- vel/data/__init__.py | 3 +- .../tta => data/augmentation}/__init__.py | 0 vel/{ => data}/augmentation/center_crop.py | 0 vel/{ => data}/augmentation/normalize.py | 2 +- vel/{ => data}/augmentation/random_crop.py | 0 .../augmentation/random_horizontal_flip.py | 2 +- .../augmentation/random_lighting.py | 2 +- vel/{ => data}/augmentation/random_rotate.py | 0 vel/{ => data}/augmentation/random_scale.py | 0 vel/{ => data}/augmentation/scale_min_size.py | 0 .../augmentation/tta}/__init__.py | 0 vel/{ => data}/augmentation/tta/train_tta.py | 0 vel/{ => data}/augmentation/unsupervised.py | 0 vel/data/dataflow.py | 55 ++++ vel/data/loader.py | 71 +++++ vel/{phase => data/operation}/__init__.py | 0 vel/data/{ => operation}/image_op.py | 0 vel/{schedule => data/source}/__init__.py | 0 vel/{ => data}/source/img_dir_source.py | 0 vel/{source => data/source/nlp}/__init__.py | 0 vel/{ => data}/source/nlp/imdb.py | 0 vel/{ => data}/source/nlp/multi30k.py | 0 vel/{ => data}/source/nlp/text_url.py | 0 vel/{ => data}/source/nlp/wmt14.py | 0 .../nlp => data/source/vision}/__init__.py | 0 vel/{ => data}/source/vision/cifar10.py | 0 vel/data/source/vision/mnist.py | 51 ++++ .../transformation}/__init__.py | 0 vel/data/transformation/image_to_tensor.py | 31 ++ .../transformation}/to_array.py | 10 +- vel/function/__init__.py | 0 vel/{schedule => function}/constant.py | 0 vel/{schedule => function}/linear.py | 0 .../linear_and_constant.py | 0 vel/model/autoencoder/mnist_cnn_vae.py | 4 +- vel/source/vision/mnist.py | 39 --- vel/train/__init__.py | 1 + vel/train/phase/__init__.py | 0 vel/{ => train}/phase/cycle.py | 0 vel/{ => train}/phase/freeze.py | 0 vel/{ => train}/phase/generic.py | 0 vel/{ => train}/phase/unfreeze.py | 0 vel/{api/learner.py => train/trainer.py} | 49 +-- 58 files changed, 483 insertions(+), 337 deletions(-) rename examples-configs/autoencoder/mnist/{mnist_cnn_autoencoder.yaml => mnist_cnn_ae.yaml} (100%) delete mode 100644 vel/api/augmentation.py delete mode 100644 vel/api/dataflow.py create mode 100644 vel/api/transformation.py delete mode 100644 vel/augmentation/to_tensor.py rename vel/{augmentation => calc}/__init__.py (100%) rename vel/{math => calc}/function.py (100%) rename vel/{math => calc}/process.py (100%) rename vel/{augmentation/tta => data/augmentation}/__init__.py (100%) rename vel/{ => data}/augmentation/center_crop.py (100%) rename vel/{ => data}/augmentation/normalize.py (94%) rename vel/{ => data}/augmentation/random_crop.py (100%) rename vel/{ => data}/augmentation/random_horizontal_flip.py (93%) rename vel/{ => data}/augmentation/random_lighting.py (94%) rename vel/{ => data}/augmentation/random_rotate.py (100%) rename vel/{ => data}/augmentation/random_scale.py (100%) rename vel/{ => data}/augmentation/scale_min_size.py (100%) rename vel/{math => data/augmentation/tta}/__init__.py (100%) rename vel/{ => data}/augmentation/tta/train_tta.py (100%) rename vel/{ => data}/augmentation/unsupervised.py (100%) create mode 100644 
vel/data/dataflow.py create mode 100644 vel/data/loader.py rename vel/{phase => data/operation}/__init__.py (100%) rename vel/data/{ => operation}/image_op.py (100%) rename vel/{schedule => data/source}/__init__.py (100%) rename vel/{ => data}/source/img_dir_source.py (100%) rename vel/{source => data/source/nlp}/__init__.py (100%) rename vel/{ => data}/source/nlp/imdb.py (100%) rename vel/{ => data}/source/nlp/multi30k.py (100%) rename vel/{ => data}/source/nlp/text_url.py (100%) rename vel/{ => data}/source/nlp/wmt14.py (100%) rename vel/{source/nlp => data/source/vision}/__init__.py (100%) rename vel/{ => data}/source/vision/cifar10.py (100%) create mode 100644 vel/data/source/vision/mnist.py rename vel/{source/vision => data/transformation}/__init__.py (100%) create mode 100644 vel/data/transformation/image_to_tensor.py rename vel/{augmentation => data/transformation}/to_array.py (56%) create mode 100644 vel/function/__init__.py rename vel/{schedule => function}/constant.py (100%) rename vel/{schedule => function}/linear.py (100%) rename vel/{schedule => function}/linear_and_constant.py (100%) delete mode 100644 vel/source/vision/mnist.py create mode 100644 vel/train/__init__.py create mode 100644 vel/train/phase/__init__.py rename vel/{ => train}/phase/cycle.py (100%) rename vel/{ => train}/phase/freeze.py (100%) rename vel/{ => train}/phase/generic.py (100%) rename vel/{ => train}/phase/unfreeze.py (100%) rename vel/{api/learner.py => train/trainer.py} (65%) diff --git a/examples-configs/autoencoder/mnist/mnist_cnn_autoencoder.yaml b/examples-configs/autoencoder/mnist/mnist_cnn_ae.yaml similarity index 100% rename from examples-configs/autoencoder/mnist/mnist_cnn_autoencoder.yaml rename to examples-configs/autoencoder/mnist/mnist_cnn_ae.yaml diff --git a/examples-configs/classification/mnist/mnist_cnn_01.yaml b/examples-configs/classification/mnist/mnist_cnn_01.yaml index 8d3ed19b..b0f75729 100644 --- a/examples-configs/classification/mnist/mnist_cnn_01.yaml +++ b/examples-configs/classification/mnist/mnist_cnn_01.yaml @@ -2,7 +2,7 @@ name: 'mnist_cnn_01' model: - name: vel.models.vision.mnist_cnn_01 + name: vel.model.vision.mnist_cnn_01 img_rows: 28 img_cols: 28 img_channels: 1 @@ -10,26 +10,31 @@ model: source: - name: vel.sources.vision.mnist + name: vel.data.source.vision.mnist + + +loader: + name: vel.data.loader batch_size: 128 - normalize: False num_workers: 4 - + transformations: + - name: vel.data.transformation.image_to_tensor commands: train: - name: vel.commands.train_command + name: vel.command.train_command epochs: 12 log_frequency: 100 optimizer: - name: vel.optimizers.adadelta + name: vel.optimizer.adadelta checkpoint: metric: 'val:loss' - - visdom: - name: vel.commands.vis_store_command + augvis: + name: vel.command.augvis_command + samples: 3 + cases: 3 diff --git a/vel/api/__init__.py b/vel/api/__init__.py index 06b8490c..1e5c9b65 100644 --- a/vel/api/__init__.py +++ b/vel/api/__init__.py @@ -1,16 +1,14 @@ -from .augmentation import Augmentation -from .dataflow import DataFlow +from .transformation import Transformation from .callback import Callback from .info import BatchInfo, EpochInfo, TrainingInfo -from .learner import Learner from .model import ( - Model, SupervisedModel, LossFunctionModel, BackboneModel, LinearBackboneModel + Model, GradientModel, LossFunctionModel, BackboneModel, LinearBackboneModel ) from .model_factory import ModelFactory from .optimizer import OptimizerFactory from .schedule import Schedule from .scheduler import SchedulerFactory -from 
.source import Source, SupervisedTrainingData, SupervisedTextData +from .source import Source from .storage import Storage from .train_phase import TrainPhase, EmptyTrainPhase from .model_config import ModelConfig diff --git a/vel/api/augmentation.py b/vel/api/augmentation.py deleted file mode 100644 index b4b103de..00000000 --- a/vel/api/augmentation.py +++ /dev/null @@ -1,19 +0,0 @@ - - -class Augmentation: - """ Base class for all data augmentations """ - def __init__(self, mode='x', tags=None): - self.mode = mode - self.tags = tags or ['train', 'val', 'test'] - - def __call__(self, *args): - """ Do the transformation """ - print(self) - raise NotImplementedError - - def denormalize(self, *args): - """ Operation reverse to normalization """ - if len(args) == 1: - return args[0] - else: - return args diff --git a/vel/api/dataflow.py b/vel/api/dataflow.py deleted file mode 100644 index 6b880b4f..00000000 --- a/vel/api/dataflow.py +++ /dev/null @@ -1,42 +0,0 @@ -import torch.utils.data as data - - -class DataFlow(data.Dataset): - """ A dataset wrapping underlying data source with transformations """ - def __init__(self, dataset, transformations, tag): - self.dataset = dataset - - if transformations is None: - self.transformations = [] - else: - self.transformations = [t for t in transformations if tag in t.tags] - - self.tag = tag - - def get_raw(self, index): - return self.dataset[index] - - def __getitem__(self, index): - raw_x, raw_y = self.dataset[index] - - for t in self.transformations: - if t.mode == 'x': - raw_x = t(raw_x) - elif t.mode == 'y': - raw_y = t(raw_y) - elif t.mode == 'both': - raw_x, raw_y = t(raw_x, raw_y) - else: - raise RuntimeError(f"Mode {t.mode} not recognized") - - return raw_x, raw_y - - def denormalize(self, datum, mode='x'): - for t in self.transformations[::-1]: - if t.mode == mode: - datum = t.denormalize(datum) - - return datum - - def __len__(self): - return len(self.dataset) diff --git a/vel/api/model.py b/vel/api/model.py index be09907b..53406a3e 100644 --- a/vel/api/model.py +++ b/vel/api/model.py @@ -63,10 +63,10 @@ def zero_state(self, batch_size): return None -class SupervisedModel(Model): +class GradientModel(Model): """ Model for a supervised learning problem """ - def calculate_gradient(self, x_data, y_true) -> dict: + def calculate_gradient(self, data: dict) -> dict: """ Calculate gradient for given batch of supervised learning. 
Returns a dictionary of metrics @@ -74,25 +74,25 @@ def calculate_gradient(self, x_data, y_true) -> dict: raise NotImplementedError -class LossFunctionModel(SupervisedModel): +class LossFunctionModel(GradientModel): """ Model for a supervised learning with a simple loss function """ def metrics(self) -> list: """ Set of metrics for this model """ return [Loss()] - def calculate_gradient(self, x_data, y_true) -> dict: - y_pred = self(x_data) - loss_value = self.loss_value(x_data, y_true, y_pred) + def calculate_gradient(self, data: dict) -> dict: + y_hat = self(data['x']) + loss_value = self.loss_value(data['x'], data['y'], y_hat) if self.training: loss_value.backward() return { 'loss': loss_value.item(), - 'data': x_data, - 'target': y_true, - 'output': y_pred + 'data': data['x'], + 'target': data['y'], + 'output': y_hat } def loss_value(self, x_data, y_true, y_pred) -> torch.tensor: diff --git a/vel/api/source.py b/vel/api/source.py index be1d864e..c6209c3a 100644 --- a/vel/api/source.py +++ b/vel/api/source.py @@ -1,139 +1,149 @@ -import torch.utils.data as data +import typing -from .dataflow import DataFlow +import torch.utils.data as data class Source: - """ Source of data for supervised learning algorithms """ - def __init__(self): - pass - - @property - def train_loader(self): - """ PyTorch loader of training data """ - raise NotImplementedError - - @property - def val_loader(self): - """ PyTorch loader of validation data """ - raise NotImplementedError - - @property - def train_dataset(self): - """ Return the training dataset """ - raise NotImplementedError - - @property - def val_dataset(self): - """ Return the validation dataset """ - raise NotImplementedError - - @property - def train_iterations_per_epoch(self): - """ Return number of iterations per epoch """ - raise NotImplementedError - - @property - def val_iterations_per_epoch(self): - """ Return number of iterations per epoch - validation """ - raise NotImplementedError - - -class SupervisedTextData(Source): - """ An NLP torchtext data source """ - def __init__(self, train_source, val_source, train_iterator, val_iterator, data_field, target_field): - super().__init__() - - self.train_source = train_source - self.val_source = val_source - self.train_iterator = train_iterator - self.val_iterator = val_iterator - self.data_field = data_field - self.target_field = target_field - - @property - def train_loader(self): - """ PyTorch loader of training data """ - return self.train_iterator - - @property - def val_loader(self): - """ PyTorch loader of validation data """ - return self.val_iterator - - @property - def train_dataset(self): - """ Return the training dataset """ - return self.train_source - - @property - def val_dataset(self): - """ Return the validation dataset """ - return self.val_source - - @property - def train_iterations_per_epoch(self): - """ Return number of iterations per epoch """ - return len(self.train_iterator) - - @property - def val_iterations_per_epoch(self): - """ Return number of iterations per epoch - validation """ - return len(self.val_iterator) - - -class SupervisedTrainingData(Source): - """ Most common source of data combining a basic datasource and sampler """ - def __init__(self, train_source, val_source, num_workers, batch_size, augmentations=None): - - super().__init__() - - self.train_source = train_source - self.val_source = val_source - - self.num_workers = num_workers - self.batch_size = batch_size - - self.augmentations = augmentations - - # Derived values - self.train_ds = 
DataFlow(self.train_source, augmentations, tag='train') - self.val_ds = DataFlow(self.val_source, augmentations, tag='val') - - self._train_loader = data.DataLoader( - self.train_ds, batch_size=batch_size, shuffle=True, num_workers=num_workers - ) - - self._val_loader = data.DataLoader( - self.val_ds, batch_size=batch_size, shuffle=False, num_workers=num_workers - ) - - @property - def train_loader(self): - """ PyTorch loader of training data """ - return self._train_loader - - @property - def val_loader(self): - """ PyTorch loader of validation data """ - return self._val_loader - - @property - def train_dataset(self): - """ Return the training dataset """ - return self.train_ds - - @property - def val_dataset(self): - """ Return the validation dataset """ - return self.val_ds - - @property - def train_iterations_per_epoch(self): - """ Return number of iterations per epoch """ - return len(self._train_loader) - - @property - def val_iterations_per_epoch(self): - """ Return number of iterations per epoch - validation """ - return len(self._val_loader) + """ + Single simple container for train/validation/test datasets. + + PyTorch datasets by default support only __len__ and __getitem__ operations + """ + + def __init__(self, train: data.Dataset, validation: data.Dataset, + test: typing.Optional[data.Dataset] = None, metadata: typing.Optional[dict] = None): + self.train = train + self.validation = validation + self.test = test + + self.metadata = {} if metadata is None else metadata + +# @property +# def train_loader(self): +# """ PyTorch loader of training data """ +# raise NotImplementedError +# +# @property +# def val_loader(self): +# """ PyTorch loader of validation data """ +# raise NotImplementedError +# +# @property +# def train_dataset(self): +# """ Return the training dataset """ +# raise NotImplementedError +# +# @property +# def val_dataset(self): +# """ Return the validation dataset """ +# raise NotImplementedError +# +# @property +# def train_iterations_per_epoch(self): +# """ Return number of iterations per epoch """ +# raise NotImplementedError +# +# @property +# def val_iterations_per_epoch(self): +# """ Return number of iterations per epoch - validation """ +# raise NotImplementedError +# +# +# class SupervisedTextData(Source): +# """ An NLP torchtext data source """ +# def __init__(self, train_source, val_source, train_iterator, val_iterator, data_field, target_field): +# super().__init__() +# +# self.train_source = train_source +# self.val_source = val_source +# self.train_iterator = train_iterator +# self.val_iterator = val_iterator +# self.data_field = data_field +# self.target_field = target_field +# +# @property +# def train_loader(self): +# """ PyTorch loader of training data """ +# return self.train_iterator +# +# @property +# def val_loader(self): +# """ PyTorch loader of validation data """ +# return self.val_iterator +# +# @property +# def train_dataset(self): +# """ Return the training dataset """ +# return self.train_source +# +# @property +# def val_dataset(self): +# """ Return the validation dataset """ +# return self.val_source +# +# @property +# def train_iterations_per_epoch(self): +# """ Return number of iterations per epoch """ +# return len(self.train_iterator) +# +# @property +# def val_iterations_per_epoch(self): +# """ Return number of iterations per epoch - validation """ +# return len(self.val_iterator) +# +# +# class SupervisedTrainingData(Source): +# """ Most common source of data combining a basic datasource and sampler """ +# def __init__(self, 
train_source, val_source, num_workers, batch_size, augmentations=None): +# +# super().__init__() +# +# self.train_source = train_source +# self.val_source = val_source +# +# self.num_workers = num_workers +# self.batch_size = batch_size +# +# self.augmentations = augmentations +# +# # Derived values +# self.train_ds = DataFlow(self.train_source, augmentations, tag='train') +# self.val_ds = DataFlow(self.val_source, augmentations, tag='val') +# +# self._train_loader = data.DataLoader( +# self.train_ds, batch_size=batch_size, shuffle=True, num_workers=num_workers +# ) +# +# self._val_loader = data.DataLoader( +# self.val_ds, batch_size=batch_size, shuffle=False, num_workers=num_workers +# ) +# +# @property +# def train_loader(self): +# """ PyTorch loader of training data """ +# return self._train_loader +# +# @property +# def val_loader(self): +# """ PyTorch loader of validation data """ +# return self._val_loader +# +# @property +# def train_dataset(self): +# """ Return the training dataset """ +# return self.train_ds +# +# @property +# def val_dataset(self): +# """ Return the validation dataset """ +# return self.val_ds +# +# @property +# def train_iterations_per_epoch(self): +# """ Return number of iterations per epoch """ +# return len(self._train_loader) +# +# @property +# def val_iterations_per_epoch(self): +# """ Return number of iterations per epoch - validation """ +# return len(self._val_loader) diff --git a/vel/api/train_phase.py b/vel/api/train_phase.py index d73e7e31..cd0f9b58 100644 --- a/vel/api/train_phase.py +++ b/vel/api/train_phase.py @@ -1,6 +1,7 @@ from torch.optim import Optimizer -from vel.api import TrainingInfo, EpochInfo, Learner, Model, Source +from vel.api import TrainingInfo, EpochInfo, Model, Source +from vel.train import Trainer class TrainPhase: @@ -25,7 +26,7 @@ def epoch_info(self, training_info: TrainingInfo, global_idx: int, local_idx: in """ Create Epoch info """ raise NotImplementedError - def execute_epoch(self, epoch_info: EpochInfo, learner: Learner): + def execute_epoch(self, epoch_info: EpochInfo, trainer: Trainer): """ Execute epoch training. 
""" diff --git a/vel/api/transformation.py b/vel/api/transformation.py new file mode 100644 index 00000000..80ee433e --- /dev/null +++ b/vel/api/transformation.py @@ -0,0 +1,43 @@ +class Transformation: + """ Base class for all data augmentations """ + def __init__(self, tags=None): + self.tags = ['train', 'val', 'test'] if tags is None else tags + + def __call__(self, datapoint): + """ Do the transformation """ + raise NotImplementedError + + def denormalize(self, datapoint): + """ Operation reverse to normalization """ + return datapoint + + +class ScopedTransformation(Transformation): + """ Transformation applied only to certain keys of the datapoint """ + + def __init__(self, scope=None, tags=None): + super().__init__(tags) + + self.scope = ['x'] if scope is None else scope + + def transform(self, value): + """ Actual transformation code """ + raise NotImplementedError + + def denormalization_transform(self, value): + """ Operation reverse to normalization """ + return value + + def __call__(self, datapoint): + """ Do the transformation """ + for name in self.scope: + datapoint[name] = self.transform(datapoint[name]) + + return datapoint + + def denormalize(self, datapoint): + """ Operation reverse to normalization """ + for name in self.scope: + datapoint[name] = self.denormalization_transform(datapoint[name]) + + return datapoint diff --git a/vel/augmentation/to_tensor.py b/vel/augmentation/to_tensor.py deleted file mode 100644 index 33285e77..00000000 --- a/vel/augmentation/to_tensor.py +++ /dev/null @@ -1,26 +0,0 @@ -import numpy as np - -import torchvision.transforms.functional as F - -import vel.data as data - - -class ToTensor(data.Augmentation): - """ Convert image array to a tensor """ - def __init__(self, mode='x', tags=None): - super().__init__(mode, tags) - - def __call__(self, datum): - if len(datum.shape) == 2: - # If the image has only one channel, it still needs to be specified - datum = datum.reshape(datum.shape[0], datum.shape[1], 1) - - return F.to_tensor(datum) - - def denormalize(self, datum): - return np.transpose(datum.numpy(), (1, 2, 0)) - - -def create(mode='x', tags=None): - """ Vel factory function """ - return ToTensor(mode, tags) diff --git a/vel/augmentation/__init__.py b/vel/calc/__init__.py similarity index 100% rename from vel/augmentation/__init__.py rename to vel/calc/__init__.py diff --git a/vel/math/function.py b/vel/calc/function.py similarity index 100% rename from vel/math/function.py rename to vel/calc/function.py diff --git a/vel/math/process.py b/vel/calc/process.py similarity index 100% rename from vel/math/process.py rename to vel/calc/process.py diff --git a/vel/command/augvis_command.py b/vel/command/augvis_command.py index df4f5352..edfa4906 100644 --- a/vel/command/augvis_command.py +++ b/vel/command/augvis_command.py @@ -1,19 +1,19 @@ import matplotlib.pyplot as plt import numpy as np -from vel.api import Source +from vel.data import Loader class AugmentationVisualizationCommand: """ Visualize augmentations """ - def __init__(self, source: Source, samples, cases): - self.source = source + def __init__(self, loader: Loader, samples, cases): + self.loader = loader self.samples = samples self.cases = cases def run(self): """ Run the visualization """ - dataset = self.source.train_dataset + dataset = self.loader.transformed_source.train num_samples = len(dataset) fig, ax = plt.subplots(self.cases, self.samples+1) @@ -21,19 +21,19 @@ def run(self): selected_sample = np.sort(np.random.choice(num_samples, self.cases, replace=False)) for i in 
range(self.cases): - raw_image, _ = dataset.get_raw(selected_sample[i]) + raw_image = dataset.get_raw(selected_sample[i])['x'] ax[i, 0].imshow(raw_image) ax[i, 0].set_title("Original image") for j in range(self.samples): - augmented_image, _ = dataset[selected_sample[i]] - augmented_image = dataset.denormalize(augmented_image) - ax[i, j+1].imshow(augmented_image) + augmented_datapoint = dataset[selected_sample[i]] + denormalized_datapoint = dataset.denormalize(augmented_datapoint) + ax[i, j+1].imshow(denormalized_datapoint['x']) plt.show() -def create(source, samples, cases): +def create(loader: Loader, samples: int, cases: int): """ Vel factory function """ - return AugmentationVisualizationCommand(source, samples, cases) + return AugmentationVisualizationCommand(loader, samples, cases) diff --git a/vel/command/train_command.py b/vel/command/train_command.py index 2e708706..ea7e06d5 100644 --- a/vel/command/train_command.py +++ b/vel/command/train_command.py @@ -1,6 +1,8 @@ import typing import vel.api as api +import vel.data as data +import vel.train as train from vel.callback.time_tracker import TimeTracker @@ -10,7 +12,7 @@ class SimpleTrainCommand: def __init__(self, epochs: int, model_config: api.ModelConfig, model_factory: api.ModelFactory, optimizer_factory: api.OptimizerFactory, scheduler_factory: typing.Optional[api.SchedulerFactory], - source: api.Source, storage: api.Storage, callbacks: typing.Optional[typing.List[api.Callback]], + loader: data.Loader, storage: api.Storage, callbacks: typing.Optional[typing.List[api.Callback]], max_grad_norm: typing.Optional[float]): self.epochs = epochs self.model_config = model_config @@ -19,7 +21,7 @@ def __init__(self, epochs: int, model_config: api.ModelConfig, model_factory: ap self.optimizer_factory = optimizer_factory self.scheduler_factory = scheduler_factory - self.source = source + self.loader = loader self.storage = storage self.callbacks = callbacks if callbacks is not None else [] self.max_grad_norm = max_grad_norm @@ -28,7 +30,7 @@ def run(self): """ Run the command with supplied configuration """ device = self.model_config.torch_device() - learner = api.Learner(device, self.model_factory.instantiate(), self.max_grad_norm) + learner = train.Trainer(device, self.model_factory.instantiate(), self.max_grad_norm) optimizer = self.optimizer_factory.instantiate(learner.model) # All callbacks used for learning @@ -49,12 +51,12 @@ def run(self): epoch_info = api.EpochInfo( training_info=training_info, global_epoch_idx=global_epoch_idx, - batches_per_epoch=self.source.train_iterations_per_epoch, + batches_per_epoch=self.loader.size['train'], optimizer=optimizer ) # Execute learning - learner.run_epoch(epoch_info, self.source) + learner.run_epoch(epoch_info, self.loader) self.storage.checkpoint(epoch_info, learner.model) @@ -99,7 +101,7 @@ def resume_training(self, learner, callbacks, metrics) -> api.TrainingInfo: return training_info -def create(model_config, epochs, optimizer, model, source, storage, scheduler=None, callbacks=None, max_grad_norm=None): +def create(model_config, epochs, optimizer, model, loader, storage, scheduler=None, callbacks=None, max_grad_norm=None): """ Vel factory function """ return SimpleTrainCommand( epochs=epochs, @@ -107,7 +109,7 @@ def create(model_config, epochs, optimizer, model, source, storage, scheduler=No model_factory=model, optimizer_factory=optimizer, scheduler_factory=scheduler, - source=source, + loader=loader, storage=storage, callbacks=callbacks, max_grad_norm=max_grad_norm diff --git 
a/vel/data/__init__.py b/vel/data/__init__.py index dd02c4a5..41e0ebd8 100644 --- a/vel/data/__init__.py +++ b/vel/data/__init__.py @@ -1 +1,2 @@ -from .image_op import * # noqa +from .dataflow import DataFlow +from .loader import Loader diff --git a/vel/augmentation/tta/__init__.py b/vel/data/augmentation/__init__.py similarity index 100% rename from vel/augmentation/tta/__init__.py rename to vel/data/augmentation/__init__.py diff --git a/vel/augmentation/center_crop.py b/vel/data/augmentation/center_crop.py similarity index 100% rename from vel/augmentation/center_crop.py rename to vel/data/augmentation/center_crop.py diff --git a/vel/augmentation/normalize.py b/vel/data/augmentation/normalize.py similarity index 94% rename from vel/augmentation/normalize.py rename to vel/data/augmentation/normalize.py index b0b787a6..d67a2c6a 100644 --- a/vel/augmentation/normalize.py +++ b/vel/data/augmentation/normalize.py @@ -3,7 +3,7 @@ import vel.api as api -class Normalize(api.Augmentation): +class Normalize(api.Transformation): """ Normalize input mean and standard deviation """ def __init__(self, mean, std, mode='x', tags=None): diff --git a/vel/augmentation/random_crop.py b/vel/data/augmentation/random_crop.py similarity index 100% rename from vel/augmentation/random_crop.py rename to vel/data/augmentation/random_crop.py diff --git a/vel/augmentation/random_horizontal_flip.py b/vel/data/augmentation/random_horizontal_flip.py similarity index 93% rename from vel/augmentation/random_horizontal_flip.py rename to vel/data/augmentation/random_horizontal_flip.py index e4d6c142..ba397519 100644 --- a/vel/augmentation/random_horizontal_flip.py +++ b/vel/data/augmentation/random_horizontal_flip.py @@ -4,7 +4,7 @@ import vel.api as api -class RandomHorizontalFlip(api.Augmentation): +class RandomHorizontalFlip(api.Transformation): """ Apply a horizontal flip randomly to input images """ def __init__(self, p=0.5, mode='x', tags=None): diff --git a/vel/augmentation/random_lighting.py b/vel/data/augmentation/random_lighting.py similarity index 94% rename from vel/augmentation/random_lighting.py rename to vel/data/augmentation/random_lighting.py index d85c450c..9c51d9ae 100644 --- a/vel/augmentation/random_lighting.py +++ b/vel/data/augmentation/random_lighting.py @@ -4,7 +4,7 @@ import vel.data as data -class RandomLighting(api.Augmentation): +class RandomLighting(api.Transformation): """ Apply a horizontal flip randomly to input images """ def __init__(self, b, c, mode='x', tags=None): diff --git a/vel/augmentation/random_rotate.py b/vel/data/augmentation/random_rotate.py similarity index 100% rename from vel/augmentation/random_rotate.py rename to vel/data/augmentation/random_rotate.py diff --git a/vel/augmentation/random_scale.py b/vel/data/augmentation/random_scale.py similarity index 100% rename from vel/augmentation/random_scale.py rename to vel/data/augmentation/random_scale.py diff --git a/vel/augmentation/scale_min_size.py b/vel/data/augmentation/scale_min_size.py similarity index 100% rename from vel/augmentation/scale_min_size.py rename to vel/data/augmentation/scale_min_size.py diff --git a/vel/math/__init__.py b/vel/data/augmentation/tta/__init__.py similarity index 100% rename from vel/math/__init__.py rename to vel/data/augmentation/tta/__init__.py diff --git a/vel/augmentation/tta/train_tta.py b/vel/data/augmentation/tta/train_tta.py similarity index 100% rename from vel/augmentation/tta/train_tta.py rename to vel/data/augmentation/tta/train_tta.py diff --git 
a/vel/augmentation/unsupervised.py b/vel/data/augmentation/unsupervised.py similarity index 100% rename from vel/augmentation/unsupervised.py rename to vel/data/augmentation/unsupervised.py diff --git a/vel/data/dataflow.py b/vel/data/dataflow.py new file mode 100644 index 00000000..ae4b28ad --- /dev/null +++ b/vel/data/dataflow.py @@ -0,0 +1,55 @@ +import typing +import torch.utils.data as data + +from vel.api import Source, Transformation + + +def pre_map(datapoint): + """ Map datapoint from a list into the dictionary """ + if isinstance(datapoint, (list, tuple)): + return dict(zip("xyzw", datapoint)) + return datapoint + + +class DataFlow(data.Dataset): + """ A dataset wrapping underlying data source with transformations """ + + @staticmethod + def transform(source: Source, transformations: typing.List[Transformation]) -> Source: + """ Transform supplied source with a list of given transformations """ + return Source( + train=DataFlow(source.train, transformations, 'train'), + validation=DataFlow(source.validation, transformations, 'val'), + test=None if source.test is None else DataFlow(source.test, transformations, 'test') + ) + + def __init__(self, dataset, transformations, tag): + self.dataset = dataset + + if transformations is None: + self.transformations = [] + else: + self.transformations = [t for t in transformations if tag in t.tags] + + self.tag = tag + + def get_raw(self, index): + return pre_map(self.dataset[index]) + + def __getitem__(self, index): + datapoint = self.get_raw(index) + + for t in self.transformations: + datapoint = t(datapoint) + + return datapoint + + def denormalize(self, datapoint): + """ Perform a reverse normalization (for viewing) """ + for t in self.transformations[::-1]: + datapoint = t.denormalize(datapoint) + + return datapoint + + def __len__(self): + return len(self.dataset) diff --git a/vel/data/loader.py b/vel/data/loader.py new file mode 100644 index 00000000..4e98742f --- /dev/null +++ b/vel/data/loader.py @@ -0,0 +1,71 @@ +import typing +import torch.utils.data as data + +from vel.api import Source + +from .dataflow import DataFlow + + +class Loader: + """ Loads data from a data source to serve it to the model """ + + def __init__(self, source: Source, batch_size: int, num_workers: int, + transformations: typing.Optional[list] = None): + self.source = source + self.batch_size = batch_size + self.num_workers = num_workers + self.transformations = transformations + + if transformations is not None: + self.transformed_source = DataFlow.transform(self.source, transformations) + else: + self.transformed_source = source + + self.train_loader = data.DataLoader( + self.transformed_source.train, batch_size=batch_size, shuffle=True, num_workers=num_workers, + drop_last=True + ) + + self.val_loader = data.DataLoader( + self.transformed_source.validation, batch_size=batch_size, shuffle=False, num_workers=num_workers, + ) + + if self.transformed_source.test is not None: + self.test_loader = data.DataLoader( + self.transformed_source.test, batch_size=batch_size, shuffle=False, num_workers=num_workers + ) + else: + self.test_loader = None + + self._loaders = { + 'train': self.train_loader, + 'val': self.val_loader, + 'test': self.test_loader + } + + self._loader_sizes = { + 'train': len(self.train_loader), + 'val': len(self.val_loader), + 'test': 0 if self.test_loader is None else len(self.test_loader) + } + + def __getitem__(self, item): + return self._loaders[item] + + @property + def loader(self): + return self._loaders + + @property + def size(self): + 
return self._loader_sizes + + +def create(source: Source, batch_size: int, num_workers: int=0, transformations: typing.Optional[list] = None): + """ Vel factory function """ + return Loader( + source=source, + batch_size=batch_size, + num_workers=num_workers, + transformations=transformations + ) diff --git a/vel/phase/__init__.py b/vel/data/operation/__init__.py similarity index 100% rename from vel/phase/__init__.py rename to vel/data/operation/__init__.py diff --git a/vel/data/image_op.py b/vel/data/operation/image_op.py similarity index 100% rename from vel/data/image_op.py rename to vel/data/operation/image_op.py diff --git a/vel/schedule/__init__.py b/vel/data/source/__init__.py similarity index 100% rename from vel/schedule/__init__.py rename to vel/data/source/__init__.py diff --git a/vel/source/img_dir_source.py b/vel/data/source/img_dir_source.py similarity index 100% rename from vel/source/img_dir_source.py rename to vel/data/source/img_dir_source.py diff --git a/vel/source/__init__.py b/vel/data/source/nlp/__init__.py similarity index 100% rename from vel/source/__init__.py rename to vel/data/source/nlp/__init__.py diff --git a/vel/source/nlp/imdb.py b/vel/data/source/nlp/imdb.py similarity index 100% rename from vel/source/nlp/imdb.py rename to vel/data/source/nlp/imdb.py diff --git a/vel/source/nlp/multi30k.py b/vel/data/source/nlp/multi30k.py similarity index 100% rename from vel/source/nlp/multi30k.py rename to vel/data/source/nlp/multi30k.py diff --git a/vel/source/nlp/text_url.py b/vel/data/source/nlp/text_url.py similarity index 100% rename from vel/source/nlp/text_url.py rename to vel/data/source/nlp/text_url.py diff --git a/vel/source/nlp/wmt14.py b/vel/data/source/nlp/wmt14.py similarity index 100% rename from vel/source/nlp/wmt14.py rename to vel/data/source/nlp/wmt14.py diff --git a/vel/source/nlp/__init__.py b/vel/data/source/vision/__init__.py similarity index 100% rename from vel/source/nlp/__init__.py rename to vel/data/source/vision/__init__.py diff --git a/vel/source/vision/cifar10.py b/vel/data/source/vision/cifar10.py similarity index 100% rename from vel/source/vision/cifar10.py rename to vel/data/source/vision/cifar10.py diff --git a/vel/data/source/vision/mnist.py b/vel/data/source/vision/mnist.py new file mode 100644 index 00000000..8ab6b49e --- /dev/null +++ b/vel/data/source/vision/mnist.py @@ -0,0 +1,51 @@ +from torchvision import datasets + +from vel.api import Source + + + +def create(model_config): + """ Create a MNIST dataset, normalized """ + path = model_config.data_dir('mnist') + + train_dataset = datasets.MNIST(path, train=True, download=True) + test_dataset = datasets.MNIST(path, train=False, download=True) + + train_data = train_dataset.data + mean_value = (train_data.double() / 255).mean().item() + std_value = (train_data.double() / 255).std().item() + + return Source( + train=train_dataset, + validation=test_dataset, + metadata={ + 'train_mean': mean_value, + 'train_std': std_value + } + ) + +# from vel.api import SupervisedTrainingData +# +# from vel.augmentations.normalize import Normalize +# from vel.augmentations.to_tensor import ToTensor +# from vel.augmentations.to_array import ToArray +# from vel.augmentations.unsupervised import Unsupervised + + # augmentations = [ToArray()] + (augmentations if augmentations is not None else []) + # + # if normalize: + # + # augmentations.append(Normalize(mean=mean_value, std=std_value, tags=['train', 'val'])) + # + # augmentations.append(ToTensor()) + # + # if unsupervised: + # 
augmentations.append(Unsupervised()) + # + # return SupervisedTrainingData( + # train_dataset, + # test_dataset, + # num_workers=num_workers, + # batch_size=batch_size, + # augmentations=augmentations + # ) diff --git a/vel/source/vision/__init__.py b/vel/data/transformation/__init__.py similarity index 100% rename from vel/source/vision/__init__.py rename to vel/data/transformation/__init__.py diff --git a/vel/data/transformation/image_to_tensor.py b/vel/data/transformation/image_to_tensor.py new file mode 100644 index 00000000..75eebc32 --- /dev/null +++ b/vel/data/transformation/image_to_tensor.py @@ -0,0 +1,31 @@ +import numpy as np + +import torchvision.transforms.functional as F + +from vel.api.transformation import ScopedTransformation + + +class ImageToTensor(ScopedTransformation): + """ Convert image array to a tensor """ + def transform(self, value): + # First let's make sure it's actually a numpy array + value = np.asarray(value) + + if len(value.shape) == 2: + # If the image has only one channel, it still needs to be specified + value = value.reshape(value.shape[0], value.shape[1], 1) + + return F.to_tensor(value) + + def denormalization_transform(self, value): + image_array = np.transpose(value.numpy(), (1, 2, 0)) + + if len(image_array.shape) == 3 and image_array.shape[-1] == 1: + return image_array[:, :, 0] + + return image_array + + +def create(mode='x', tags=None): + """ Vel factory function """ + return ImageToTensor(mode, tags) diff --git a/vel/augmentation/to_array.py b/vel/data/transformation/to_array.py similarity index 56% rename from vel/augmentation/to_array.py rename to vel/data/transformation/to_array.py index e1f3a5f0..8c1e838c 100644 --- a/vel/augmentation/to_array.py +++ b/vel/data/transformation/to_array.py @@ -1,15 +1,13 @@ import numpy as np -import vel.data as data +from vel.api.transformation import ScopedTransformation -class ToArray(data.Augmentation): +class ToArray(ScopedTransformation): """ Convert image to an array of floats """ - def __init__(self, mode='x', tags=None): - super().__init__(mode, tags) - def __call__(self, x_data): - array = np.array(x_data) + def transform(self, value): + array = np.array(value) if array.dtype == np.uint8: return array.astype(np.float32) / 255.0 diff --git a/vel/function/__init__.py b/vel/function/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/vel/schedule/constant.py b/vel/function/constant.py similarity index 100% rename from vel/schedule/constant.py rename to vel/function/constant.py diff --git a/vel/schedule/linear.py b/vel/function/linear.py similarity index 100% rename from vel/schedule/linear.py rename to vel/function/linear.py diff --git a/vel/schedule/linear_and_constant.py b/vel/function/linear_and_constant.py similarity index 100% rename from vel/schedule/linear_and_constant.py rename to vel/function/linear_and_constant.py diff --git a/vel/model/autoencoder/mnist_cnn_vae.py b/vel/model/autoencoder/mnist_cnn_vae.py index b678a9de..1a1dd3e9 100644 --- a/vel/model/autoencoder/mnist_cnn_vae.py +++ b/vel/model/autoencoder/mnist_cnn_vae.py @@ -7,13 +7,13 @@ import vel.util.network as net_util -from vel.api import SupervisedModel, ModelFactory +from vel.api import GradientModel, ModelFactory from vel.metric.averaging_metric import AveragingNamedMetric from vel.metric.loss_metric import Loss from vel.module.layers import Flatten, Reshape -class MnistCnnVAE(SupervisedModel): +class MnistCnnVAE(GradientModel): """ A simple MNIST variational autoencoder, containing 3 convolutional layers. 
""" diff --git a/vel/source/vision/mnist.py b/vel/source/vision/mnist.py deleted file mode 100644 index 0ac79aae..00000000 --- a/vel/source/vision/mnist.py +++ /dev/null @@ -1,39 +0,0 @@ -from torchvision import datasets - - -from vel.api import SupervisedTrainingData - -from vel.augmentations.normalize import Normalize -from vel.augmentations.to_tensor import ToTensor -from vel.augmentations.to_array import ToArray -from vel.augmentations.unsupervised import Unsupervised - - -def create(model_config, batch_size, normalize=True, num_workers=0, augmentations=None, unsupervised=False): - """ Create a MNIST dataset, normalized """ - path = model_config.data_dir('mnist') - - train_dataset = datasets.MNIST(path, train=True, download=True) - test_dataset = datasets.MNIST(path, train=False, download=True) - - augmentations = [ToArray()] + (augmentations if augmentations is not None else []) - - if normalize: - train_data = train_dataset.data - mean_value = (train_data.double() / 255).mean().item() - std_value = (train_data.double() / 255).std().item() - - augmentations.append(Normalize(mean=mean_value, std=std_value, tags=['train', 'val'])) - - augmentations.append(ToTensor()) - - if unsupervised: - augmentations.append(Unsupervised()) - - return SupervisedTrainingData( - train_dataset, - test_dataset, - num_workers=num_workers, - batch_size=batch_size, - augmentations=augmentations - ) diff --git a/vel/train/__init__.py b/vel/train/__init__.py new file mode 100644 index 00000000..260e4c8d --- /dev/null +++ b/vel/train/__init__.py @@ -0,0 +1 @@ +from .trainer import Trainer diff --git a/vel/train/phase/__init__.py b/vel/train/phase/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/vel/phase/cycle.py b/vel/train/phase/cycle.py similarity index 100% rename from vel/phase/cycle.py rename to vel/train/phase/cycle.py diff --git a/vel/phase/freeze.py b/vel/train/phase/freeze.py similarity index 100% rename from vel/phase/freeze.py rename to vel/train/phase/freeze.py diff --git a/vel/phase/generic.py b/vel/train/phase/generic.py similarity index 100% rename from vel/phase/generic.py rename to vel/train/phase/generic.py diff --git a/vel/phase/unfreeze.py b/vel/train/phase/unfreeze.py similarity index 100% rename from vel/phase/unfreeze.py rename to vel/train/phase/unfreeze.py diff --git a/vel/api/learner.py b/vel/train/trainer.py similarity index 65% rename from vel/api/learner.py rename to vel/train/trainer.py index 5dfd4393..44288497 100644 --- a/vel/api/learner.py +++ b/vel/train/trainer.py @@ -4,14 +4,16 @@ import tqdm import typing -from .model import SupervisedModel -from .info import BatchInfo, EpochInfo, TrainingInfo -from .source import Source +from vel.api import GradientModel, TrainingInfo, EpochInfo, BatchInfo +from vel.data import Loader +from vel.util.tensor_util import to_device -class Learner: + +class Trainer: """ Manages training process of a single model """ - def __init__(self, device: torch.device, model: SupervisedModel, max_grad_norm: typing.Optional[float] = None): + + def __init__(self, device: torch.device, model: GradientModel, max_grad_norm: typing.Optional[float] = None): self.device = device self.model = model.to(device) self.max_grad_norm = max_grad_norm @@ -43,64 +45,67 @@ def initialize_training(self, training_info: TrainingInfo, model_state=None, hid else: self.model.load_state_dict(model_state) - def run_epoch(self, epoch_info: EpochInfo, source: 'Source'): + def run_epoch(self, epoch_info: EpochInfo, loader: Loader): """ Run full epoch of learning 
""" epoch_info.on_epoch_begin() lr = epoch_info.optimizer.param_groups[-1]['lr'] print("|-------- Epoch {:06} Lr={:.6f} ----------|".format(epoch_info.global_epoch_idx, lr)) - self.train_epoch(epoch_info, source) + self.train_epoch(epoch_info, loader) epoch_info.result_accumulator.freeze_results('train') - self.validation_epoch(epoch_info, source) + self.validation_epoch(epoch_info, loader) epoch_info.result_accumulator.freeze_results('val') epoch_info.on_epoch_end() - def train_epoch(self, epoch_info, source: 'Source', interactive=True): + def train_epoch(self, epoch_info, loader: Loader, interactive=True): """ Run a single training epoch """ self.train() if interactive: - iterator = tqdm.tqdm(source.train_loader, desc="Training", unit="iter", file=sys.stdout) + iterator = tqdm.tqdm(loader['train'], desc="Training", unit="iter", file=sys.stdout) else: - iterator = source.train_loader + iterator = loader['train'] - for batch_idx, (data, target) in enumerate(iterator): + for batch_idx, data in enumerate(iterator): batch_info = BatchInfo(epoch_info, batch_idx) batch_info.on_batch_begin() - self.train_batch(batch_info, data, target) + self.train_batch(batch_info, data) batch_info.on_batch_end() iterator.set_postfix(loss=epoch_info.result_accumulator.intermediate_value('loss')) - def validation_epoch(self, epoch_info, source: 'Source'): + def validation_epoch(self, epoch_info, loader: Loader, interactive=True): """ Run a single evaluation epoch """ self.eval() - iterator = tqdm.tqdm(source.val_loader, desc="Validation", unit="iter", file=sys.stdout) + if interactive: + iterator = tqdm.tqdm(loader['val'], desc="Training", unit="iter", file=sys.stdout) + else: + iterator = loader['val'] with torch.no_grad(): - for batch_idx, (data, target) in enumerate(iterator): + for batch_idx, data in enumerate(iterator): batch_info = BatchInfo(epoch_info, batch_idx) batch_info.on_validation_batch_begin() - self.feed_batch(batch_info, data, target) + self.feed_batch(batch_info, data) batch_info.on_validation_batch_end() - def feed_batch(self, batch_info, data, target): + def feed_batch(self, batch_info, data): """ Run single batch of data """ - data, target = data.to(self.device), target.to(self.device) - metrics = self.model.calculate_gradient(data, target) + data = to_device(data, self.device) # Move a data batch into the right device + metrics = self.model.calculate_gradient(data) batch_info.update(metrics) - def train_batch(self, batch_info, data, target): + def train_batch(self, batch_info, data): """ Train single batch of data """ batch_info.optimizer.zero_grad() - self.feed_batch(batch_info, data, target) + self.feed_batch(batch_info, data) if self.max_grad_norm is not None: batch_info['grad_norm'] = torch.nn.utils.clip_grad_norm_( From 5120966c505796c39a10a9660c00e07395670755 Mon Sep 17 00:00:00 2001 From: Million Integrals Date: Sun, 23 Jun 2019 18:00:34 -0700 Subject: [PATCH 054/162] Fixed again AE and VAE models. 
--- .../autoencoder/mnist/mnist_cnn_ae.yaml | 25 ++++++++++--------- .../autoencoder/mnist/mnist_cnn_vae.yaml | 22 ++++++++-------- .../classification/mnist/mnist_cnn_01.yaml | 7 +++--- vel/data/augmentation/unsupervised.py | 14 ----------- vel/data/transformation/unsupervised.py | 12 +++++++++ vel/model/autoencoder/mnist_cnn_vae.py | 6 ++--- 6 files changed, 43 insertions(+), 43 deletions(-) delete mode 100644 vel/data/augmentation/unsupervised.py create mode 100644 vel/data/transformation/unsupervised.py diff --git a/examples-configs/autoencoder/mnist/mnist_cnn_ae.yaml b/examples-configs/autoencoder/mnist/mnist_cnn_ae.yaml index ea1782b4..690a7f8c 100644 --- a/examples-configs/autoencoder/mnist/mnist_cnn_ae.yaml +++ b/examples-configs/autoencoder/mnist/mnist_cnn_ae.yaml @@ -1,8 +1,8 @@ -name: 'mnist_cnn_autoenoder' +name: 'mnist_cnn_ae' model: - name: vel.models.autoencoder.mnist_cnn_autoencoder + name: vel.model.autoencoder.mnist_cnn_autoencoder img_rows: 28 img_cols: 28 img_channels: 1 @@ -11,28 +11,29 @@ model: source: - name: vel.sources.vision.mnist + name: vel.data.source.vision.mnist + + +loader: + name: vel.data.loader batch_size: 128 num_workers: 4 - normalize: False - unsupervised: true + + transformations: + - name: vel.data.transformation.image_to_tensor + - name: vel.data.transformation.unsupervised optimizer: - name: vel.optimizers.adam + name: vel.optimizer.adam lr: 1.0e-3 commands: train: - name: vel.commands.train_command + name: vel.command.train_command epochs: 12 log_frequency: 100 - checkpoint: metric: 'val:loss' - - - visdom: - name: vel.commands.vis_store_command diff --git a/examples-configs/autoencoder/mnist/mnist_cnn_vae.yaml b/examples-configs/autoencoder/mnist/mnist_cnn_vae.yaml index 74c499c9..c5a5f9c9 100644 --- a/examples-configs/autoencoder/mnist/mnist_cnn_vae.yaml +++ b/examples-configs/autoencoder/mnist/mnist_cnn_vae.yaml @@ -2,7 +2,7 @@ name: 'mnist_cnn_autoenoder' model: - name: vel.models.autoencoder.mnist_cnn_vae + name: vel.model.autoencoder.mnist_cnn_vae img_rows: 28 img_cols: 28 img_channels: 1 @@ -11,28 +11,28 @@ model: source: - name: vel.sources.vision.mnist + name: vel.data.source.vision.mnist + +loader: + name: vel.data.loader batch_size: 128 - normalize: False num_workers: 4 - unsupervised: true + + transformations: + - name: vel.data.transformation.image_to_tensor + - name: vel.data.transformation.unsupervised optimizer: - name: vel.optimizers.adam + name: vel.optimizer.adam lr: 1.0e-3 commands: train: - name: vel.commands.train_command + name: vel.command.train_command epochs: 12 log_frequency: 100 - checkpoint: metric: 'val:loss' - - - visdom: - name: vel.commands.vis_store_command diff --git a/examples-configs/classification/mnist/mnist_cnn_01.yaml b/examples-configs/classification/mnist/mnist_cnn_01.yaml index b0f75729..6e06faea 100644 --- a/examples-configs/classification/mnist/mnist_cnn_01.yaml +++ b/examples-configs/classification/mnist/mnist_cnn_01.yaml @@ -22,15 +22,16 @@ loader: - name: vel.data.transformation.image_to_tensor +optimizer: + name: vel.optimizer.adadelta + + commands: train: name: vel.command.train_command epochs: 12 log_frequency: 100 - optimizer: - name: vel.optimizer.adadelta - checkpoint: metric: 'val:loss' diff --git a/vel/data/augmentation/unsupervised.py b/vel/data/augmentation/unsupervised.py deleted file mode 100644 index 678ab7d3..00000000 --- a/vel/data/augmentation/unsupervised.py +++ /dev/null @@ -1,14 +0,0 @@ -import vel.data as data - - -class Unsupervised(data.Augmentation): - """ Simply transform 
supervised to an unsupervised dataset, cloning data to a target """ - def __init__(self): - super().__init__('both', None) - - def __call__(self, x_data, y_data): - return x_data, x_data - - -def create(): - return Unsupervised() diff --git a/vel/data/transformation/unsupervised.py b/vel/data/transformation/unsupervised.py new file mode 100644 index 00000000..7af7e560 --- /dev/null +++ b/vel/data/transformation/unsupervised.py @@ -0,0 +1,12 @@ +from vel.api import Transformation + + +class Unsupervised(Transformation): + """ Simply transform supervised to an unsupervised dataset, cloning data to a target """ + def __call__(self, datapoint): + datapoint['y'] = datapoint['x'] + return datapoint + + +def create(): + return Unsupervised() diff --git a/vel/model/autoencoder/mnist_cnn_vae.py b/vel/model/autoencoder/mnist_cnn_vae.py index 1a1dd3e9..3eb1dadd 100644 --- a/vel/model/autoencoder/mnist_cnn_vae.py +++ b/vel/model/autoencoder/mnist_cnn_vae.py @@ -108,9 +108,9 @@ def forward(self, sample): 'std': std } - def calculate_gradient(self, x_data, y_true): + def calculate_gradient(self, data): """ Calculate a gradient of loss function """ - output = self(x_data) + output = self(data['x']) y_pred = output['decoded'] @@ -124,7 +124,7 @@ def calculate_gradient(self, x_data, y_true): # reconstruction = 0.5 * F.mse_loss(y_pred, y_true) # We must sum over all image axis and average only on minibatch axis - reconstruction = F.binary_cross_entropy(y_pred, y_true, reduce=False).sum(1).sum(1).sum(1).mean() + reconstruction = F.binary_cross_entropy(y_pred, data['y'], reduce=False).sum(1).sum(1).sum(1).mean() loss = reconstruction + kl_divergence if self.training: From 5e07fd95e8bbf0ea8c64e0cb3b10f00c501cfb0a Mon Sep 17 00:00:00 2001 From: Million Integrals Date: Sun, 23 Jun 2019 18:04:35 -0700 Subject: [PATCH 055/162] Small updates to README. --- README.md | 114 +----------------------------------------------------- 1 file changed, 2 insertions(+), 112 deletions(-) diff --git a/README.md b/README.md index fd5d7c42..5b39c7a7 100644 --- a/README.md +++ b/README.md @@ -3,7 +3,6 @@ [![Build Status](https://travis-ci.org/MillionIntegrals/vel.svg?branch=master)](https://travis-ci.org/MillionIntegrals/vel) [![PyPI version](https://badge.fury.io/py/vel.svg)](https://badge.fury.io/py/vel) [![GitHub](https://img.shields.io/github/license/mashape/apistatus.svg)](https://github.com/MillionIntegrals/vel/blob/master/LICENSE) -[![Gitter chat](https://badges.gitter.im/MillionIngegrals/vel.png)](https://gitter.im/deep-learning-vel) Bring **velocity** to deep-learning research. @@ -130,14 +129,14 @@ Most of the examples for this framework are defined using config files in the For example, to run the A2C algorithm on a Breakout atari environment, simply invoke: ``` -python -m vel.launcher examples-configs/rl/atari/a2c/breakout_a2c.yaml train +python -m vel.launcher examples-configs/rl/atari/atari_a2c.yaml train ``` If you install the library locally, you'll have a special wrapper created that will invoke the launcher for you. Then, above becomes: ``` -vel examples-configs/rl/atari/a2c/breakout_a2c.yaml train +vel examples-configs/rl/atari/atari_a2c.yaml train ``` General command line interface of the launcher is: @@ -154,112 +153,6 @@ If you prefer to use the library from inside your scripts, take a look at the well. Scripts generally don't require any MongoDB or Visdom setup, so they can be run straight away in any setup, but their output will be less rich and less informative. 
-Here is an example script running the same setup as a config file from above: - -```python -import torch -import torch.optim as optim - -from vel.rl.metrics import EpisodeRewardMetric -from vel.storage.streaming.stdout import StdoutStreaming -from vel.util.random import set_seed - -from vel.rl.env.classic_atari import ClassicAtariEnv -from vel.rl.vecenv.subproc import SubprocVecEnvWrapper - -from vel.modules.input.image_to_tensor import ImageToTensorFactory -from vel.rl.models.stochastic_policy_model import StochasticPolicyModelFactory -from vel.rl.models.backbone.nature_cnn import NatureCnnFactory - - -from vel.rl.reinforcers.on_policy_iteration_reinforcer import ( - OnPolicyIterationReinforcer, OnPolicyIterationReinforcerSettings -) - -from vel.rl.algo.policy_gradient.a2c import A2CPolicyGradient -from vel.rl.env_roller.step_env_roller import StepEnvRoller - -from vel.api.info import TrainingInfo, EpochInfo - - -def breakout_a2c(): - device = torch.device('cuda:0') - seed = 1001 - - # Set random seed in python std lib, numpy and pytorch - set_seed(seed) - - # Create 16 environments evaluated in parallel in sub processess with all usual DeepMind wrappers - # These are just helper functions for that - vec_env = SubprocVecEnvWrapper( - ClassicAtariEnv('BreakoutNoFrameskip-v4'), frame_history=4 - ).instantiate(parallel_envs=16, seed=seed) - - # Again, use a helper to create a model - # But because model is owned by the reinforcer, model should not be accessed using this variable - # but from reinforcer.model property - model = StochasticPolicyModelFactory( - input_block=ImageToTensorFactory(), - backbone=NatureCnnFactory(input_width=84, input_height=84, input_channels=4) - ).instantiate(action_space=vec_env.action_space) - - # Reinforcer - an object managing the learning process - reinforcer = OnPolicyIterationReinforcer( - device=device, - settings=OnPolicyIterationReinforcerSettings( - batch_size=256, - number_of_steps=5, - ), - model=model, - algo=A2CPolicyGradient( - entropy_coefficient=0.01, - value_coefficient=0.5, - max_grad_norm=0.5, - discount_factor=0.99, - ), - env_roller=StepEnvRoller( - environment=vec_env, - device=device, - ) - ) - - # Model optimizer - optimizer = optim.RMSprop(reinforcer.model.parameters(), lr=7.0e-4, eps=1e-3) - - # Overall information store for training information - training_info = TrainingInfo( - metrics=[ - EpisodeRewardMetric('episode_rewards'), # Calculate average reward from episode - ], - callbacks=[StdoutStreaming()] # Print live metrics every epoch to standard output - ) - - # A bit of training initialization bookkeeping... 
- training_info.initialize() - reinforcer.initialize_training(training_info) - training_info.on_train_begin() - - # Let's make 100 batches per epoch to average metrics nicely - num_epochs = int(1.1e7 / (5 * 16) / 100) - - # Normal handrolled training loop - for i in range(1, num_epochs+1): - epoch_info = EpochInfo( - training_info=training_info, - global_epoch_idx=i, - batches_per_epoch=100, - optimizer=optimizer - ) - - reinforcer.train_epoch(epoch_info) - - training_info.on_train_end() - - -if __name__ == '__main__': - breakout_a2c() -``` - # Docker Dockerized version of this library is available in from the Docker Hub as @@ -316,10 +209,7 @@ Possible to be included: Code quality: -- Rename models to policies -- Force dictionary inputs and outputs for policies - Factor action noise back into the policy -- Use linter as a part of the build process # Citing From acdce019e85b6a28323b314b517b9146d154d2e8 Mon Sep 17 00:00:00 2001 From: Million Integrals Date: Sun, 23 Jun 2019 21:04:58 -0700 Subject: [PATCH 056/162] Updating some metrics. --- vel/api/info.py | 22 ++++++++------------ vel/callback/sample_tracker.py | 0 vel/metric/__init__.py | 2 +- vel/metric/accuracy.py | 4 ++-- vel/metric/averaging_metric.py | 12 +++++------ vel/metric/base_metric.py | 8 +++++++- vel/metric/loss_metric.py | 4 ++-- vel/metric/summing_metric.py | 4 ++-- vel/metric/value_metric.py | 4 ++-- vel/storage/streaming/stdout.py | 25 +++++++++++------------ vel/storage/streaming/tensorboard.py | 30 +++++++++++++++++----------- 11 files changed, 60 insertions(+), 55 deletions(-) create mode 100644 vel/callback/sample_tracker.py diff --git a/vel/api/info.py b/vel/api/info.py index b5f978c3..a84d1873 100644 --- a/vel/api/info.py +++ b/vel/api/info.py @@ -116,34 +116,28 @@ def _reset_metrics(self): for m in self.metrics: m.reset() - def value(self): - """ Return current value of the metrics """ - return {m.name: m.value() for m in self.metrics} + def value(self, dataset=None): + """ Return current dictionary value of the metrics """ + from vel.metric import MetricKey + return {MetricKey(dataset, m.name, m.scope): m.value() for m in self.metrics} def intermediate_value(self, metric): """ Return an intermediate (inter-epoch) value of a metric """ if ':' in metric: + # TODO(jerry) There's got to be a better way to do it metric_name = metric.split(':')[-1] else: metric_name = metric return self.metrics_by_name[metric_name].value() - def freeze_results(self, name=None): - new_results = self.value() - - if name is None: - for key, value in new_results.items(): - self.frozen_results[key] = value - else: - for key, value in new_results.items(): - self.frozen_results[f'{name}:{key}'] = value - + def freeze_results(self, dataset=None): + self.frozen_results.update(self.value(dataset)) self._reset_metrics() def result(self): """ Return the epoch result """ - final_result = {'epoch_idx': self.global_epoch_idx} + final_result = {} for key, value in self.frozen_results.items(): final_result[key] = value diff --git a/vel/callback/sample_tracker.py b/vel/callback/sample_tracker.py new file mode 100644 index 00000000..e69de29b diff --git a/vel/metric/__init__.py b/vel/metric/__init__.py index 7bb2fe79..37708fa3 100644 --- a/vel/metric/__init__.py +++ b/vel/metric/__init__.py @@ -1,3 +1,3 @@ -from .base_metric import BaseMetric # noqa +from .base_metric import BaseMetric, MetricKey # noqa from .averaging_metric import AveragingMetric, AveragingNamedMetric, AveragingSupervisedMetric # noqa from .value_metric import ValueMetric # noqa diff 
--git a/vel/metric/accuracy.py b/vel/metric/accuracy.py index 442f8470..44dcdb2f 100644 --- a/vel/metric/accuracy.py +++ b/vel/metric/accuracy.py @@ -3,8 +3,8 @@ class Accuracy(AveragingSupervisedMetric): """ Classification accuracy """ - def __init__(self): - super().__init__("accuracy") + def __init__(self, scope="train"): + super().__init__("accuracy", scope=scope) def _value_function(self, x_input, y_true, y_pred): """ Return classification accuracy of input """ diff --git a/vel/metric/averaging_metric.py b/vel/metric/averaging_metric.py index 37b667f3..2355b7fd 100644 --- a/vel/metric/averaging_metric.py +++ b/vel/metric/averaging_metric.py @@ -5,8 +5,8 @@ class AveragingMetric(BaseMetric): """ Base class for metrics that simply calculate the average over the epoch """ - def __init__(self, name): - super().__init__(name) + def __init__(self, name, scope="general"): + super().__init__(name, scope=scope) self.storage = [] @@ -29,8 +29,8 @@ def value(self): class AveragingNamedMetric(AveragingMetric): """ Super simple averaging metric that just takes a value from dictionary and averages it over samples """ - def __init__(self, name): - super().__init__(name) + def __init__(self, name, scope="general"): + super().__init__(name, scope=scope) def _value_function(self, batch_info): return batch_info[self.name] @@ -38,8 +38,8 @@ def _value_function(self, batch_info): class AveragingSupervisedMetric(BaseMetric): """ Base class for metrics that simply calculate the average over the epoch """ - def __init__(self, name): - super().__init__(name) + def __init__(self, name, scope="general"): + super().__init__(name, scope=scope) self.storage = [] diff --git a/vel/metric/base_metric.py b/vel/metric/base_metric.py index d8d12523..6a64d1b2 100644 --- a/vel/metric/base_metric.py +++ b/vel/metric/base_metric.py @@ -1,11 +1,17 @@ +import collections + from vel.api import TrainingInfo +MetricKey = collections.namedtuple('MetricKey', ['dataset', 'name', 'scope']) + + class BaseMetric: """ Base class for all the metrics """ - def __init__(self, name): + def __init__(self, name, scope="general"): self.name = name + self.scope = scope def calculate(self, batch_info): """ Calculate value of a metric based on supplied data """ diff --git a/vel/metric/loss_metric.py b/vel/metric/loss_metric.py index 1e02ce4d..f4fa9df5 100644 --- a/vel/metric/loss_metric.py +++ b/vel/metric/loss_metric.py @@ -3,8 +3,8 @@ class Loss(AveragingMetric): """ Just a loss function """ - def __init__(self): - super().__init__("loss") + def __init__(self, scope="train"): + super().__init__("loss", scope=scope) def _value_function(self, batch_info): """ Just forward a value of the loss""" diff --git a/vel/metric/summing_metric.py b/vel/metric/summing_metric.py index 3d1a389e..f08ceab7 100644 --- a/vel/metric/summing_metric.py +++ b/vel/metric/summing_metric.py @@ -3,8 +3,8 @@ class SummingMetric(BaseMetric): """ Base class for metrics that simply calculate the sum over the epoch """ - def __init__(self, name, reset_value=True): - super().__init__(name) + def __init__(self, name, scope="general", reset_value=True): + super().__init__(name, scope=scope) self.reset_value = reset_value self.buffer = 0 diff --git a/vel/metric/value_metric.py b/vel/metric/value_metric.py index 309e9e57..934ecc0c 100644 --- a/vel/metric/value_metric.py +++ b/vel/metric/value_metric.py @@ -4,8 +4,8 @@ class ValueMetric(BaseMetric): """ Base class for metrics that don't have state and just calculate a simple value """ - def __init__(self, name): - 
super().__init__(name) + def __init__(self, name, scope="general"): + super().__init__(name, scope=scope) self._metric_value = None diff --git a/vel/storage/streaming/stdout.py b/vel/storage/streaming/stdout.py index e4bc5fc3..d83e8f9d 100644 --- a/vel/storage/streaming/stdout.py +++ b/vel/storage/streaming/stdout.py @@ -9,33 +9,32 @@ def on_epoch_end(self, epoch_info: EpochInfo): else: print(f"=>>>>>>>>>> EPOCH {epoch_info.global_epoch_idx}") - if any(':' not in x for x in epoch_info.result.keys()): - self._print_metrics_line(epoch_info.result, head=None) + if any(x.dataset is None for x in epoch_info.result.keys()): + self._print_metrics_line(epoch_info.result, dataset=None) - head_set = sorted({x.split(':')[0] + ':' for x in epoch_info.result.keys() if ':' in x}) + head_set = sorted({x.dataset for x in epoch_info.result.keys() if x.dataset is not None}) for head in head_set: - if any(x.startswith(head) for x in epoch_info.result.keys()): - self._print_metrics_line(epoch_info.result, head) + self._print_metrics_line(epoch_info.result, head) print(f"=>>>>>>>>>> DONE") @staticmethod - def _print_metrics_line(metrics, head=None): - if head is None: - head = 'Metrics:' + def _print_metrics_line(metrics, dataset=None): + if dataset is None: + dataset = 'Metrics:' metrics_list = [ - "{} {:.06f}".format(k, metrics[k]) - for k in sorted([k for k in metrics.keys() if ':' not in k]) + "{}/{} {:.06f}".format(k.scope, k.name, metrics[k]) + for k in sorted([k for k in metrics.keys() if k.dataset is None]) ] else: metrics_list = [ - "{} {:.06f}".format(k.split(':')[1], metrics[k]) - for k in sorted([k for k in metrics.keys() if k.startswith(head)]) + "{}/{} {:.06f}".format(k.scope, k.name, metrics[k]) + for k in sorted([k for k in metrics.keys() if k.dataset == dataset]) ] - print('{0: <10}'.format(head.capitalize()), " ".join(metrics_list)) + print('{0: <10}'.format(dataset.capitalize()), " ".join(metrics_list)) def create(): diff --git a/vel/storage/streaming/tensorboard.py b/vel/storage/streaming/tensorboard.py index ef90b267..f75ca570 100644 --- a/vel/storage/streaming/tensorboard.py +++ b/vel/storage/streaming/tensorboard.py @@ -1,7 +1,7 @@ import os import shutil -from vel.api import ModelConfig, Callback, TrainingInfo +from vel.api import ModelConfig, Callback, TrainingInfo, EpochInfo from torch.utils.tensorboard import SummaryWriter @@ -18,21 +18,27 @@ def on_train_begin(self, training_info: TrainingInfo) -> None: if os.path.exists(self.logdir): shutil.rmtree(self.logdir) - def on_epoch_end(self, epoch_info): + def on_epoch_end(self, epoch_info: EpochInfo): """ Push data to tensorboard on push """ - summary_writer = SummaryWriter(log_dir=self.logdir) + head_set = sorted({x.dataset for x in epoch_info.result.keys()}) - for key, value in epoch_info.result.items(): - if key == 'epoch_idx': - continue + for head in head_set: + if head is None: + summary_writer = SummaryWriter(log_dir=os.path.join(self.logdir, "generic")) + else: + summary_writer = SummaryWriter(log_dir=os.path.join(self.logdir, head)) - summary_writer.add_scalar( - tag=key, - scalar_value=value, - global_step=epoch_info.global_epoch_idx, - ) + for key, value in epoch_info.result.items(): + if key.dataset == head: + tag = '{}/{}'.format(key.scope, key.name) - summary_writer.close() + summary_writer.add_scalar( + tag=tag, + scalar_value=value, + global_step=epoch_info.global_epoch_idx, + ) + + summary_writer.close() def create(model_config): From 489a7b69623cc51fab28a3276dbcc6474a117c27 Mon Sep 17 00:00:00 2001 From: Million 
Integrals Date: Sun, 23 Jun 2019 21:07:19 -0700 Subject: [PATCH 057/162] Added scope for some training metrics. --- vel/model/autoencoder/mnist_cnn_vae.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/vel/model/autoencoder/mnist_cnn_vae.py b/vel/model/autoencoder/mnist_cnn_vae.py index 3eb1dadd..258294ff 100644 --- a/vel/model/autoencoder/mnist_cnn_vae.py +++ b/vel/model/autoencoder/mnist_cnn_vae.py @@ -140,8 +140,8 @@ def metrics(self): """ Set of metrics for this model """ return [ Loss(), - AveragingNamedMetric('reconstruction'), - AveragingNamedMetric('kl_divergence') + AveragingNamedMetric('reconstruction', scope="train"), + AveragingNamedMetric('kl_divergence', scope="train") ] From 9f59f0908c81b423e861c18e72eae86c5b669a84 Mon Sep 17 00:00:00 2001 From: Million Integrals Date: Sun, 23 Jun 2019 21:08:51 -0700 Subject: [PATCH 058/162] Added some comment docstring. --- vel/api/model_config.py | 1 + 1 file changed, 1 insertion(+) diff --git a/vel/api/model_config.py b/vel/api/model_config.py index 4426dd81..bd100eeb 100644 --- a/vel/api/model_config.py +++ b/vel/api/model_config.py @@ -34,6 +34,7 @@ def find_project_directory(start_path) -> str: @staticmethod def from_project_directory(path) -> str: + """ Locate given path relative to project directory """ return os.path.join(ModelConfig.find_project_directory('.'), path) @classmethod From c146b323facf81e925dd82627e535accaff61c51 Mon Sep 17 00:00:00 2001 From: Million Integrals Date: Sun, 23 Jun 2019 21:59:28 -0700 Subject: [PATCH 059/162] Updated CIFAR10 configs. --- .../autoencoder/mnist/mnist_cnn_vae.yaml | 3 - .../cifar10/cifar10_cnn_01.yaml | 50 ++++++----- .../cifar10/cifar10_resnetv1_110.yaml | 81 ++++++++--------- .../cifar10/cifar10_resnetv1_32.yaml | 85 ++++++++---------- .../cifar10/cifar10_resnetv2_110.yaml | 84 ++++++++--------- .../cifar10_resnetv2_164_bottleneck.yaml | 89 ++++++++----------- .../cifar10/cifar10_resnetv2_32.yaml | 84 ++++++++--------- .../cifar10/cifar10_resnext_29_c1.yaml | 88 ++++++++---------- .../cifar10/cifar10_resnext_29_c8.yaml | 89 ++++++++----------- .../classification/mnist/mnist_cnn_01.yaml | 3 - vel/api/__init__.py | 5 +- vel/api/transformation.py | 8 ++ vel/data/augmentation/normalize.py | 24 ----- vel/data/augmentation/random_crop.py | 25 +++--- .../augmentation/random_horizontal_flip.py | 12 +-- vel/data/dataflow.py | 10 ++- vel/data/loader.py | 4 +- vel/data/source/vision/cifar10.py | 52 ++++++----- vel/data/source/vision/mnist.py | 27 ------ vel/data/transformation/image_to_tensor.py | 5 ++ vel/data/transformation/normalize.py | 28 ++++++ vel/{api => train}/train_phase.py | 0 22 files changed, 389 insertions(+), 467 deletions(-) delete mode 100644 vel/data/augmentation/normalize.py create mode 100644 vel/data/transformation/normalize.py rename vel/{api => train}/train_phase.py (100%) diff --git a/examples-configs/autoencoder/mnist/mnist_cnn_vae.yaml b/examples-configs/autoencoder/mnist/mnist_cnn_vae.yaml index c5a5f9c9..74619b28 100644 --- a/examples-configs/autoencoder/mnist/mnist_cnn_vae.yaml +++ b/examples-configs/autoencoder/mnist/mnist_cnn_vae.yaml @@ -32,7 +32,4 @@ commands: train: name: vel.command.train_command epochs: 12 - log_frequency: 100 - checkpoint: - metric: 'val:loss' diff --git a/examples-configs/classification/cifar10/cifar10_cnn_01.yaml b/examples-configs/classification/cifar10/cifar10_cnn_01.yaml index cce9c77b..8ce60349 100644 --- a/examples-configs/classification/cifar10/cifar10_cnn_01.yaml +++ 
b/examples-configs/classification/cifar10/cifar10_cnn_01.yaml @@ -2,36 +2,46 @@ name: 'cifar10_cnn_01' model: - name: vel.models.vision.cifar10_cnn_01 + name: vel.model.vision.cifar10_cnn_01 img_rows: 32 img_cols: 32 img_channels: 3 num_classes: 10 + source: - name: vel.sources.vision.cifar10 -# batch_size: 128 - batch_size: 32 + name: vel.data.source.vision.cifar10 + + +loader: + name: vel.data.loader + batch_size: 128 num_workers: 4 - normalize: True - augmentations: - - name: vel.augmentations.random_crop - mode: x - tags: ["train"] - width: 32 - height: 32 - padding: 4 - padding_mode: 'reflect' + transformations: + - name: vel.data.transformation.to_array + - name: vel.data.transformation.normalize + - name: vel.data.augmentation.random_crop + tags: ["train"] + width: 32 + height: 32 + padding: 4 + padding_mode: 'reflect' + - name: vel.data.augmentation.random_horizontal_flip + tags: ["train"] + - name: vel.data.transformation.image_to_tensor + + +optimizer: + name: vel.optimizer.adadelta - - name: vel.augmentations.random_horizontal_flip - mode: x - tags: ["train"] commands: train: - name: vel.commands.train_command + name: vel.command.train_command epochs: 12 - log_frequency: 20 - optimizer: - name: vel.optimizers.adadelta + + augvis: + name: vel.command.augvis_command + samples: 3 + cases: 3 diff --git a/examples-configs/classification/cifar10/cifar10_resnetv1_110.yaml b/examples-configs/classification/cifar10/cifar10_resnetv1_110.yaml index a34c34a5..fc01f757 100644 --- a/examples-configs/classification/cifar10/cifar10_resnetv1_110.yaml +++ b/examples-configs/classification/cifar10/cifar10_resnetv1_110.yaml @@ -2,32 +2,51 @@ name: 'cifar10_resnetv1_110' model: - name: vel.models.vision.cifar_resnet_v1 + name: vel.model.vision.cifar_resnet_v1 img_channels: 3 num_classes: 10 mode: basic # Basic or bottleneck blocks: [18, 18, 18] # ResNet110 + source: - name: vel.sources.vision.cifar10 + name: vel.data.source.vision.cifar10 + + +loader: + name: vel.data.loader batch_size: 128 -# batch_size: 32 num_workers: 4 - normalize: True - augmentations: - - name: vel.augmentations.random_crop - mode: x - tags: ["train"] - width: 32 - height: 32 - padding: 4 - padding_mode: 'reflect' + transformations: + - name: vel.data.transformation.to_array + - name: vel.data.transformation.normalize + - name: vel.data.augmentation.random_crop + tags: ["train"] + width: 32 + height: 32 + padding: 4 + padding_mode: 'reflect' + - name: vel.data.augmentation.random_horizontal_flip + tags: ["train"] + - name: vel.data.transformation.image_to_tensor + + +optimizer: + name: vel.optimizers.sgd + lr: 0.1 + weight_decay: 0.0001 + momentum: 0.9 + - - name: vel.augmentations.random_horizontal_flip - mode: x - tags: ["train"] +scheduler: + name: vel.scheduler.ladder + ladder: + - [5, 0.1] # Special iteration to kickstart convergence + - [75, 1.0] + - [40, 0.1] + - [80, 0.01] commands: @@ -36,38 +55,8 @@ commands: epochs: 200 log_frequency: 100 - optimizer: - name: vel.optimizers.sgd - lr: 0.1 - weight_decay: 0.0001 - momentum: 0.9 - -# optimizer: -# name: vel.optimizers.adam -# lr: 0.001 -# weight_decay: 0.001 - -# scheduler: -# name: vel.scheduler.multi_step -# milestones: [20, 50, 100, 150] -# gamma: 0.33 - - scheduler: - name: vel.scheduler.ladder - ladder: - - [5, 0.1] # Special iteration to kickstart convergence - - [75, 1.0] - - [40, 0.1] - - [80, 0.01] - -# scheduler: -# name: vel.scheduler.reduce_lr_on_plateau -# metric_name: val:accuracy -# factor: 0.33 -# patience: 15 -# cooldown: 20 -# min_lr: 0.5e-6 augvis: 
name: vel.commands.augvis_command samples: 5 + cases: 3 diff --git a/examples-configs/classification/cifar10/cifar10_resnetv1_32.yaml b/examples-configs/classification/cifar10/cifar10_resnetv1_32.yaml index d85ce5bf..f04e40fc 100644 --- a/examples-configs/classification/cifar10/cifar10_resnetv1_32.yaml +++ b/examples-configs/classification/cifar10/cifar10_resnetv1_32.yaml @@ -2,74 +2,59 @@ name: 'cifar10_resnetv1_32' model: - name: vel.models.vision.cifar_resnet_v1 + name: vel.model.vision.cifar_resnet_v1 img_channels: 3 num_classes: 10 mode: basic # Basic or bottleneck blocks: [5, 5, 5] # ResNet32 + source: - name: vel.sources.vision.cifar10 + name: vel.data.source.vision.cifar10 + + +loader: + name: vel.data.loader batch_size: 128 -# batch_size: 32 num_workers: 4 - normalize: True - augmentations: - - name: vel.augmentations.random_crop - mode: x - tags: ["train"] - width: 32 - height: 32 - padding: 4 - padding_mode: 'reflect' + transformations: + - name: vel.data.transformation.to_array + - name: vel.data.transformation.normalize + - name: vel.data.augmentation.random_crop + tags: ["train"] + width: 32 + height: 32 + padding: 4 + padding_mode: 'reflect' + - name: vel.data.augmentation.random_horizontal_flip + tags: ["train"] + - name: vel.data.transformation.image_to_tensor - - name: vel.augmentations.random_horizontal_flip - mode: x - tags: ["train"] +optimizer: + name: vel.optimizer.sgd + lr: 0.1 + weight_decay: 0.0001 + momentum: 0.9 + + +scheduler: + name: vel.scheduler.ladder + ladder: + - [80, 1.0] + - [40, 0.1] + - [80, 0.01] commands: train: - name: vel.commands.train_command + name: vel.command.train_command epochs: 200 - log_frequency: 100 - - - optimizer: - name: vel.optimizers.sgd - lr: 0.1 - weight_decay: 0.0001 - momentum: 0.9 - -# optimizer: -# name: vel.optimizers.adam -# lr: 0.001 -# weight_decay: 0.001 - -# scheduler: -# name: vel.scheduler.multi_step -# milestones: [20, 50, 100, 150] -# gamma: 0.33 - - scheduler: - name: vel.scheduler.ladder - ladder: - - [80, 1.0] - - [40, 0.1] - - [80, 0.01] - -# scheduler: -# name: vel.scheduler.reduce_lr_on_plateau -# metric_name: val:accuracy -# factor: 0.33 -# patience: 15 -# cooldown: 20 -# min_lr: 0.5e-6 + augvis: - name: vel.commands.augvis_command + name: vel.command.augvis_command samples: 5 cases: 3 diff --git a/examples-configs/classification/cifar10/cifar10_resnetv2_110.yaml b/examples-configs/classification/cifar10/cifar10_resnetv2_110.yaml index b9998cdf..9291ee8d 100644 --- a/examples-configs/classification/cifar10/cifar10_resnetv2_110.yaml +++ b/examples-configs/classification/cifar10/cifar10_resnetv2_110.yaml @@ -2,7 +2,7 @@ name: 'cifar10_resnetv2_110' model: - name: vel.models.vision.cifar_resnet_v2 + name: vel.model.vision.cifar_resnet_v2 img_channels: 3 num_classes: 10 @@ -10,63 +10,51 @@ model: blocks: [18, 18, 18] # ResNet110 source: - name: vel.sources.vision.cifar10 + name: vel.data.source.vision.cifar10 + + +loader: + name: vel.data.loader batch_size: 128 -# batch_size: 32 num_workers: 4 - normalize: True - augmentations: - - name: vel.augmentations.random_horizontal_flip - mode: x - tags: ["train"] - - name: vel.augmentations.random_crop - mode: x - tags: ["train"] - width: 32 - height: 32 - padding: 4 - padding_mode: 'reflect' + transformations: + - name: vel.data.transformation.to_array + - name: vel.data.transformation.normalize + - name: vel.data.augmentation.random_crop + tags: ["train"] + width: 32 + height: 32 + padding: 4 + padding_mode: 'reflect' + - name: 
vel.data.augmentation.random_horizontal_flip + tags: ["train"] + - name: vel.data.transformation.image_to_tensor -commands: - train: - name: vel.commands.train_command - epochs: 200 - log_frequency: 100 +optimizer: + name: vel.optimizer.sgd + lr: 0.1 + weight_decay: 0.0001 + momentum: 0.9 - optimizer: - name: vel.optimizers.sgd - lr: 0.1 - weight_decay: 0.0001 - momentum: 0.9 -# optimizer: -# name: vel.optimizers.adam -# lr: 0.001 -# weight_decay: 0.001 +scheduler: + name: vel.scheduler.ladder + ladder: + - [5, 0.1] # Special iteration to kickstart convergence + - [75, 1.0] + - [40, 0.1] + - [80, 0.01] -# scheduler: -# name: vel.scheduler.multi_step -# milestones: [20, 50, 100, 150] -# gamma: 0.33 - scheduler: - name: vel.scheduler.ladder - ladder: - - [5, 0.1] # Special iteration to kickstart convergence - - [75, 1.0] - - [40, 0.1] - - [80, 0.01] +commands: + train: + name: vel.command.train_command + epochs: 200 -# scheduler: -# name: vel.scheduler.reduce_lr_on_plateau -# metric_name: val:accuracy -# factor: 0.33 -# patience: 15 -# cooldown: 20 -# min_lr: 0.5e-6 augvis: - name: vel.commands.augvis_command + name: vel.command.augvis_command samples: 5 + cases: 3 diff --git a/examples-configs/classification/cifar10/cifar10_resnetv2_164_bottleneck.yaml b/examples-configs/classification/cifar10/cifar10_resnetv2_164_bottleneck.yaml index fb01e547..86d5dbaa 100644 --- a/examples-configs/classification/cifar10/cifar10_resnetv2_164_bottleneck.yaml +++ b/examples-configs/classification/cifar10/cifar10_resnetv2_164_bottleneck.yaml @@ -2,7 +2,7 @@ name: 'cifar10_resnetv2_110' model: - name: vel.models.vision.cifar_resnet_v2 + name: vel.model.vision.cifar_resnet_v2 img_channels: 3 num_classes: 10 @@ -10,64 +10,53 @@ model: mode: bottleneck # Basic or bottleneck blocks: [18, 18, 18] # ResNet110 + source: - name: vel.sources.vision.cifar10 + name: vel.data.source.vision.cifar10 + + +loader: + name: vel.data.loader batch_size: 128 -# batch_size: 32 num_workers: 4 - normalize: True - augmentations: - - name: vel.augmentations.random_horizontal_flip - mode: x - tags: ["train"] - - name: vel.augmentations.random_crop - mode: x - tags: ["train"] - width: 32 - height: 32 - padding: 4 - padding_mode: 'reflect' + transformations: + - name: vel.data.transformation.to_array + - name: vel.data.transformation.normalize + - name: vel.data.augmentation.random_crop + tags: ["train"] + width: 32 + height: 32 + padding: 4 + padding_mode: 'reflect' + - name: vel.data.augmentation.random_horizontal_flip + tags: ["train"] + - name: vel.data.transformation.image_to_tensor + + +optimizer: + name: vel.optimizer.sgd + lr: 0.1 + weight_decay: 0.0001 + momentum: 0.9 + + +scheduler: + name: vel.scheduler.ladder + ladder: + - [5, 0.1] # Special iteration to kickstart convergence + - [75, 1.0] + - [40, 0.1] + - [80, 0.01] commands: train: - name: vel.commands.train_command + name: vel.command.train_command epochs: 200 - log_frequency: 100 - - optimizer: - name: vel.optimizers.sgd - lr: 0.1 - weight_decay: 0.0001 - momentum: 0.9 - -# optimizer: -# name: vel.optimizers.adam -# lr: 0.001 -# weight_decay: 0.001 - -# scheduler: -# name: vel.scheduler.multi_step -# milestones: [20, 50, 100, 150] -# gamma: 0.33 - - scheduler: - name: vel.scheduler.ladder - ladder: - - [5, 0.1] # Special iteration to kickstart convergence - - [75, 1.0] - - [40, 0.1] - - [80, 0.01] - -# scheduler: -# name: vel.scheduler.reduce_lr_on_plateau -# metric_name: val:accuracy -# factor: 0.33 -# patience: 15 -# cooldown: 20 -# min_lr: 0.5e-6 + augvis: - name: 
vel.commands.augvis_command + name: vel.command.augvis_command samples: 5 + cases: 3 diff --git a/examples-configs/classification/cifar10/cifar10_resnetv2_32.yaml b/examples-configs/classification/cifar10/cifar10_resnetv2_32.yaml index 4e0684a4..7f38b3fb 100644 --- a/examples-configs/classification/cifar10/cifar10_resnetv2_32.yaml +++ b/examples-configs/classification/cifar10/cifar10_resnetv2_32.yaml @@ -2,7 +2,7 @@ name: 'cifar10_resnetv2_110' model: - name: vel.models.vision.cifar_resnet_v2 + name: vel.model.vision.cifar_resnet_v2 img_channels: 3 num_classes: 10 @@ -10,63 +10,51 @@ model: blocks: [5, 5, 5] # ResNet32 source: - name: vel.sources.vision.cifar10 + name: vel.data.source.vision.cifar10 + + +loader: + name: vel.data.loader batch_size: 128 -# batch_size: 32 num_workers: 4 - normalize: True - augmentations: - - name: vel.augmentations.random_horizontal_flip - mode: x - tags: ["train"] - - name: vel.augmentations.random_crop - mode: x - tags: ["train"] - width: 32 - height: 32 - padding: 4 - padding_mode: 'reflect' + transformations: + - name: vel.data.transformation.to_array + - name: vel.data.transformation.normalize + - name: vel.data.augmentation.random_crop + tags: ["train"] + width: 32 + height: 32 + padding: 4 + padding_mode: 'reflect' + - name: vel.data.augmentation.random_horizontal_flip + tags: ["train"] + - name: vel.data.transformation.image_to_tensor -commands: - train: - name: vel.commands.train_command - epochs: 200 - log_frequency: 100 +optimizer: + name: vel.optimizer.sgd + lr: 0.1 + weight_decay: 0.0001 + momentum: 0.9 - optimizer: - name: vel.optimizers.sgd - lr: 0.1 - weight_decay: 0.0001 - momentum: 0.9 -# optimizer: -# name: vel.optimizers.adam -# lr: 0.001 -# weight_decay: 0.001 +scheduler: + name: vel.scheduler.ladder + ladder: + - [5, 0.1] # Special iteration to kickstart convergence + - [75, 1.0] + - [40, 0.1] + - [80, 0.01] -# scheduler: -# name: vel.scheduler.multi_step -# milestones: [20, 50, 100, 150] -# gamma: 0.33 - scheduler: - name: vel.scheduler.ladder - ladder: - - [5, 0.1] # Special iteration to kickstart convergence - - [75, 1.0] - - [40, 0.1] - - [80, 0.01] +commands: + train: + name: vel.command.train_command + epochs: 200 -# scheduler: -# name: vel.scheduler.reduce_lr_on_plateau -# metric_name: val:accuracy -# factor: 0.33 -# patience: 15 -# cooldown: 20 -# min_lr: 0.5e-6 augvis: - name: vel.commands.augvis_command + name: vel.command.augvis_command samples: 5 + cases: 3 diff --git a/examples-configs/classification/cifar10/cifar10_resnext_29_c1.yaml b/examples-configs/classification/cifar10/cifar10_resnext_29_c1.yaml index 88099c4e..eabd968a 100644 --- a/examples-configs/classification/cifar10/cifar10_resnext_29_c1.yaml +++ b/examples-configs/classification/cifar10/cifar10_resnext_29_c1.yaml @@ -2,7 +2,7 @@ name: 'cifar10_resnetv1_32' model: - name: vel.models.vision.cifar_resnext + name: vel.model.vision.cifar_resnext img_channels: 3 num_classes: 10 @@ -13,64 +13,52 @@ model: divisor: 4 cardinality: 1 + source: - name: vel.sources.vision.cifar10 + name: vel.data.source.vision.cifar10 + + +loader: + name: vel.data.loader batch_size: 128 -# batch_size: 32 num_workers: 4 - normalize: True - augmentations: - - name: vel.augmentations.random_horizontal_flip - mode: x - tags: ["train"] - - name: vel.augmentations.random_crop - mode: x - tags: ["train"] - width: 32 - height: 32 - padding: 4 - padding_mode: 'reflect' + transformations: + - name: vel.data.transformation.to_array + - name: vel.data.transformation.normalize + - name: 
vel.data.augmentation.random_crop + tags: ["train"] + width: 32 + height: 32 + padding: 4 + padding_mode: 'reflect' + - name: vel.data.augmentation.random_horizontal_flip + tags: ["train"] + - name: vel.data.transformation.image_to_tensor + + +optimizer: + name: vel.optimizer.sgd + lr: 0.1 + weight_decay: 0.0001 + momentum: 0.9 + + +scheduler: + name: vel.scheduler.ladder + ladder: + - [150, 1.0] + - [75, 0.1] + - [75, 0.01] commands: train: - name: vel.commands.train_command + name: vel.command.train_command epochs: 300 - log_frequency: 100 - - - optimizer: - name: vel.optimizers.sgd - lr: 0.1 - weight_decay: 0.0001 - momentum: 0.9 - -# optimizer: -# name: vel.optimizers.adam -# lr: 0.001 -# weight_decay: 0.001 - -# scheduler: -# name: vel.scheduler.multi_step -# milestones: [20, 50, 100, 150] -# gamma: 0.33 - - scheduler: - name: vel.scheduler.ladder - ladder: - - [150, 1.0] - - [75, 0.1] - - [75, 0.01] - -# scheduler: -# name: vel.scheduler.reduce_lr_on_plateau -# metric_name: val:accuracy -# factor: 0.33 -# patience: 15 -# cooldown: 20 -# min_lr: 0.5e-6 + augvis: - name: vel.commands.augvis_command + name: vel.command.augvis_command samples: 5 + cases: 3 diff --git a/examples-configs/classification/cifar10/cifar10_resnext_29_c8.yaml b/examples-configs/classification/cifar10/cifar10_resnext_29_c8.yaml index 23dcb949..9e41c5aa 100644 --- a/examples-configs/classification/cifar10/cifar10_resnext_29_c8.yaml +++ b/examples-configs/classification/cifar10/cifar10_resnext_29_c8.yaml @@ -2,7 +2,7 @@ name: 'cifar10_resnetv1_32' model: - name: vel.models.vision.cifar_resnext + name: vel.model.vision.cifar_resnext img_channels: 3 num_classes: 10 @@ -13,65 +13,52 @@ model: divisor: 4 cardinality: 8 + source: - name: vel.sources.vision.cifar10 - batch_size: 32 -# batch_size: 32 + name: vel.data.source.vision.cifar10 + + +loader: + name: vel.data.loader + batch_size: 128 num_workers: 4 - normalize: True - augmentations: - - name: vel.augmentations.random_horizontal_flip - mode: x - tags: ["train"] - - name: vel.augmentations.random_crop - mode: x - tags: ["train"] - width: 32 - height: 32 - padding: 4 - padding_mode: 'reflect' + transformations: + - name: vel.data.transformation.to_array + - name: vel.data.transformation.normalize + - name: vel.data.augmentation.random_crop + tags: ["train"] + width: 32 + height: 32 + padding: 4 + padding_mode: 'reflect' + - name: vel.data.augmentation.random_horizontal_flip + tags: ["train"] + - name: vel.data.transformation.image_to_tensor + +optimizer: + name: vel.optimizer.sgd + lr: 0.1 + weight_decay: 0.0001 + momentum: 0.9 + + +scheduler: + name: vel.scheduler.ladder + ladder: + - [150, 1.0] + - [75, 0.1] + - [75, 0.01] commands: train: - name: vel.commands.train_command + name: vel.command.train_command epochs: 300 - log_frequency: 100 - - - optimizer: - name: vel.optimizers.sgd - lr: 0.1 - weight_decay: 0.0001 - momentum: 0.9 - -# optimizer: -# name: vel.optimizers.adam -# lr: 0.001 -# weight_decay: 0.001 - -# scheduler: -# name: vel.scheduler.multi_step -# milestones: [20, 50, 100, 150] -# gamma: 0.33 - - scheduler: - name: vel.scheduler.ladder - ladder: - - [150, 1.0] - - [75, 0.1] - - [75, 0.01] - -# scheduler: -# name: vel.scheduler.reduce_lr_on_plateau -# metric_name: val:accuracy -# factor: 0.33 -# patience: 15 -# cooldown: 20 -# min_lr: 0.5e-6 + augvis: - name: vel.commands.augvis_command + name: vel.command.augvis_command samples: 5 + cases: 3 diff --git a/examples-configs/classification/mnist/mnist_cnn_01.yaml 
b/examples-configs/classification/mnist/mnist_cnn_01.yaml index 6e06faea..e58d5dd9 100644 --- a/examples-configs/classification/mnist/mnist_cnn_01.yaml +++ b/examples-configs/classification/mnist/mnist_cnn_01.yaml @@ -30,10 +30,7 @@ commands: train: name: vel.command.train_command epochs: 12 - log_frequency: 100 - checkpoint: - metric: 'val:loss' augvis: name: vel.command.augvis_command diff --git a/vel/api/__init__.py b/vel/api/__init__.py index 1e5c9b65..89d2026f 100644 --- a/vel/api/__init__.py +++ b/vel/api/__init__.py @@ -1,14 +1,13 @@ -from .transformation import Transformation from .callback import Callback from .info import BatchInfo, EpochInfo, TrainingInfo from .model import ( Model, GradientModel, LossFunctionModel, BackboneModel, LinearBackboneModel ) +from .model_config import ModelConfig from .model_factory import ModelFactory from .optimizer import OptimizerFactory from .schedule import Schedule from .scheduler import SchedulerFactory from .source import Source from .storage import Storage -from .train_phase import TrainPhase, EmptyTrainPhase -from .model_config import ModelConfig +from .transformation import Transformation, ScopedTransformation diff --git a/vel/api/transformation.py b/vel/api/transformation.py index 80ee433e..9f84e9de 100644 --- a/vel/api/transformation.py +++ b/vel/api/transformation.py @@ -3,6 +3,10 @@ class Transformation: def __init__(self, tags=None): self.tags = ['train', 'val', 'test'] if tags is None else tags + def initialize(self, source): + """ Initialize transformation from source """ + pass + def __call__(self, datapoint): """ Do the transformation """ raise NotImplementedError @@ -20,6 +24,10 @@ def __init__(self, scope=None, tags=None): self.scope = ['x'] if scope is None else scope + # If there is only one, we wrap it as a list + if isinstance(self.scope, str): + self.scope = [self.scope] + def transform(self, value): """ Actual transformation code """ raise NotImplementedError diff --git a/vel/data/augmentation/normalize.py b/vel/data/augmentation/normalize.py deleted file mode 100644 index d67a2c6a..00000000 --- a/vel/data/augmentation/normalize.py +++ /dev/null @@ -1,24 +0,0 @@ -import numpy as np - -import vel.api as api - - -class Normalize(api.Transformation): - """ Normalize input mean and standard deviation """ - - def __init__(self, mean, std, mode='x', tags=None): - super().__init__(mode, tags) - self.mean = np.array(mean, dtype=np.float32) - self.std = np.array(std, dtype=np.float32) - - def __call__(self, x_data): - return (x_data - self.mean) / self.std - - def denormalize(self, x_data): - """ Operation reverse to normalization """ - return x_data * self.std + self.mean - - -def create(mean, std, mode='x', tags=None): - """ Vel factory function """ - return Normalize(mean=mean, std=std, mode=mode, tags=tags) diff --git a/vel/data/augmentation/random_crop.py b/vel/data/augmentation/random_crop.py index bbc56dec..ffdf89c1 100644 --- a/vel/data/augmentation/random_crop.py +++ b/vel/data/augmentation/random_crop.py @@ -6,10 +6,11 @@ import numbers import random -import vel.data as data +import vel.api as api +import vel.data.operation.image_op as image_op -class RandomCrop(data.Augmentation): +class RandomCrop(api.ScopedTransformation): """Crop the given PIL Image at a random location. Args: @@ -24,8 +25,8 @@ class RandomCrop(data.Augmentation): desired size to avoid raising an exception. 
""" - def __init__(self, size, padding=0, padding_mode='constant', pad_if_needed=False, mode='x', tags=None): - super().__init__(mode, tags) + def __init__(self, size, padding=0, padding_mode='constant', pad_if_needed=False, scope='x', tags=None): + super().__init__(scope, tags) if isinstance(size, numbers.Number): self.size = (int(size), int(size)) @@ -34,7 +35,7 @@ def __init__(self, size, padding=0, padding_mode='constant', pad_if_needed=False self.padding = padding self.padding_mode = padding_mode - self.padding_mode_cv = data.mode_to_cv2(self.padding_mode) + self.padding_mode_cv = image_op.mode_to_cv2(self.padding_mode) self.pad_if_needed = pad_if_needed @staticmethod @@ -58,7 +59,7 @@ def get_params(img, output_size): j = random.randint(0, w - tw) return i, j, th, tw - def __call__(self, img): + def transform(self, img): """ Args: img (PIL Image): Image to be cropped. @@ -67,24 +68,24 @@ def __call__(self, img): PIL Image: Cropped image. """ if self.padding > 0: - img = data.pad(img, self.padding, mode=self.padding_mode_cv) + img = image_op.pad(img, self.padding, mode=self.padding_mode_cv) # pad the width if needed if self.pad_if_needed and img.size[0] < self.size[1]: - img = data.pad(img, (int((1 + self.size[1] - img.size[0]) / 2), 0), mode=self.padding_mode_cv) + img = image_op.pad(img, (int((1 + self.size[1] - img.size[0]) / 2), 0), mode=self.padding_mode_cv) # pad the height if needed if self.pad_if_needed and img.size[1] < self.size[0]: - img = data.pad(img, (0, int((1 + self.size[0] - img.size[1]) / 2)), mode=self.padding_mode_cv) + img = image_op.pad(img, (0, int((1 + self.size[0] - img.size[1]) / 2)), mode=self.padding_mode_cv) i, j, h, w = self.get_params(img, self.size) - return data.crop(img, j, i, w, h) + return image_op.crop(img, j, i, w, h) def __repr__(self): return self.__class__.__name__ + '(size={0}, padding={1})'.format(self.size, self.padding) -def create(width, height, padding=0, padding_mode='constant', mode='x', tags=None): +def create(width, height, padding=0, padding_mode='constant', scope='x', tags=None): """ Vel factory function """ - return RandomCrop(size=(width, height), padding=padding, padding_mode=padding_mode, mode=mode, tags=tags) + return RandomCrop(size=(width, height), padding=padding, padding_mode=padding_mode, scope=scope, tags=tags) diff --git a/vel/data/augmentation/random_horizontal_flip.py b/vel/data/augmentation/random_horizontal_flip.py index ba397519..b50caa8f 100644 --- a/vel/data/augmentation/random_horizontal_flip.py +++ b/vel/data/augmentation/random_horizontal_flip.py @@ -4,14 +4,14 @@ import vel.api as api -class RandomHorizontalFlip(api.Transformation): +class RandomHorizontalFlip(api.ScopedTransformation): """ Apply a horizontal flip randomly to input images """ - def __init__(self, p=0.5, mode='x', tags=None): - super().__init__(mode, tags) + def __init__(self, p=0.5, scope='x', tags=None): + super().__init__(scope, tags) self.p = p - def __call__(self, img): + def transform(self, img): """ Args: img (PIL Image): Image to be flipped. 
@@ -27,5 +27,5 @@ def __repr__(self): return self.__class__.__name__ + '(p={})'.format(self.p) -def create(p=0.5): - return RandomHorizontalFlip(p) +def create(p=0.5, scope='x', tags=None): + return RandomHorizontalFlip(p, scope=scope, tags=tags) diff --git a/vel/data/dataflow.py b/vel/data/dataflow.py index ae4b28ad..4c28a96d 100644 --- a/vel/data/dataflow.py +++ b/vel/data/dataflow.py @@ -17,6 +17,10 @@ class DataFlow(data.Dataset): @staticmethod def transform(source: Source, transformations: typing.List[Transformation]) -> Source: """ Transform supplied source with a list of given transformations """ + # Initialize transformations from source + for t in transformations: + t.initialize(source) + return Source( train=DataFlow(source.train, transformations, 'train'), validation=DataFlow(source.validation, transformations, 'val'), @@ -25,18 +29,19 @@ def transform(source: Source, transformations: typing.List[Transformation]) -> S def __init__(self, dataset, transformations, tag): self.dataset = dataset + self.tag = tag if transformations is None: self.transformations = [] else: self.transformations = [t for t in transformations if tag in t.tags] - self.tag = tag - def get_raw(self, index): + """ Get raw data point """ return pre_map(self.dataset[index]) def __getitem__(self, index): + """ Get data point from the dataset """ datapoint = self.get_raw(index) for t in self.transformations: @@ -52,4 +57,5 @@ def denormalize(self, datapoint): return datapoint def __len__(self): + """ Length of the dataset """ return len(self.dataset) diff --git a/vel/data/loader.py b/vel/data/loader.py index 4e98742f..a75a3453 100644 --- a/vel/data/loader.py +++ b/vel/data/loader.py @@ -54,14 +54,16 @@ def __getitem__(self, item): @property def loader(self): + """ Get a dict of loaders """ return self._loaders @property def size(self): + """ Get a dict of sizes of each loader """ return self._loader_sizes -def create(source: Source, batch_size: int, num_workers: int=0, transformations: typing.Optional[list] = None): +def create(source: Source, batch_size: int, num_workers: int = 0, transformations: typing.Optional[list] = None): """ Vel factory function """ return Loader( source=source, diff --git a/vel/data/source/vision/cifar10.py b/vel/data/source/vision/cifar10.py index 53ec1080..17641b59 100644 --- a/vel/data/source/vision/cifar10.py +++ b/vel/data/source/vision/cifar10.py @@ -1,13 +1,9 @@ from torchvision import datasets -from vel.api import SupervisedTrainingData +from vel.api import Source -from vel.augmentation.normalize import Normalize -from vel.augmentation.to_tensor import ToTensor -from vel.augmentation.to_array import ToArray - -def create(model_config, batch_size, normalize=True, num_workers=0, augmentations=None): +def create(model_config): """ Create a CIFAR10 dataset, normalized. Augmentations are the same as in the literature benchmarking CIFAR performance. 
@@ -17,21 +13,31 @@ def create(model_config, batch_size, normalize=True, num_workers=0, augmentation train_dataset = datasets.CIFAR10(path, train=True, download=True) test_dataset = datasets.CIFAR10(path, train=False, download=True) - augmentations = [ToArray()] + (augmentations if augmentations is not None else []) - - if normalize: - train_data = train_dataset.data - mean_value = (train_data / 255).mean(axis=(0, 1, 2)) - std_value = (train_data / 255).std(axis=(0, 1, 2)) - - augmentations.append(Normalize(mean=mean_value, std=std_value, tags=['train', 'val'])) - - augmentations.append(ToTensor()) - - return SupervisedTrainingData( - train_dataset, - test_dataset, - batch_size=batch_size, - num_workers=num_workers, - augmentations=augmentations + train_data = train_dataset.data + mean_value = (train_data / 255).mean(axis=(0, 1, 2)) + std_value = (train_data / 255).std(axis=(0, 1, 2)) + + return Source( + train=train_dataset, + validation=test_dataset, + metadata={ + 'train_mean': mean_value, + 'train_std': std_value + } ) + + # augmentations = [ToArray()] + (augmentations if augmentations is not None else []) + + # if normalize: + # + # augmentations.append(Normalize(mean=mean_value, std=std_value, tags=['train', 'val'])) + # + # augmentations.append(ToTensor()) + # + # return SupervisedTrainingData( + # train_dataset, + # test_dataset, + # batch_size=batch_size, + # num_workers=num_workers, + # augmentations=augmentations + # ) diff --git a/vel/data/source/vision/mnist.py b/vel/data/source/vision/mnist.py index 8ab6b49e..16640cac 100644 --- a/vel/data/source/vision/mnist.py +++ b/vel/data/source/vision/mnist.py @@ -3,7 +3,6 @@ from vel.api import Source - def create(model_config): """ Create a MNIST dataset, normalized """ path = model_config.data_dir('mnist') @@ -23,29 +22,3 @@ def create(model_config): 'train_std': std_value } ) - -# from vel.api import SupervisedTrainingData -# -# from vel.augmentations.normalize import Normalize -# from vel.augmentations.to_tensor import ToTensor -# from vel.augmentations.to_array import ToArray -# from vel.augmentations.unsupervised import Unsupervised - - # augmentations = [ToArray()] + (augmentations if augmentations is not None else []) - # - # if normalize: - # - # augmentations.append(Normalize(mean=mean_value, std=std_value, tags=['train', 'val'])) - # - # augmentations.append(ToTensor()) - # - # if unsupervised: - # augmentations.append(Unsupervised()) - # - # return SupervisedTrainingData( - # train_dataset, - # test_dataset, - # num_workers=num_workers, - # batch_size=batch_size, - # augmentations=augmentations - # ) diff --git a/vel/data/transformation/image_to_tensor.py b/vel/data/transformation/image_to_tensor.py index 75eebc32..aae7b486 100644 --- a/vel/data/transformation/image_to_tensor.py +++ b/vel/data/transformation/image_to_tensor.py @@ -11,6 +11,11 @@ def transform(self, value): # First let's make sure it's actually a numpy array value = np.asarray(value) + if value.dtype == np.uint8: + value = value.astype(np.float32) / 255.0 + + value = value.astype(np.float32) + if len(value.shape) == 2: # If the image has only one channel, it still needs to be specified value = value.reshape(value.shape[0], value.shape[1], 1) diff --git a/vel/data/transformation/normalize.py b/vel/data/transformation/normalize.py new file mode 100644 index 00000000..ccfa5064 --- /dev/null +++ b/vel/data/transformation/normalize.py @@ -0,0 +1,28 @@ +import vel.api as api + + +class Normalize(api.ScopedTransformation): + """ Normalize input mean and standard 
deviation """ + + def __init__(self, scope='x', tags=None): + super().__init__(scope, tags) + + self.mean = None + self.std = None + + def initialize(self, source): + """ Initialize transformation from source """ + self.mean = source.metadata['train_mean'] + self.std = source.metadata['train_std'] + + def transform(self, value): + return (value - self.mean) / self.std + + def denormalization_transform(self, value): + """ Operation reverse to normalization """ + return value * self.std + self.mean + + +def create(mode='x', tags=None): + """ Vel factory function """ + return Normalize(scope=mode, tags=tags) diff --git a/vel/api/train_phase.py b/vel/train/train_phase.py similarity index 100% rename from vel/api/train_phase.py rename to vel/train/train_phase.py From 0d4445253b748423639623ee9b7e8ce565ff0be9 Mon Sep 17 00:00:00 2001 From: Million Integrals Date: Sun, 23 Jun 2019 22:28:51 -0700 Subject: [PATCH 060/162] A bit more work on unifying metrics. Adding samples/sec metric. --- vel/api/callback.py | 18 +++---------- vel/api/info.py | 21 +++------------ vel/callback/sample_tracker.py | 26 +++++++++++++++++++ vel/callback/time_tracker.py | 3 ++- vel/command/train_command.py | 6 +++-- vel/data/dataflow.py | 4 +++ vel/metric/__init__.py | 6 ++--- vel/metric/accuracy.py | 2 +- vel/metric/base/__init__.py | 0 vel/metric/{ => base}/averaging_metric.py | 0 vel/metric/{ => base}/base_metric.py | 0 vel/metric/{ => base}/summing_metric.py | 0 vel/metric/{ => base}/value_metric.py | 0 vel/metric/loss_metric.py | 2 +- vel/metric/samples_per_sec.py | 11 ++++++++ ...fered_mixed_policy_iteration_reinforcer.py | 4 +-- ...uffered_off_policy_iteration_reinforcer.py | 4 +-- .../on_policy_iteration_reinforcer.py | 4 +-- vel/storage/streaming/visdom.py | 2 +- vel/train/trainer.py | 19 ++++++++------ 20 files changed, 78 insertions(+), 54 deletions(-) create mode 100644 vel/metric/base/__init__.py rename vel/metric/{ => base}/averaging_metric.py (100%) rename vel/metric/{ => base}/base_metric.py (100%) rename vel/metric/{ => base}/summing_metric.py (100%) rename vel/metric/{ => base}/value_metric.py (100%) create mode 100644 vel/metric/samples_per_sec.py diff --git a/vel/api/callback.py b/vel/api/callback.py index 6c28ad10..6a4e7819 100644 --- a/vel/api/callback.py +++ b/vel/api/callback.py @@ -1,3 +1,5 @@ +import typing + from .info import EpochInfo, BatchInfo, TrainingInfo @@ -38,30 +40,18 @@ def on_epoch_end(self, epoch_info: EpochInfo) -> None: """ pass - def on_batch_begin(self, batch_info: BatchInfo) -> None: + def on_batch_begin(self, batch_info: BatchInfo, dataset: typing.Optional[str] = None) -> None: """ Runs for each batch before batch is evaluated """ pass - def on_batch_end(self, batch_info: BatchInfo) -> None: + def on_batch_end(self, batch_info: BatchInfo, dataset: typing.Optional[str] = None) -> None: """ Runs for each batch after batch is evaluated """ pass - def on_validation_batch_begin(self, batch_info: BatchInfo) -> None: - """ - Supervised learning only - runs before validation batch - """ - pass - - def on_validation_batch_end(self, batch_info: BatchInfo) -> None: - """ - Supervised learning only - runs after validation batch - """ - pass - def write_state_dict(self, training_info: TrainingInfo, hidden_state_dict: dict) -> None: """ Persist callback state to the state dictionary diff --git a/vel/api/info.py b/vel/api/info.py index a84d1873..4e5957d4 100644 --- a/vel/api/info.py +++ b/vel/api/info.py @@ -254,28 +254,15 @@ def __init__(self, epoch_info: EpochInfo, batch_number: int): 
self.batch_number = batch_number self.data_dict = {} - def on_batch_begin(self): + def on_batch_begin(self, dataset=None): """ Initialize batch processing """ for callback in self.callbacks: - callback.on_batch_begin(self) + callback.on_batch_begin(self, dataset) - def on_batch_end(self): + def on_batch_end(self, dataset=None): """ Finalize batch processing """ for callback in self.callbacks: - callback.on_batch_end(self) - - # Even with all the experience replay, we count the single rollout as a single batch - self.epoch_info.result_accumulator.calculate(self) - - def on_validation_batch_begin(self): - """ Initialize batch processing """ - for callback in self.callbacks: - callback.on_validation_batch_begin(self) - - def on_validation_batch_end(self): - """ Finalize batch processing """ - for callback in self.callbacks: - callback.on_validation_batch_end(self) + callback.on_batch_end(self, dataset) # Even with all the experience replay, we count the single rollout as a single batch self.epoch_info.result_accumulator.calculate(self) diff --git a/vel/callback/sample_tracker.py b/vel/callback/sample_tracker.py index e69de29b..a1c9d789 100644 --- a/vel/callback/sample_tracker.py +++ b/vel/callback/sample_tracker.py @@ -0,0 +1,26 @@ +import typing +import collections + +from vel.api import BatchInfo, TrainingInfo, Callback + + +class SampleTracker(Callback): + """ Callback that calculates number of samples processed during the training process """ + + def on_initialization(self, training_info: TrainingInfo): + training_info['samples'] = collections.defaultdict(int) + + def on_batch_end(self, batch_info: BatchInfo, dataset: typing.Optional[str] = None) -> None: + samples = batch_info['datapoint']['x'].shape[0] + + batch_info['samples'] = samples + + if dataset is not None: + batch_info.training_info['samples'][dataset] += samples + + def write_state_dict(self, training_info: TrainingInfo, hidden_state_dict: dict): + hidden_state_dict['sample_tracker/samples'] = training_info['samples'] + + def load_state_dict(self, training_info: TrainingInfo, hidden_state_dict: dict): + training_info['samples'] = hidden_state_dict['sample_tracker/samples'] + diff --git a/vel/callback/time_tracker.py b/vel/callback/time_tracker.py index 8a59f9d1..df280213 100644 --- a/vel/callback/time_tracker.py +++ b/vel/callback/time_tracker.py @@ -5,6 +5,7 @@ class TimeTracker(Callback): """ Track training time - in seconds """ + def __init__(self): self.start_time = None @@ -14,7 +15,7 @@ def on_initialization(self, training_info: TrainingInfo): def on_train_begin(self, training_info: TrainingInfo): self.start_time = time.time() - def on_batch_end(self, batch_info: BatchInfo): + def on_batch_end(self, batch_info: BatchInfo, dataset=None): current_time = time.time() batch_time = current_time - self.start_time self.start_time = current_time diff --git a/vel/command/train_command.py b/vel/command/train_command.py index ea7e06d5..6a65d993 100644 --- a/vel/command/train_command.py +++ b/vel/command/train_command.py @@ -4,7 +4,9 @@ import vel.data as data import vel.train as train +from vel.metric.samples_per_sec import SamplesPerSec from vel.callback.time_tracker import TimeTracker +from vel.callback.sample_tracker import SampleTracker class SimpleTrainCommand: @@ -37,7 +39,7 @@ def run(self): callbacks = self.gather_callbacks(optimizer) # Metrics to track through this training - metrics = learner.metrics() + metrics = learner.metrics() + [SamplesPerSec()] # Check if training was already started and potentially continue where 
we left off
         training_info = self.resume_training(learner, callbacks, metrics)
@@ -66,7 +68,7 @@ def run(self):
 
     def gather_callbacks(self, optimizer) -> list:
         """ Gather all the callbacks to be used in this training run """
-        callbacks = [TimeTracker()]
+        callbacks = [TimeTracker(), SampleTracker()]
 
         if self.scheduler_factory is not None:
             callbacks.append(self.scheduler_factory.instantiate(optimizer))
diff --git a/vel/data/dataflow.py b/vel/data/dataflow.py
index 4c28a96d..b0731729 100644
--- a/vel/data/dataflow.py
+++ b/vel/data/dataflow.py
@@ -8,6 +8,10 @@ def pre_map(datapoint):
     """ Map datapoint from a list into the dictionary """
     if isinstance(datapoint, (list, tuple)):
         return dict(zip("xyzw", datapoint))
+
+    if 'x' in datapoint:
+        datapoint['size'] = datapoint['x'].shape[0]
+
     return datapoint
diff --git a/vel/metric/__init__.py b/vel/metric/__init__.py
index 37708fa3..be14e6c4 100644
--- a/vel/metric/__init__.py
+++ b/vel/metric/__init__.py
@@ -1,3 +1,3 @@
-from .base_metric import BaseMetric, MetricKey  # noqa
-from .averaging_metric import AveragingMetric, AveragingNamedMetric, AveragingSupervisedMetric  # noqa
-from .value_metric import ValueMetric  # noqa
+from .base.base_metric import BaseMetric, MetricKey  # noqa
+from .base.averaging_metric import AveragingMetric, AveragingNamedMetric, AveragingSupervisedMetric  # noqa
+from .base.value_metric import ValueMetric  # noqa
diff --git a/vel/metric/accuracy.py b/vel/metric/accuracy.py
index 44dcdb2f..1416e61a 100644
--- a/vel/metric/accuracy.py
+++ b/vel/metric/accuracy.py
@@ -1,4 +1,4 @@
-from vel.metric.averaging_metric import AveragingSupervisedMetric
+from vel.metric.base.averaging_metric import AveragingSupervisedMetric
 
 
 class Accuracy(AveragingSupervisedMetric):
diff --git a/vel/metric/base/__init__.py b/vel/metric/base/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/vel/metric/averaging_metric.py b/vel/metric/base/averaging_metric.py
similarity index 100%
rename from vel/metric/averaging_metric.py
rename to vel/metric/base/averaging_metric.py
diff --git a/vel/metric/base_metric.py b/vel/metric/base/base_metric.py
similarity index 100%
rename from vel/metric/base_metric.py
rename to vel/metric/base/base_metric.py
diff --git a/vel/metric/summing_metric.py b/vel/metric/base/summing_metric.py
similarity index 100%
rename from vel/metric/summing_metric.py
rename to vel/metric/base/summing_metric.py
diff --git a/vel/metric/value_metric.py b/vel/metric/base/value_metric.py
similarity index 100%
rename from vel/metric/value_metric.py
rename to vel/metric/base/value_metric.py
diff --git a/vel/metric/loss_metric.py b/vel/metric/loss_metric.py
index f4fa9df5..b2f45ce6 100644
--- a/vel/metric/loss_metric.py
+++ b/vel/metric/loss_metric.py
@@ -1,4 +1,4 @@
-from vel.metric.averaging_metric import AveragingMetric
+from vel.metric.base.averaging_metric import AveragingMetric
 
 
 class Loss(AveragingMetric):
diff --git a/vel/metric/samples_per_sec.py b/vel/metric/samples_per_sec.py
new file mode 100644
index 00000000..edac2291
--- /dev/null
+++ b/vel/metric/samples_per_sec.py
@@ -0,0 +1,11 @@
+from vel.metric.base.averaging_metric import AveragingMetric
+
+
+class SamplesPerSec(AveragingMetric):
+    """ Metric tracking the number of samples processed per second """
+    def __init__(self, scope="train"):
+        super().__init__("samples_per_sec", scope=scope)
+
+    def _value_function(self, batch_info):
+        """ Number of samples in the batch divided by the batch processing time """
+        return batch_info['samples'] / batch_info['time']
diff --git a/vel/rl/reinforcer/buffered_mixed_policy_iteration_reinforcer.py
b/vel/rl/reinforcer/buffered_mixed_policy_iteration_reinforcer.py index c2d97e31..4faa513f 100644 --- a/vel/rl/reinforcer/buffered_mixed_policy_iteration_reinforcer.py +++ b/vel/rl/reinforcer/buffered_mixed_policy_iteration_reinforcer.py @@ -84,9 +84,9 @@ def train_epoch(self, epoch_info: EpochInfo, interactive=True): for batch_idx in iterator: batch_info = BatchInfo(epoch_info, batch_idx) - batch_info.on_batch_begin() + batch_info.on_batch_begin('train') self.train_batch(batch_info) - batch_info.on_batch_end() + batch_info.on_batch_end('train') epoch_info.result_accumulator.freeze_results() epoch_info.on_epoch_end() diff --git a/vel/rl/reinforcer/buffered_off_policy_iteration_reinforcer.py b/vel/rl/reinforcer/buffered_off_policy_iteration_reinforcer.py index c2ec6fe8..dbef9bd2 100644 --- a/vel/rl/reinforcer/buffered_off_policy_iteration_reinforcer.py +++ b/vel/rl/reinforcer/buffered_off_policy_iteration_reinforcer.py @@ -82,9 +82,9 @@ def train_epoch(self, epoch_info: EpochInfo, interactive=True) -> None: for batch_idx in iterator: batch_info = BatchInfo(epoch_info, batch_idx) - batch_info.on_batch_begin() + batch_info.on_batch_begin('train') self.train_batch(batch_info) - batch_info.on_batch_end() + batch_info.on_batch_end('train') epoch_info.result_accumulator.freeze_results() epoch_info.on_epoch_end() diff --git a/vel/rl/reinforcer/on_policy_iteration_reinforcer.py b/vel/rl/reinforcer/on_policy_iteration_reinforcer.py index d889c681..d9ff7ab9 100644 --- a/vel/rl/reinforcer/on_policy_iteration_reinforcer.py +++ b/vel/rl/reinforcer/on_policy_iteration_reinforcer.py @@ -86,9 +86,9 @@ def train_epoch(self, epoch_info: EpochInfo, interactive=True) -> None: for batch_idx in iterator: batch_info = BatchInfo(epoch_info, batch_idx) - batch_info.on_batch_begin() + batch_info.on_batch_begin('train') self.train_batch(batch_info) - batch_info.on_batch_end() + batch_info.on_batch_end('train') epoch_info.result_accumulator.freeze_results() epoch_info.on_epoch_end() diff --git a/vel/storage/streaming/visdom.py b/vel/storage/streaming/visdom.py index a32bf83b..c861afe0 100644 --- a/vel/storage/streaming/visdom.py +++ b/vel/storage/streaming/visdom.py @@ -29,7 +29,7 @@ def on_epoch_end(self, epoch_info): first_epoch=epoch_info.global_epoch_idx == 1 ) - def on_batch_end(self, batch_info): + def on_batch_end(self, batch_info, dataset=None): """ Stream LR to visdom """ if self.settings.stream_lr: iteration_idx = ( diff --git a/vel/train/trainer.py b/vel/train/trainer.py index 44288497..89284147 100644 --- a/vel/train/trainer.py +++ b/vel/train/trainer.py @@ -69,12 +69,13 @@ def train_epoch(self, epoch_info, loader: Loader, interactive=True): else: iterator = loader['train'] - for batch_idx, data in enumerate(iterator): + for batch_idx, datapoint in enumerate(iterator): batch_info = BatchInfo(epoch_info, batch_idx) + batch_info['datapoint'] = datapoint - batch_info.on_batch_begin() - self.train_batch(batch_info, data) - batch_info.on_batch_end() + batch_info.on_batch_begin('train') + self.train_batch(batch_info, datapoint) + batch_info.on_batch_end('train') iterator.set_postfix(loss=epoch_info.result_accumulator.intermediate_value('loss')) @@ -88,16 +89,18 @@ def validation_epoch(self, epoch_info, loader: Loader, interactive=True): iterator = loader['val'] with torch.no_grad(): - for batch_idx, data in enumerate(iterator): + for batch_idx, datapoint in enumerate(iterator): batch_info = BatchInfo(epoch_info, batch_idx) + batch_info['datapoint'] = datapoint - batch_info.on_validation_batch_begin() - 
self.feed_batch(batch_info, data) - batch_info.on_validation_batch_end() + batch_info.on_batch_begin('val') + self.feed_batch(batch_info, datapoint) + batch_info.on_batch_end('val') def feed_batch(self, batch_info, data): """ Run single batch of data """ data = to_device(data, self.device) # Move a data batch into the right device + metrics = self.model.calculate_gradient(data) batch_info.update(metrics) From ea25504ddbf28ddd8880607f102b3092cdf8f4ec Mon Sep 17 00:00:00 2001 From: Million Integrals Date: Mon, 24 Jun 2019 22:06:27 -0700 Subject: [PATCH 061/162] Fixing augmentations of cats vs dogs training. --- .../cifar10/cifar10_cnn_01.yaml | 4 +- .../cats_vs_dogs_resnet34.yaml | 75 ++++------ vel/api/source.py | 130 ------------------ vel/api/transformation.py | 3 + vel/command/augvis_command.py | 2 +- vel/data/augmentation/center_crop.py | 17 +-- vel/data/augmentation/random_lighting.py | 12 +- vel/data/augmentation/random_rotate.py | 17 +-- vel/data/augmentation/random_scale.py | 18 +-- vel/data/augmentation/scale_min_size.py | 17 +-- vel/data/source/img_dir_source.py | 11 +- vel/data/transformation/normalize.py | 25 +++- 12 files changed, 100 insertions(+), 231 deletions(-) diff --git a/examples-configs/classification/cifar10/cifar10_cnn_01.yaml b/examples-configs/classification/cifar10/cifar10_cnn_01.yaml index 8ce60349..14f3c83f 100644 --- a/examples-configs/classification/cifar10/cifar10_cnn_01.yaml +++ b/examples-configs/classification/cifar10/cifar10_cnn_01.yaml @@ -22,13 +22,13 @@ loader: - name: vel.data.transformation.to_array - name: vel.data.transformation.normalize - name: vel.data.augmentation.random_crop - tags: ["train"] + tags: train width: 32 height: 32 padding: 4 padding_mode: 'reflect' - name: vel.data.augmentation.random_horizontal_flip - tags: ["train"] + tags: train - name: vel.data.transformation.image_to_tensor diff --git a/examples-configs/classification/imagenet_transfer/cats_vs_dogs_resnet34.yaml b/examples-configs/classification/imagenet_transfer/cats_vs_dogs_resnet34.yaml index 41860c8b..6aaffc16 100644 --- a/examples-configs/classification/imagenet_transfer/cats_vs_dogs_resnet34.yaml +++ b/examples-configs/classification/imagenet_transfer/cats_vs_dogs_resnet34.yaml @@ -2,74 +2,61 @@ name: cats_vs_dogs_resnet34 model: - name: vel.models.imagenet.resnet34 + name: vel.model.imagenet.resnet34 fc_layers: [512, 2] dropout: [0.25, 0.5] pretrained: true source: - name: vel.sources.img_dir_source + name: vel.data.source.img_dir_source url: http://files.fast.ai/data/dogscats.zip extract_parent: true path: data/dogscats - num_workers: 8 - batch_size: 64 -# tta: -# name: vel.augmentations.tta.train_tta -# n_augmentations: 4 - augmentations: - - name: vel.augmentations.to_array - mode: x - tags: ["train", "val"] +loader: + name: vel.data.loader + num_workers: 8 + batch_size: 64 - - name: vel.augmentations.random_scale - mode: x - tags: ["train"] + transformations: + - name: vel.data.transformation.to_array + - name: vel.data.augmentation.random_scale + tags: train size: 224 max_zoom: 1.1 - - name: vel.augmentations.random_rotate - mode: x - tags: ["train"] + - name: vel.data.augmentation.random_rotate + tags: train deg: 10.0 - - name: vel.augmentations.random_crop - mode: x - tags: ["train"] + - name: vel.data.augmentation.random_crop + tags: train width: 224 height: 224 - - name: vel.augmentations.random_lighting - mode: x - tags: ["train"] + - name: vel.data.augmentation.random_lighting + tags: train b: 0.05 c: 0.05 - - name: vel.augmentations.random_horizontal_flip - 
mode: x - tags: ["train"] - - - name: vel.augmentations.scale_min_size - mode: x - tags: ["val"] + - name: vel.data.augmentation.random_horizontal_flip + tags: train + - name: vel.data.augmentation.scale_min_size + tags: val size: 224 - - name: vel.augmentations.center_crop - mode: x - tags: ["val"] + - name: vel.data.augmentation.center_crop + tags: val size: 224 - - name: vel.augmentations.normalize - mode: x + - name: vel.data.transformation.normalize tags: ["train", "val"] mean: [0.485, 0.456, 0.406] std: [0.229, 0.224, 0.225] - - name: vel.augmentations.to_tensor - mode: x + - name: vel.data.transformation.image_to_tensor tags: ["train", "val"] optimizer: - name: vel.optimizers.sgd + name: vel.optimizer.sgd lr: 0.01 weight_decay: 0.0 momentum: 0.9 @@ -78,8 +65,7 @@ optimizer: commands: train: - name: vel.commands.phase_train_command - restart: false + name: vel.command.phase_train_command phases: - name: vel.phase.freeze - name: vel.phase.cycle @@ -103,15 +89,14 @@ commands: cycle_mult: 2 simple_train: - name: vel.commands.train_command - restart: false + name: vel.command.train_command epochs: 3 summary: - name: vel.commands.summary_command + name: vel.command.summary_command lr_find: - name: vel.commands.lr_find_command + name: vel.command.lr_find_command metric: 'loss' start_lr: 1.0e-5 end_lr: 10.0 @@ -120,6 +105,6 @@ commands: freeze: true augvis: - name: vel.commands.augvis_command + name: vel.command.augvis_command cases: 3 samples: 4 diff --git a/vel/api/source.py b/vel/api/source.py index c6209c3a..fb02e7e6 100644 --- a/vel/api/source.py +++ b/vel/api/source.py @@ -17,133 +17,3 @@ def __init__(self, train: data.Dataset, validation: data.Dataset, self.test = test self.metadata = {} if metadata is None else metadata - -# @property -# def train_loader(self): -# """ PyTorch loader of training data """ -# raise NotImplementedError -# -# @property -# def val_loader(self): -# """ PyTorch loader of validation data """ -# raise NotImplementedError -# -# @property -# def train_dataset(self): -# """ Return the training dataset """ -# raise NotImplementedError -# -# @property -# def val_dataset(self): -# """ Return the validation dataset """ -# raise NotImplementedError -# -# @property -# def train_iterations_per_epoch(self): -# """ Return number of iterations per epoch """ -# raise NotImplementedError -# -# @property -# def val_iterations_per_epoch(self): -# """ Return number of iterations per epoch - validation """ -# raise NotImplementedError -# -# -# class SupervisedTextData(Source): -# """ An NLP torchtext data source """ -# def __init__(self, train_source, val_source, train_iterator, val_iterator, data_field, target_field): -# super().__init__() -# -# self.train_source = train_source -# self.val_source = val_source -# self.train_iterator = train_iterator -# self.val_iterator = val_iterator -# self.data_field = data_field -# self.target_field = target_field -# -# @property -# def train_loader(self): -# """ PyTorch loader of training data """ -# return self.train_iterator -# -# @property -# def val_loader(self): -# """ PyTorch loader of validation data """ -# return self.val_iterator -# -# @property -# def train_dataset(self): -# """ Return the training dataset """ -# return self.train_source -# -# @property -# def val_dataset(self): -# """ Return the validation dataset """ -# return self.val_source -# -# @property -# def train_iterations_per_epoch(self): -# """ Return number of iterations per epoch """ -# return len(self.train_iterator) -# -# @property -# def 
val_iterations_per_epoch(self): -# """ Return number of iterations per epoch - validation """ -# return len(self.val_iterator) -# -# -# class SupervisedTrainingData(Source): -# """ Most common source of data combining a basic datasource and sampler """ -# def __init__(self, train_source, val_source, num_workers, batch_size, augmentations=None): -# -# super().__init__() -# -# self.train_source = train_source -# self.val_source = val_source -# -# self.num_workers = num_workers -# self.batch_size = batch_size -# -# self.augmentations = augmentations -# -# # Derived values -# self.train_ds = DataFlow(self.train_source, augmentations, tag='train') -# self.val_ds = DataFlow(self.val_source, augmentations, tag='val') -# -# self._train_loader = data.DataLoader( -# self.train_ds, batch_size=batch_size, shuffle=True, num_workers=num_workers -# ) -# -# self._val_loader = data.DataLoader( -# self.val_ds, batch_size=batch_size, shuffle=False, num_workers=num_workers -# ) -# -# @property -# def train_loader(self): -# """ PyTorch loader of training data """ -# return self._train_loader -# -# @property -# def val_loader(self): -# """ PyTorch loader of validation data """ -# return self._val_loader -# -# @property -# def train_dataset(self): -# """ Return the training dataset """ -# return self.train_ds -# -# @property -# def val_dataset(self): -# """ Return the validation dataset """ -# return self.val_ds -# -# @property -# def train_iterations_per_epoch(self): -# """ Return number of iterations per epoch """ -# return len(self._train_loader) -# -# @property -# def val_iterations_per_epoch(self): -# """ Return number of iterations per epoch - validation """ -# return len(self._val_loader) diff --git a/vel/api/transformation.py b/vel/api/transformation.py index 9f84e9de..2bf960bf 100644 --- a/vel/api/transformation.py +++ b/vel/api/transformation.py @@ -3,6 +3,9 @@ class Transformation: def __init__(self, tags=None): self.tags = ['train', 'val', 'test'] if tags is None else tags + if isinstance(self.tags, str): + self.tags = [self.tags] + def initialize(self, source): """ Initialize transformation from source """ pass diff --git a/vel/command/augvis_command.py b/vel/command/augvis_command.py index edfa4906..b4a74fcd 100644 --- a/vel/command/augvis_command.py +++ b/vel/command/augvis_command.py @@ -29,7 +29,7 @@ def run(self): for j in range(self.samples): augmented_datapoint = dataset[selected_sample[i]] denormalized_datapoint = dataset.denormalize(augmented_datapoint) - ax[i, j+1].imshow(denormalized_datapoint['x']) + ax[i, j+1].imshow(np.clip(denormalized_datapoint['x'], 0.0, 1.0)) plt.show() diff --git a/vel/data/augmentation/center_crop.py b/vel/data/augmentation/center_crop.py index 21cf33bd..8bead415 100644 --- a/vel/data/augmentation/center_crop.py +++ b/vel/data/augmentation/center_crop.py @@ -3,10 +3,11 @@ https://github.com/fastai/fastai/blob/master/fastai/transforms.py """ -import vel.data as data +import vel.api as api +import vel.data.operation.image_op as op -class CenterCrop(data.Augmentation): +class CenterCrop(api.ScopedTransformation): """ A class that represents a Center Crop. This transforms (optionally) transforms x,y at with the same parameters. @@ -17,14 +18,14 @@ class CenterCrop(data.Augmentation): tfm_y : TfmType type of y transformation. 
""" - def __init__(self, size, mode='x', tags=None): - super().__init__(mode, tags) + def __init__(self, size, scope='x', tags=None): + super().__init__(scope, tags) self.size = size - def __call__(self, x): - return data.center_crop(x, self.size) + def transform(self, x): + return op.center_crop(x, self.size) -def create(size, mode='x', tags=None): - return CenterCrop(size, mode, tags) +def create(size, scope='x', tags=None): + return CenterCrop(size, scope, tags) diff --git a/vel/data/augmentation/random_lighting.py b/vel/data/augmentation/random_lighting.py index 9c51d9ae..8331d75e 100644 --- a/vel/data/augmentation/random_lighting.py +++ b/vel/data/augmentation/random_lighting.py @@ -1,26 +1,26 @@ import random import vel.api as api -import vel.data as data +import vel.data.operation.image_op as op -class RandomLighting(api.Transformation): +class RandomLighting(api.ScopedTransformation): """ Apply a horizontal flip randomly to input images """ def __init__(self, b, c, mode='x', tags=None): super().__init__(mode, tags) self.b, self.c = b, c - def __call__(self, img): + def transform(self, img): """ Adjust lighting """ rand_b = random.uniform(-self.b, self.b) rand_c = random.uniform(-self.c, self.c) rand_c = -1/(rand_c-1) if rand_c < 0 else rand_c+1 - return data.lighting(img, rand_b, rand_c) + return op.lighting(img, rand_b, rand_c) def __repr__(self): return self.__class__.__name__ + '(b={}, c={})'.format(self.b, self.c) -def create(b, c, mode='x', tags=None): - return RandomLighting(b, c, mode, tags) +def create(b, c, scope='x', tags=None): + return RandomLighting(b, c, scope, tags) diff --git a/vel/data/augmentation/random_rotate.py b/vel/data/augmentation/random_rotate.py index c2c02246..65ae8a04 100644 --- a/vel/data/augmentation/random_rotate.py +++ b/vel/data/augmentation/random_rotate.py @@ -5,25 +5,26 @@ import cv2 import random -import vel.data as data +import vel.api as api +import vel.data.operation.image_op as op -class RandomRotate(data.Augmentation): +class RandomRotate(api.ScopedTransformation): """ Rotate image randomly by an angle between (-deg, +deg) """ - def __init__(self, deg, p=0.75, mode='x', tags=None): - super().__init__(mode, tags) + def __init__(self, deg, p=0.75, scope='x', tags=None): + super().__init__(scope, tags) self.deg = deg self.p = p - def __call__(self, x_data): + def transform(self, x_data): if random.random() < self.p: random_degree = random.uniform(-self.deg, self.deg) - return data.rotate_img(x_data, random_degree, mode=cv2.BORDER_REFLECT) + return op.rotate_img(x_data, random_degree, mode=cv2.BORDER_REFLECT) else: # No, don't do it return x_data -def create(deg, p=0.75, mode='x', tags=None): +def create(deg, p=0.75, scope='x', tags=None): """ Vel factory function """ - return RandomRotate(deg, p, mode, tags) + return RandomRotate(deg, p, scope, tags) diff --git a/vel/data/augmentation/random_scale.py b/vel/data/augmentation/random_scale.py index 882a3eb6..694cdf2f 100644 --- a/vel/data/augmentation/random_scale.py +++ b/vel/data/augmentation/random_scale.py @@ -6,18 +6,19 @@ import collections.abc as abc import random -import vel.data as data +import vel.api as api +import vel.data.operation.image_op as op -class RandomScale(data.Augmentation): +class RandomScale(api.ScopedTransformation): """ Scales the image so that the smallest axis is of 'size' times a random number between 1.0 and max_zoom. 
""" - def __init__(self, size, max_zoom, p=0.75, mode='x', tags=None): - super().__init__(mode, tags) + def __init__(self, size, max_zoom, p=0.75, scope='x', tags=None): + super().__init__(scope, tags) self.size = size self.max_zoom = max_zoom self.p = p - def __call__(self, x_data): + def transform(self, x_data): if random.random() < self.p: # Yes, do it min_z = 1. @@ -30,8 +31,9 @@ def __call__(self, x_data): # No, don't do it mult = 1.0 - return data.scale_min(x_data, int(self.size * mult), cv2.INTER_AREA) + return op.scale_min(x_data, int(self.size * mult), cv2.INTER_AREA) -def create(size, max_zoom, p=0.75, mode='x', tags=None): - return RandomScale(size, max_zoom, p, mode, tags) +def create(size, max_zoom, p=0.75, scope='x', tags=None): + """ Vel factory function """ + return RandomScale(size, max_zoom, p, scope, tags) diff --git a/vel/data/augmentation/scale_min_size.py b/vel/data/augmentation/scale_min_size.py index c1ebfa5d..d6ecd06b 100644 --- a/vel/data/augmentation/scale_min_size.py +++ b/vel/data/augmentation/scale_min_size.py @@ -4,19 +4,20 @@ """ import PIL.Image as Image -import vel.data as data +import vel.api as api +import vel.data.operation.image_op as op -class ScaleMinSize(data.Augmentation): +class ScaleMinSize(api.ScopedTransformation): """ Scales the image so that the smallest axis is of 'size'. """ - def __init__(self, size, mode='x', tags=None): - super().__init__(mode, tags) + def __init__(self, size, scope='x', tags=None): + super().__init__(scope, tags) self.size = size - def __call__(self, x_data): - return data.scale_min(x_data, self.size, Image.BILINEAR) + def transform(self, x_data): + return op.scale_min(x_data, self.size, Image.BILINEAR) -def create(size, mode='x', tags=None): +def create(size, scope='x', tags=None): """ Vel factory function """ - return ScaleMinSize(size, mode, tags) + return ScaleMinSize(size, scope, tags) diff --git a/vel/data/source/img_dir_source.py b/vel/data/source/img_dir_source.py index bbca4ad6..45b4ec2b 100644 --- a/vel/data/source/img_dir_source.py +++ b/vel/data/source/img_dir_source.py @@ -4,7 +4,7 @@ import torchvision.datasets as ds import torchvision.datasets.utils as ds_util -from vel.api import SupervisedTrainingData +from vel.api import Source class ImageDirSource(ds.ImageFolder): @@ -12,8 +12,7 @@ class ImageDirSource(ds.ImageFolder): pass -def create(model_config, path, num_workers, batch_size, augmentations=None, tta=None, url=None, - extract_parent=False): +def create(model_config, path, url=None, extract_parent=False): """ Create an ImageDirSource with supplied arguments """ if not os.path.isabs(path): path = model_config.project_top_dir(path) @@ -43,11 +42,7 @@ def create(model_config, path, num_workers, batch_size, augmentations=None, tta= train_ds = ImageDirSource(train_path) val_ds = ImageDirSource(valid_path) - return SupervisedTrainingData( + return Source( train_ds, val_ds, - num_workers=num_workers, - batch_size=batch_size, - augmentations=augmentations, - # test_time_augmentation=tta ) diff --git a/vel/data/transformation/normalize.py b/vel/data/transformation/normalize.py index ccfa5064..a29ad225 100644 --- a/vel/data/transformation/normalize.py +++ b/vel/data/transformation/normalize.py @@ -1,19 +1,30 @@ +import numpy as np + import vel.api as api class Normalize(api.ScopedTransformation): """ Normalize input mean and standard deviation """ - def __init__(self, scope='x', tags=None): + def __init__(self, mean=None, std=None, scope='x', tags=None): super().__init__(scope, tags) - self.mean = None - 
self.std = None
+        self.mean = mean
+        self.std = std
+
+        if self.mean is not None:
+            self.mean = np.asarray(self.mean)
+
+        if self.std is not None:
+            self.std = np.asarray(self.std)
 
     def initialize(self, source):
         """ Initialize transformation from source """
-        self.mean = source.metadata['train_mean']
-        self.std = source.metadata['train_std']
+        if self.mean is None:
+            self.mean = source.metadata['train_mean']
+
+        if self.std is None:
+            self.std = source.metadata['train_std']
 
     def transform(self, value):
         return (value - self.mean) / self.std
@@ -23,6 +34,6 @@ def denormalization_transform(self, value):
         return value * self.std + self.mean
 
 
-def create(mode='x', tags=None):
+def create(mean=None, std=None, mode='x', tags=None):
     """ Vel factory function """
-    return Normalize(scope=mode, tags=tags)
+    return Normalize(mean=mean, std=std, scope=mode, tags=tags)

From 5061e91ea7295445ad9819c7aaddbb3fe6796094 Mon Sep 17 00:00:00 2001
From: Million Integrals
Date: Mon, 24 Jun 2019 22:16:23 -0700
Subject: [PATCH 062/162] Fixing lr find command.

---
 vel/command/lr_find_command.py | 25 +++++++++++++------------
 1 file changed, 13 insertions(+), 12 deletions(-)

diff --git a/vel/command/lr_find_command.py b/vel/command/lr_find_command.py
index 32544c5d..ff927a52 100644
--- a/vel/command/lr_find_command.py
+++ b/vel/command/lr_find_command.py
@@ -8,8 +8,9 @@
 
 import vel.util.interpolate as interp
 
-from vel.api import Learner, TrainingInfo, EpochInfo, BatchInfo
-from vel.metric.averaging_metric import AveragingNamedMetric
+from vel.api import TrainingInfo, EpochInfo, BatchInfo
+from vel.metric.base.averaging_metric import AveragingNamedMetric
+from vel.train import Trainer
 
 
 class LrFindCommand:
@@ -45,11 +46,11 @@ class LrFindCommand:
     http://arxiv.org/abs/1506.01186
     """
 
-    def __init__(self, model_config, model, source, optimizer_factory, start_lr=1e-5, end_lr=10, num_it=100,
+    def __init__(self, model_config, model, loader, optimizer_factory, start_lr=1e-5, end_lr=10, num_it=100,
                  interpolation='logscale', freeze=False, stop_dv=True, divergence_threshold=4.0, metric='loss'):
         # Mandatory pieces
         self.model = model
-        self.source = source
+        self.loader = loader
         self.optimizer_factory = optimizer_factory
         self.model_config = model_config
         # Settings
@@ -65,7 +66,7 @@ def __init__(self, model_config, model, source, optimizer_factory, start_lr=1e-5
 
     def run(self):
         """ Run the command with supplied configuration """
         device = self.model_config.torch_device()
-        learner = Learner(device, self.model.instantiate())
+        learner = Trainer(device, self.model.instantiate())
 
         lr_schedule = interp.interpolate_series(self.start_lr, self.end_lr, self.num_it, self.interpolation)
@@ -75,7 +76,7 @@ def run(self):
         # Optimizer should be created after freeze
         optimizer = self.optimizer_factory.instantiate(learner.model)
 
-        iterator = iter(self.source.train_loader)
+        iterator = iter(self.loader['train'])
 
         # Metrics to track through this training
         metrics = learner.metrics() + [AveragingNamedMetric("lr")]
@@ -99,12 +100,12 @@ def run(self):
                 param_group['lr'] = lr
 
             try:
-                data, target = next(iterator)
+                datapoint = next(iterator)
             except StopIteration:
-                iterator = iter(self.source.train_loader)
-                data, target = next(iterator)
+                iterator = iter(self.loader['train'])
+                datapoint = next(iterator)
 
-            learner.train_batch(batch_info, data, target)
+            learner.train_batch(batch_info, datapoint)
 
             batch_info['lr'] = lr
@@ -149,13 +150,13 @@ def run(self):
     plt.show()
 
 
-def create(model_config, model, source, optimizer, start_lr=1e-5, end_lr=10, iterations=100,
freeze=False, +def create(model_config, model, loader, optimizer, start_lr=1e-5, end_lr=10, iterations=100, freeze=False, interpolation='logscale', stop_dv=True, divergence_threshold=4.0, metric='loss'): """ Vel factory function """ return LrFindCommand( model_config=model_config, model=model, - source=source, + loader=loader, optimizer_factory=optimizer, start_lr=start_lr, end_lr=end_lr, From 2e4ede029488cab5f2450a45d7eebd291f3da715 Mon Sep 17 00:00:00 2001 From: Million Integrals Date: Tue, 25 Jun 2019 21:37:40 -0700 Subject: [PATCH 063/162] Fixing cats vs dogs transfer learning example. --- .../cats_vs_dogs_resnet34.yaml | 8 ++--- vel/command/phase_train_command.py | 31 ++++++++++++------- vel/train/__init__.py | 1 + vel/train/phase/cycle.py | 16 +++++----- vel/train/phase/freeze.py | 6 ++-- vel/train/phase/generic.py | 14 +++++---- vel/train/phase/unfreeze.py | 6 ++-- vel/train/train_phase.py | 8 +++-- 8 files changed, 52 insertions(+), 38 deletions(-) diff --git a/examples-configs/classification/imagenet_transfer/cats_vs_dogs_resnet34.yaml b/examples-configs/classification/imagenet_transfer/cats_vs_dogs_resnet34.yaml index 6aaffc16..d764ce9e 100644 --- a/examples-configs/classification/imagenet_transfer/cats_vs_dogs_resnet34.yaml +++ b/examples-configs/classification/imagenet_transfer/cats_vs_dogs_resnet34.yaml @@ -67,8 +67,8 @@ commands: train: name: vel.command.phase_train_command phases: - - name: vel.phase.freeze - - name: vel.phase.cycle + - name: vel.train.phase.freeze + - name: vel.train.phase.cycle init_lr: 0.001 init_iter: 20 max_lr: 0.01 @@ -76,8 +76,8 @@ commands: interpolate: 'cosine' cycles: 3 cycle_len: 1 - - name: vel.phase.unfreeze - - name: vel.phase.cycle + - name: vel.train.phase.unfreeze + - name: vel.train.phase.cycle init_lr: 0.001 init_iter: 20 diff --git a/vel/command/phase_train_command.py b/vel/command/phase_train_command.py index e80768b2..b80a1b7a 100644 --- a/vel/command/phase_train_command.py +++ b/vel/command/phase_train_command.py @@ -2,17 +2,24 @@ import bisect import typing -from vel.api import Learner, TrainingInfo, ModelConfig, TrainPhase +import vel.api as api +import vel.data as data +import vel.train as train + +from vel.metric.samples_per_sec import SamplesPerSec +from vel.callback.time_tracker import TimeTracker +from vel.callback.sample_tracker import SampleTracker class PhaseTrainCommand: """ Training command - learn according to a set of phases """ - def __init__(self, model_config: ModelConfig, model_factory, source, storage, phases: typing.List[TrainPhase], + def __init__(self, model_config: api.ModelConfig, model_factory: api.ModelFactory, loader: data.Loader, + storage: api.Storage, phases: typing.List[train.TrainPhase], callbacks=None, restart=True): self.model_config = model_config self.model_factory = model_factory - self.source = source + self.loader = loader self.storage = storage self.phases = phases self.ladder = self._build_phase_ladder(phases) @@ -49,13 +56,13 @@ def _select_phase_right_bound(self, epoch_number): def run(self): """ Run the command with supplied configuration """ device = self.model_config.torch_device() - learner = Learner(device, self.model_factory.instantiate()) + learner = train.Trainer(device, self.model_factory.instantiate()) # All callbacks useful for learning callbacks = self.gather_callbacks() # Metrics to track through this training - metrics = learner.metrics() + metrics = learner.metrics() + [SamplesPerSec()] # Check if training was already started and potentially continue where we left off 
training_info, hidden_state = self.resume_training(learner, callbacks, metrics) @@ -65,7 +72,7 @@ def run(self): current_phase = self.phases[current_phase_idx] local_idx = training_info.start_epoch_idx - self.ladder[current_phase_idx] - current_phase.set_up_phase(training_info, learner.model, self.source) + current_phase.set_up_phase(training_info, learner.model, self.loader) print(current_phase.banner()) if training_info.start_epoch_idx > 0: @@ -84,7 +91,7 @@ def run(self): current_phase_idx += 1 current_phase = self.phases[current_phase_idx] - current_phase.set_up_phase(training_info, learner.model, self.source) + current_phase.set_up_phase(training_info, learner.model, self.loader) print(current_phase.banner()) # Create epoch info @@ -106,21 +113,21 @@ def run(self): def gather_callbacks(self) -> list: """ Gather all the callbacks to be used in this training run """ - callbacks = [] + callbacks = [TimeTracker(), SampleTracker()] callbacks.extend(self.callbacks) callbacks.extend(self.storage.streaming_callbacks()) return callbacks - def resume_training(self, learner, callbacks, metrics) -> (TrainingInfo, dict): + def resume_training(self, learner, callbacks, metrics) -> (api.TrainingInfo, dict): """ Possibly resume training from a saved state from the storage """ if self.model_config.continue_training: start_epoch = self.storage.last_epoch_idx() else: start_epoch = 0 - training_info = TrainingInfo( + training_info = api.TrainingInfo( start_epoch_idx=start_epoch, run_name=self.model_config.run_name, metrics=metrics, @@ -139,12 +146,12 @@ def resume_training(self, learner, callbacks, metrics) -> (TrainingInfo, dict): return training_info, hidden_state -def create(model_config, model, source, storage, phases, callbacks=None, restart=True): +def create(model_config, model, loader, storage, phases, callbacks=None, restart=True): """ Vel factory function """ return PhaseTrainCommand( model_config=model_config, model_factory=model, - source=source, + loader=loader, storage=storage, phases=phases, callbacks=callbacks, diff --git a/vel/train/__init__.py b/vel/train/__init__.py index 260e4c8d..482d91cd 100644 --- a/vel/train/__init__.py +++ b/vel/train/__init__.py @@ -1 +1,2 @@ from .trainer import Trainer +from .train_phase import TrainPhase, EmptyTrainPhase diff --git a/vel/train/phase/cycle.py b/vel/train/phase/cycle.py index 9b38b8be..1f3358c0 100644 --- a/vel/train/phase/cycle.py +++ b/vel/train/phase/cycle.py @@ -1,8 +1,10 @@ +import typing import numpy as np import vel.util.interpolate as interp -from vel.api import BatchInfo, EpochInfo, TrainingInfo, Callback, TrainPhase +from vel.api import BatchInfo, EpochInfo, TrainingInfo, Callback +from vel.train import TrainPhase class CycleCallback(Callback): @@ -52,7 +54,7 @@ def _init_cycle_dict(self): return dict_arr, length_arr, start_arr - def on_batch_begin(self, batch_info: BatchInfo): + def on_batch_begin(self, batch_info: BatchInfo, dataset: typing.Optional[str] = None): """ Set proper learning rate """ cycle_length = self.cycle_lengths[batch_info.local_epoch_number - 1] cycle_start = self.cycle_starts[batch_info.local_epoch_number - 1] @@ -113,7 +115,7 @@ def __init__(self, optimizer_factory, max_lr, min_lr, cycles, cycle_len=1, cycle self.freeze = freeze self._optimizer_instance = None - self._source = None + self._loader = None self.special_callback = None @@ -121,11 +123,11 @@ def __init__(self, optimizer_factory, max_lr, min_lr, cycles, cycle_len=1, cycle def number_of_epochs(self) -> int: return self.epochs - def set_up_phase(self, 
training_info, model, source): + def set_up_phase(self, training_info, model, loader): """ Prepare the phase for learning """ # To parameter groups handles properly filtering parameters that don't require gradient self._optimizer_instance = self.optimizer_factory.instantiate(model) - self._source = source + self._loader = loader self.special_callback = CycleCallback( self._optimizer_instance, @@ -142,7 +144,7 @@ def epoch_info(self, training_info: TrainingInfo, global_idx: int, local_idx: in training_info=training_info, global_epoch_idx=global_idx, local_epoch_idx=local_idx, - batches_per_epoch=self._source.train_iterations_per_epoch, + batches_per_epoch=self._loader.size['train'], optimizer=self._optimizer_instance, # Add special callback for this epoch callbacks=[self.special_callback] + training_info.callbacks @@ -150,7 +152,7 @@ def epoch_info(self, training_info: TrainingInfo, global_idx: int, local_idx: in def execute_epoch(self, epoch_info, learner): """ Prepare the phase for learning """ - learner.run_epoch(epoch_info, self._source) + learner.run_epoch(epoch_info, self._loader) def create(optimizer, max_lr, min_lr, cycles, cycle_len=1, cycle_mult=1, interpolate='linear', init_lr=0, init_iter=0): diff --git a/vel/train/phase/freeze.py b/vel/train/phase/freeze.py index 576e372a..c230c762 100644 --- a/vel/train/phase/freeze.py +++ b/vel/train/phase/freeze.py @@ -1,10 +1,10 @@ -import vel.api as api +import vel.train as train -class FreezePhase(api.EmptyTrainPhase): +class FreezePhase(train.EmptyTrainPhase): """ Freeze the model """ - def set_up_phase(self, training_info, model, source): + def set_up_phase(self, training_info, model, loader): """ Freeze the model """ model.freeze() diff --git a/vel/train/phase/generic.py b/vel/train/phase/generic.py index 25c52c1e..f57e9923 100644 --- a/vel/train/phase/generic.py +++ b/vel/train/phase/generic.py @@ -1,4 +1,6 @@ -from vel.api import TrainingInfo, EpochInfo, TrainPhase, Source +from vel.api import TrainingInfo, EpochInfo +from vel.data import Loader +from vel.train import TrainPhase class GenericPhase(TrainPhase): @@ -10,16 +12,16 @@ def __init__(self, lr, epochs, optimizer_factory): self.optimizer_factory = optimizer_factory self._optimizer_instance = None - self._source = None + self._loader = None @property def number_of_epochs(self) -> int: return self.epochs - def set_up_phase(self, training_info, model, source: Source): + def set_up_phase(self, training_info, model, loader: Loader): """ Prepare the phase for learning """ self._optimizer_instance = self.optimizer_factory.instantiate(model) - self._source = source + self._loader = loader def epoch_info(self, training_info: TrainingInfo, global_idx: int, local_idx: int) -> EpochInfo: """ Create Epoch info """ @@ -27,7 +29,7 @@ def epoch_info(self, training_info: TrainingInfo, global_idx: int, local_idx: in training_info=training_info, global_epoch_idx=global_idx, local_epoch_idx=local_idx, - batches_per_epoch=self._source.train_iterations_per_epoch, + batches_per_epoch=self._loader.size['train'], optimizer=self._optimizer_instance ) @@ -36,7 +38,7 @@ def execute_epoch(self, epoch_info, learner): for param_group in epoch_info.optimizer.param_groups: param_group['lr'] = self.lr - epoch_result = learner.run_epoch(epoch_info, self._source) + epoch_result = learner.run_epoch(epoch_info, self._loader) return epoch_result diff --git a/vel/train/phase/unfreeze.py b/vel/train/phase/unfreeze.py index 48b15a0e..8fd40090 100644 --- a/vel/train/phase/unfreeze.py +++ b/vel/train/phase/unfreeze.py 
@@ -1,10 +1,10 @@ -import vel.api as api +import vel.train as train -class UnfreezePhase(api.EmptyTrainPhase): +class UnfreezePhase(train.EmptyTrainPhase): """ Freeze the model """ - def set_up_phase(self, training_info, model, source): + def set_up_phase(self, training_info, model, loader): """ Freeze the model """ model.unfreeze() diff --git a/vel/train/train_phase.py b/vel/train/train_phase.py index cd0f9b58..2515d4bb 100644 --- a/vel/train/train_phase.py +++ b/vel/train/train_phase.py @@ -1,7 +1,9 @@ from torch.optim import Optimizer -from vel.api import TrainingInfo, EpochInfo, Model, Source -from vel.train import Trainer +from vel.api import TrainingInfo, EpochInfo, Model +from vel.data import Loader + +from .trainer import Trainer class TrainPhase: @@ -12,7 +14,7 @@ def number_of_epochs(self) -> int: """ How many epochs does this phase take """ raise NotImplementedError - def set_up_phase(self, training_info: TrainingInfo, model: Model, source: Source) -> Optimizer: + def set_up_phase(self, training_info: TrainingInfo, model: Model, loader: Loader) -> Optimizer: """ Prepare the phase for learning, returns phase optimizer """ pass From dd3adca673664b36eea5ceefc70089fa5ea4fd8c Mon Sep 17 00:00:00 2001 From: Million Integrals Date: Tue, 25 Jun 2019 22:32:24 -0700 Subject: [PATCH 064/162] Working on a loader for text. --- .../autoencoder/mnist/mnist_cnn_ae.yaml | 2 +- .../autoencoder/mnist/mnist_cnn_vae.yaml | 2 +- .../cifar10/cifar10_cnn_01.yaml | 2 +- .../cifar10/cifar10_resnetv1_110.yaml | 2 +- .../cifar10/cifar10_resnetv1_32.yaml | 2 +- .../cifar10/cifar10_resnetv2_110.yaml | 2 +- .../cifar10_resnetv2_164_bottleneck.yaml | 2 +- .../cifar10/cifar10_resnetv2_32.yaml | 2 +- .../cifar10/cifar10_resnext_29_c1.yaml | 2 +- .../cifar10/cifar10_resnext_29_c8.yaml | 2 +- .../cats_vs_dogs_resnet34.yaml | 2 +- .../classification/mnist/mnist_cnn_01.yaml | 2 +- notebooks/classic/mnist_cnn_01.ipynb | 158 ++++++++ .../imagenet_transfer/cats_vs_dogs.ipynb | 345 ++++++++++++++++++ vel/command/augvis_command.py | 6 +- vel/command/phase_train_command.py | 2 +- vel/command/train_command.py | 2 +- vel/data/__init__.py | 2 +- vel/data/{loader.py => dataset_loader.py} | 4 +- vel/data/source/nlp/text_url.py | 137 +------ vel/data/text_character_loader.py | 105 ++++++ vel/train/phase/generic.py | 4 +- vel/train/train_phase.py | 4 +- vel/train/trainer.py | 8 +- 24 files changed, 648 insertions(+), 153 deletions(-) create mode 100644 notebooks/classic/mnist_cnn_01.ipynb create mode 100644 notebooks/imagenet_transfer/cats_vs_dogs.ipynb rename vel/data/{loader.py => dataset_loader.py} (97%) create mode 100644 vel/data/text_character_loader.py diff --git a/examples-configs/autoencoder/mnist/mnist_cnn_ae.yaml b/examples-configs/autoencoder/mnist/mnist_cnn_ae.yaml index 690a7f8c..d4ce36c7 100644 --- a/examples-configs/autoencoder/mnist/mnist_cnn_ae.yaml +++ b/examples-configs/autoencoder/mnist/mnist_cnn_ae.yaml @@ -15,7 +15,7 @@ source: loader: - name: vel.data.loader + name: vel.data.dataset_loader batch_size: 128 num_workers: 4 diff --git a/examples-configs/autoencoder/mnist/mnist_cnn_vae.yaml b/examples-configs/autoencoder/mnist/mnist_cnn_vae.yaml index 74619b28..043edd22 100644 --- a/examples-configs/autoencoder/mnist/mnist_cnn_vae.yaml +++ b/examples-configs/autoencoder/mnist/mnist_cnn_vae.yaml @@ -14,7 +14,7 @@ source: name: vel.data.source.vision.mnist loader: - name: vel.data.loader + name: vel.data.dataset_loader batch_size: 128 num_workers: 4 diff --git 
a/examples-configs/classification/cifar10/cifar10_cnn_01.yaml b/examples-configs/classification/cifar10/cifar10_cnn_01.yaml index 14f3c83f..e6292546 100644 --- a/examples-configs/classification/cifar10/cifar10_cnn_01.yaml +++ b/examples-configs/classification/cifar10/cifar10_cnn_01.yaml @@ -14,7 +14,7 @@ source: loader: - name: vel.data.loader + name: vel.data.dataset_loader batch_size: 128 num_workers: 4 diff --git a/examples-configs/classification/cifar10/cifar10_resnetv1_110.yaml b/examples-configs/classification/cifar10/cifar10_resnetv1_110.yaml index fc01f757..3ce7feb8 100644 --- a/examples-configs/classification/cifar10/cifar10_resnetv1_110.yaml +++ b/examples-configs/classification/cifar10/cifar10_resnetv1_110.yaml @@ -15,7 +15,7 @@ source: loader: - name: vel.data.loader + name: vel.data.dataset_loader batch_size: 128 num_workers: 4 diff --git a/examples-configs/classification/cifar10/cifar10_resnetv1_32.yaml b/examples-configs/classification/cifar10/cifar10_resnetv1_32.yaml index f04e40fc..935b8277 100644 --- a/examples-configs/classification/cifar10/cifar10_resnetv1_32.yaml +++ b/examples-configs/classification/cifar10/cifar10_resnetv1_32.yaml @@ -15,7 +15,7 @@ source: loader: - name: vel.data.loader + name: vel.data.dataset_loader batch_size: 128 num_workers: 4 diff --git a/examples-configs/classification/cifar10/cifar10_resnetv2_110.yaml b/examples-configs/classification/cifar10/cifar10_resnetv2_110.yaml index 9291ee8d..f0bd7291 100644 --- a/examples-configs/classification/cifar10/cifar10_resnetv2_110.yaml +++ b/examples-configs/classification/cifar10/cifar10_resnetv2_110.yaml @@ -14,7 +14,7 @@ source: loader: - name: vel.data.loader + name: vel.data.dataset_loader batch_size: 128 num_workers: 4 diff --git a/examples-configs/classification/cifar10/cifar10_resnetv2_164_bottleneck.yaml b/examples-configs/classification/cifar10/cifar10_resnetv2_164_bottleneck.yaml index 86d5dbaa..a7ff1491 100644 --- a/examples-configs/classification/cifar10/cifar10_resnetv2_164_bottleneck.yaml +++ b/examples-configs/classification/cifar10/cifar10_resnetv2_164_bottleneck.yaml @@ -16,7 +16,7 @@ source: loader: - name: vel.data.loader + name: vel.data.dataset_loader batch_size: 128 num_workers: 4 diff --git a/examples-configs/classification/cifar10/cifar10_resnetv2_32.yaml b/examples-configs/classification/cifar10/cifar10_resnetv2_32.yaml index 7f38b3fb..60ebf5ad 100644 --- a/examples-configs/classification/cifar10/cifar10_resnetv2_32.yaml +++ b/examples-configs/classification/cifar10/cifar10_resnetv2_32.yaml @@ -14,7 +14,7 @@ source: loader: - name: vel.data.loader + name: vel.data.dataset_loader batch_size: 128 num_workers: 4 diff --git a/examples-configs/classification/cifar10/cifar10_resnext_29_c1.yaml b/examples-configs/classification/cifar10/cifar10_resnext_29_c1.yaml index eabd968a..c007c5fe 100644 --- a/examples-configs/classification/cifar10/cifar10_resnext_29_c1.yaml +++ b/examples-configs/classification/cifar10/cifar10_resnext_29_c1.yaml @@ -19,7 +19,7 @@ source: loader: - name: vel.data.loader + name: vel.data.dataset_loader batch_size: 128 num_workers: 4 diff --git a/examples-configs/classification/cifar10/cifar10_resnext_29_c8.yaml b/examples-configs/classification/cifar10/cifar10_resnext_29_c8.yaml index 9e41c5aa..6e90611d 100644 --- a/examples-configs/classification/cifar10/cifar10_resnext_29_c8.yaml +++ b/examples-configs/classification/cifar10/cifar10_resnext_29_c8.yaml @@ -19,7 +19,7 @@ source: loader: - name: vel.data.loader + name: vel.data.dataset_loader batch_size: 128 
num_workers: 4 diff --git a/examples-configs/classification/imagenet_transfer/cats_vs_dogs_resnet34.yaml b/examples-configs/classification/imagenet_transfer/cats_vs_dogs_resnet34.yaml index d764ce9e..1b2ab425 100644 --- a/examples-configs/classification/imagenet_transfer/cats_vs_dogs_resnet34.yaml +++ b/examples-configs/classification/imagenet_transfer/cats_vs_dogs_resnet34.yaml @@ -16,7 +16,7 @@ source: loader: - name: vel.data.loader + name: vel.data.dataset_loader num_workers: 8 batch_size: 64 diff --git a/examples-configs/classification/mnist/mnist_cnn_01.yaml b/examples-configs/classification/mnist/mnist_cnn_01.yaml index e58d5dd9..aaa96cef 100644 --- a/examples-configs/classification/mnist/mnist_cnn_01.yaml +++ b/examples-configs/classification/mnist/mnist_cnn_01.yaml @@ -14,7 +14,7 @@ source: loader: - name: vel.data.loader + name: vel.data.dataset_loader batch_size: 128 num_workers: 4 diff --git a/notebooks/classic/mnist_cnn_01.ipynb b/notebooks/classic/mnist_cnn_01.ipynb new file mode 100644 index 00000000..164dd088 --- /dev/null +++ b/notebooks/classic/mnist_cnn_01.ipynb @@ -0,0 +1,158 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 7, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [ + "# Put these at the top of every notebook, to get automatic reloading and inline plotting\n", + "%reload_ext autoreload\n", + "%autoreload 2\n", + "%matplotlib inline" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Adding /mnt/fast-data/repos/waterboy to the PYTHONPATH\n" + ] + } + ], + "source": [ + "# Initialize pythonpath to include waterboy. I don't know the way how to walk around this in other way\n", + "import os.path\n", + "import sys\n", + "\n", + "path = os.path.abspath('.')\n", + "\n", + "while True:\n", + " \n", + " if os.path.basename(path) == 'waterboy':\n", + " if path not in sys.path:\n", + " print(f'Adding {path} to the PYTHONPATH')\n", + " sys.path.append(path)\n", + " break\n", + " else:\n", + " up_path = os.path.realpath(os.path.join(path, '..'))\n", + " if path == up_path:\n", + " break\n", + " else:\n", + " path = up_path " + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [ + "import waterboy.notebook as nb" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "" + ] + }, + "execution_count": 3, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "import importlib\n", + "importlib.reload(nb)" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [ + "model_config = nb.load(\"../../examples/classification/mnist/mnist_cnn_01.yaml\")" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "" + ] + }, + "execution_count": 6, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "import waterboy.internals.model_config\n", + "importlib.reload(waterboy.internals.model_config)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "waterboy", + "language": "python", + "name": "waterboy" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": 
"python", + "pygments_lexer": "ipython3", + "version": "3.6.5" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/notebooks/imagenet_transfer/cats_vs_dogs.ipynb b/notebooks/imagenet_transfer/cats_vs_dogs.ipynb new file mode 100644 index 00000000..a8d657d3 --- /dev/null +++ b/notebooks/imagenet_transfer/cats_vs_dogs.ipynb @@ -0,0 +1,345 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 1, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [ + "# Put these at the top of every notebook, to get automatic reloading and inline plotting\n", + "%reload_ext autoreload\n", + "%autoreload 2\n", + "%matplotlib inline" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Adding /mnt/fast-data/repos/waterboy to the PYTHONPATH\n" + ] + } + ], + "source": [ + "# Initialize pythonpath to include waterboy. I don't know the way how to walk around this in other way\n", + "import os.path\n", + "import sys\n", + "\n", + "path = os.path.abspath('.')\n", + "\n", + "while True:\n", + " \n", + " if os.path.basename(path) == 'waterboy':\n", + " if path not in sys.path:\n", + " print(f'Adding {path} to the PYTHONPATH')\n", + " sys.path.append(path)\n", + " break\n", + " else:\n", + " up_path = os.path.realpath(os.path.join(path, '..'))\n", + " if path == up_path:\n", + " break\n", + " else:\n", + " path = up_path " + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [ + "import torch\n", + "import waterboy.notebook as nb" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [ + "model_config = nb.load(\"../../examples/classification/imagenet_transfer/cats_vs_dogs_resnet34.yaml\", device='cuda:1')" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "================================================================================\n", + "Pytorch version: 0.4.0 cuda version 9.1.85 cudnn version 7102\n", + "Running model cats_vs_dogs_resnet34, run 0 -- command notebook -- device cuda:1\n", + "CUDA Device name GeForce GTX 1080 Ti\n", + "2018/06/10 - 12:19:38\n", + "================================================================================\n" + ] + } + ], + "source": [ + "model_config.banner(\"notebook\")" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": { + "collapsed": true, + "scrolled": true + }, + "outputs": [], + "source": [ + "model = model_config.provide(\"model\")" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [ + "model.model.load_state_dict(torch.load(\"/tmp/weight.pt\"))" + ] + }, + { + "cell_type": "code", + "execution_count": 27, + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "----------------------------------------------------------------\n", + " Layer (type) Output Shape Param #\n", + "================================================================\n", + " Conv2d-1 [-1, 64, 112, 112] 9,408\n", + " BatchNorm2d-2 [-1, 64, 112, 112] 128\n", + " ReLU-3 [-1, 64, 112, 112] 0\n", + " MaxPool2d-4 [-1, 64, 56, 56] 0\n", + " Conv2d-5 [-1, 64, 56, 56] 36,864\n", + " BatchNorm2d-6 [-1, 64, 56, 56] 128\n", + " ReLU-7 [-1, 64, 
56, 56] 0\n", + " Conv2d-8 [-1, 64, 56, 56] 36,864\n", + " BatchNorm2d-9 [-1, 64, 56, 56] 128\n", + " ReLU-10 [-1, 64, 56, 56] 0\n", + " BasicBlock-11 [-1, 64, 56, 56] 0\n", + " Conv2d-12 [-1, 64, 56, 56] 36,864\n", + " BatchNorm2d-13 [-1, 64, 56, 56] 128\n", + " ReLU-14 [-1, 64, 56, 56] 0\n", + " Conv2d-15 [-1, 64, 56, 56] 36,864\n", + " BatchNorm2d-16 [-1, 64, 56, 56] 128\n", + " ReLU-17 [-1, 64, 56, 56] 0\n", + " BasicBlock-18 [-1, 64, 56, 56] 0\n", + " Conv2d-19 [-1, 64, 56, 56] 36,864\n", + " BatchNorm2d-20 [-1, 64, 56, 56] 128\n", + " ReLU-21 [-1, 64, 56, 56] 0\n", + " Conv2d-22 [-1, 64, 56, 56] 36,864\n", + " BatchNorm2d-23 [-1, 64, 56, 56] 128\n", + " ReLU-24 [-1, 64, 56, 56] 0\n", + " BasicBlock-25 [-1, 64, 56, 56] 0\n", + " Conv2d-26 [-1, 128, 28, 28] 73,728\n", + " BatchNorm2d-27 [-1, 128, 28, 28] 256\n", + " ReLU-28 [-1, 128, 28, 28] 0\n", + " Conv2d-29 [-1, 128, 28, 28] 147,456\n", + " BatchNorm2d-30 [-1, 128, 28, 28] 256\n", + " Conv2d-31 [-1, 128, 28, 28] 8,192\n", + " BatchNorm2d-32 [-1, 128, 28, 28] 256\n", + " ReLU-33 [-1, 128, 28, 28] 0\n", + " BasicBlock-34 [-1, 128, 28, 28] 0\n", + " Conv2d-35 [-1, 128, 28, 28] 147,456\n", + " BatchNorm2d-36 [-1, 128, 28, 28] 256\n", + " ReLU-37 [-1, 128, 28, 28] 0\n", + " Conv2d-38 [-1, 128, 28, 28] 147,456\n", + " BatchNorm2d-39 [-1, 128, 28, 28] 256\n", + " ReLU-40 [-1, 128, 28, 28] 0\n", + " BasicBlock-41 [-1, 128, 28, 28] 0\n", + " Conv2d-42 [-1, 128, 28, 28] 147,456\n", + " BatchNorm2d-43 [-1, 128, 28, 28] 256\n", + " ReLU-44 [-1, 128, 28, 28] 0\n", + " Conv2d-45 [-1, 128, 28, 28] 147,456\n", + " BatchNorm2d-46 [-1, 128, 28, 28] 256\n", + " ReLU-47 [-1, 128, 28, 28] 0\n", + " BasicBlock-48 [-1, 128, 28, 28] 0\n", + " Conv2d-49 [-1, 128, 28, 28] 147,456\n", + " BatchNorm2d-50 [-1, 128, 28, 28] 256\n", + " ReLU-51 [-1, 128, 28, 28] 0\n", + " Conv2d-52 [-1, 128, 28, 28] 147,456\n", + " BatchNorm2d-53 [-1, 128, 28, 28] 256\n", + " ReLU-54 [-1, 128, 28, 28] 0\n", + " BasicBlock-55 [-1, 128, 28, 28] 0\n", + " Conv2d-56 [-1, 256, 14, 14] 294,912\n", + " BatchNorm2d-57 [-1, 256, 14, 14] 512\n", + " ReLU-58 [-1, 256, 14, 14] 0\n", + " Conv2d-59 [-1, 256, 14, 14] 589,824\n", + " BatchNorm2d-60 [-1, 256, 14, 14] 512\n", + " Conv2d-61 [-1, 256, 14, 14] 32,768\n", + " BatchNorm2d-62 [-1, 256, 14, 14] 512\n", + " ReLU-63 [-1, 256, 14, 14] 0\n", + " BasicBlock-64 [-1, 256, 14, 14] 0\n", + " Conv2d-65 [-1, 256, 14, 14] 589,824\n", + " BatchNorm2d-66 [-1, 256, 14, 14] 512\n", + " ReLU-67 [-1, 256, 14, 14] 0\n", + " Conv2d-68 [-1, 256, 14, 14] 589,824\n", + " BatchNorm2d-69 [-1, 256, 14, 14] 512\n", + " ReLU-70 [-1, 256, 14, 14] 0\n", + " BasicBlock-71 [-1, 256, 14, 14] 0\n", + " Conv2d-72 [-1, 256, 14, 14] 589,824\n", + " BatchNorm2d-73 [-1, 256, 14, 14] 512\n", + " ReLU-74 [-1, 256, 14, 14] 0\n", + " Conv2d-75 [-1, 256, 14, 14] 589,824\n", + " BatchNorm2d-76 [-1, 256, 14, 14] 512\n", + " ReLU-77 [-1, 256, 14, 14] 0\n", + " BasicBlock-78 [-1, 256, 14, 14] 0\n", + " Conv2d-79 [-1, 256, 14, 14] 589,824\n", + " BatchNorm2d-80 [-1, 256, 14, 14] 512\n", + " ReLU-81 [-1, 256, 14, 14] 0\n", + " Conv2d-82 [-1, 256, 14, 14] 589,824\n", + " BatchNorm2d-83 [-1, 256, 14, 14] 512\n", + " ReLU-84 [-1, 256, 14, 14] 0\n", + " BasicBlock-85 [-1, 256, 14, 14] 0\n", + " Conv2d-86 [-1, 256, 14, 14] 589,824\n", + " BatchNorm2d-87 [-1, 256, 14, 14] 512\n", + " ReLU-88 [-1, 256, 14, 14] 0\n", + " Conv2d-89 [-1, 256, 14, 14] 589,824\n", + " BatchNorm2d-90 [-1, 256, 14, 14] 512\n", + " ReLU-91 [-1, 256, 14, 14] 0\n", + " BasicBlock-92 [-1, 256, 14, 14] 0\n", + " 
Conv2d-93 [-1, 256, 14, 14] 589,824\n", + " BatchNorm2d-94 [-1, 256, 14, 14] 512\n", + " ReLU-95 [-1, 256, 14, 14] 0\n", + " Conv2d-96 [-1, 256, 14, 14] 589,824\n", + " BatchNorm2d-97 [-1, 256, 14, 14] 512\n", + " ReLU-98 [-1, 256, 14, 14] 0\n", + " BasicBlock-99 [-1, 256, 14, 14] 0\n", + " Conv2d-100 [-1, 512, 7, 7] 1,179,648\n", + " BatchNorm2d-101 [-1, 512, 7, 7] 1,024\n", + " ReLU-102 [-1, 512, 7, 7] 0\n", + " Conv2d-103 [-1, 512, 7, 7] 2,359,296\n", + " BatchNorm2d-104 [-1, 512, 7, 7] 1,024\n", + " Conv2d-105 [-1, 512, 7, 7] 131,072\n", + " BatchNorm2d-106 [-1, 512, 7, 7] 1,024\n", + " ReLU-107 [-1, 512, 7, 7] 0\n", + " BasicBlock-108 [-1, 512, 7, 7] 0\n", + " Conv2d-109 [-1, 512, 7, 7] 2,359,296\n", + " BatchNorm2d-110 [-1, 512, 7, 7] 1,024\n", + " ReLU-111 [-1, 512, 7, 7] 0\n", + " Conv2d-112 [-1, 512, 7, 7] 2,359,296\n", + " BatchNorm2d-113 [-1, 512, 7, 7] 1,024\n", + " ReLU-114 [-1, 512, 7, 7] 0\n", + " BasicBlock-115 [-1, 512, 7, 7] 0\n", + " Conv2d-116 [-1, 512, 7, 7] 2,359,296\n", + " BatchNorm2d-117 [-1, 512, 7, 7] 1,024\n", + " ReLU-118 [-1, 512, 7, 7] 0\n", + " Conv2d-119 [-1, 512, 7, 7] 2,359,296\n", + " BatchNorm2d-120 [-1, 512, 7, 7] 1,024\n", + " ReLU-121 [-1, 512, 7, 7] 0\n", + " BasicBlock-122 [-1, 512, 7, 7] 0\n", + "AdaptiveMaxPool2d-123 [-1, 512, 1, 1] 0\n", + "AdaptiveAvgPool2d-124 [-1, 512, 1, 1] 0\n", + "AdaptiveConcatPool2d-125 [-1, 1024, 1, 1] 0\n", + " Flatten-126 [-1, 1024] 0\n", + " BatchNorm1d-127 [-1, 1024] 2,048\n", + " Dropout-128 [-1, 1024] 0\n", + " Linear-129 [-1, 512] 524,800\n", + " ReLU-130 [-1, 512] 0\n", + " BatchNorm1d-131 [-1, 512] 1,024\n", + " Dropout-132 [-1, 512] 0\n", + " Linear-133 [-1, 2] 1,026\n", + " LogSoftmax-134 [-1, 2] 0\n", + "================================================================\n", + "Total params: 21,813,570\n", + "Trainable params: 21,813,570\n", + "Non-trainable params: 0\n", + "----------------------------------------------------------------\n" + ] + } + ], + "source": [ + "model.summary(input_size=(3, 224, 224))" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [ + "run_command = model_config.get_command('train')" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Train Epoch: 0001 [000000/023000 (00%)]\tloss: 0.854900 accuracy: 0.562500\n", + "Train Epoch: 0001 [006400/023000 (28%)]\tloss: 0.150858 accuracy: 0.939202\n", + "Train Epoch: 0001 [012800/023000 (56%)]\tloss: 0.117378 accuracy: 0.953358\n", + "Train Epoch: 0001 [019200/023000 (83%)]\tloss: 0.104418 accuracy: 0.958576\n", + "=>>>>>>>>>> EPOCH 1\n", + "Train accuracy 0.960822 loss 0.100100\n", + "Validation accuracy 0.983887 loss 0.035918\n", + "=>>>>>>>>>> DONE\n", + "Train Epoch: 0002 [000000/023000 (00%)]\tloss: 0.058765 accuracy: 0.953125\n", + "Train Epoch: 0002 [006400/023000 (28%)]\tloss: 0.076278 accuracy: 0.971225\n", + "Train Epoch: 0002 [012800/023000 (56%)]\tloss: 0.071321 accuracy: 0.973025\n", + "Train Epoch: 0002 [019200/023000 (83%)]\tloss: 0.069153 accuracy: 0.973681\n", + "=>>>>>>>>>> EPOCH 2\n", + "Train accuracy 0.973799 loss 0.069813\n", + "Validation accuracy 0.983887 loss 0.037188\n", + "=>>>>>>>>>> DONE\n" + ] + } + ], + "source": [ + "run_command.run()" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "waterboy", + "language": "python", + "name": "waterboy" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + 
"version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.5" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/vel/command/augvis_command.py b/vel/command/augvis_command.py index b4a74fcd..880830b9 100644 --- a/vel/command/augvis_command.py +++ b/vel/command/augvis_command.py @@ -1,12 +1,12 @@ import matplotlib.pyplot as plt import numpy as np -from vel.data import Loader +from vel.data import DatasetLoader class AugmentationVisualizationCommand: """ Visualize augmentations """ - def __init__(self, loader: Loader, samples, cases): + def __init__(self, loader: DatasetLoader, samples, cases): self.loader = loader self.samples = samples self.cases = cases @@ -34,6 +34,6 @@ def run(self): plt.show() -def create(loader: Loader, samples: int, cases: int): +def create(loader: DatasetLoader, samples: int, cases: int): """ Vel factory function """ return AugmentationVisualizationCommand(loader, samples, cases) diff --git a/vel/command/phase_train_command.py b/vel/command/phase_train_command.py index b80a1b7a..18566ebf 100644 --- a/vel/command/phase_train_command.py +++ b/vel/command/phase_train_command.py @@ -14,7 +14,7 @@ class PhaseTrainCommand: """ Training command - learn according to a set of phases """ - def __init__(self, model_config: api.ModelConfig, model_factory: api.ModelFactory, loader: data.Loader, + def __init__(self, model_config: api.ModelConfig, model_factory: api.ModelFactory, loader: data.DatasetLoader, storage: api.Storage, phases: typing.List[train.TrainPhase], callbacks=None, restart=True): self.model_config = model_config diff --git a/vel/command/train_command.py b/vel/command/train_command.py index 6a65d993..9d1a1965 100644 --- a/vel/command/train_command.py +++ b/vel/command/train_command.py @@ -14,7 +14,7 @@ class SimpleTrainCommand: def __init__(self, epochs: int, model_config: api.ModelConfig, model_factory: api.ModelFactory, optimizer_factory: api.OptimizerFactory, scheduler_factory: typing.Optional[api.SchedulerFactory], - loader: data.Loader, storage: api.Storage, callbacks: typing.Optional[typing.List[api.Callback]], + loader: data.DatasetLoader, storage: api.Storage, callbacks: typing.Optional[typing.List[api.Callback]], max_grad_norm: typing.Optional[float]): self.epochs = epochs self.model_config = model_config diff --git a/vel/data/__init__.py b/vel/data/__init__.py index 41e0ebd8..122edbd6 100644 --- a/vel/data/__init__.py +++ b/vel/data/__init__.py @@ -1,2 +1,2 @@ from .dataflow import DataFlow -from .loader import Loader +from .dataset_loader import DatasetLoader diff --git a/vel/data/loader.py b/vel/data/dataset_loader.py similarity index 97% rename from vel/data/loader.py rename to vel/data/dataset_loader.py index a75a3453..8228cf52 100644 --- a/vel/data/loader.py +++ b/vel/data/dataset_loader.py @@ -6,7 +6,7 @@ from .dataflow import DataFlow -class Loader: +class DatasetLoader: """ Loads data from a data source to serve it to the model """ def __init__(self, source: Source, batch_size: int, num_workers: int, @@ -65,7 +65,7 @@ def size(self): def create(source: Source, batch_size: int, num_workers: int = 0, transformations: typing.Optional[list] = None): """ Vel factory function """ - return Loader( + return DatasetLoader( source=source, batch_size=batch_size, num_workers=num_workers, diff --git a/vel/data/source/nlp/text_url.py b/vel/data/source/nlp/text_url.py index 5478837c..5f3d61eb 100644 --- 
a/vel/data/source/nlp/text_url.py +++ b/vel/data/source/nlp/text_url.py @@ -9,88 +9,12 @@ from vel.api import Source -class TextIterator: - """ Iterator over a text dataset """ - def __init__(self, padded_sequence, sequence_length, batch_size, alphabet_size, num_batches): - self.sequence_length = sequence_length - self.batch_size = batch_size - self.num_batches = num_batches - self.alphabet_size = alphabet_size - - self.padded_sequence = padded_sequence[:-1].reshape(self.num_batches * self.batch_size, self.sequence_length) - self.padded_sequence_next = padded_sequence[1:].reshape( - self.num_batches * self.batch_size, self.sequence_length - ) - - self.sequence_indices = np.arange(self.num_batches * self.batch_size) - - np.random.shuffle(self.sequence_indices) - - self.sequence_indices = self.sequence_indices.reshape(self.num_batches, self.batch_size) - - self.batch_idx = 0 - - def __iter__(self): - return self - - def __next__(self): - if self.batch_idx == self.num_batches: - raise StopIteration - else: - input_data = torch.from_numpy(self.padded_sequence[self.sequence_indices[self.batch_idx]]) - target_data = torch.from_numpy(self.padded_sequence_next[self.sequence_indices[self.batch_idx]]) - - self.batch_idx += 1 - - return input_data.to(torch.long), target_data.to(torch.long) - - -class TextLoader: - """ Loader of sequential text data """ - def __init__(self, sequence, sequence_length, batch_size, alphabet_size): - self.sequence = sequence - self.sequence_length = sequence_length - self.batch_size = batch_size - self.alphabet_size = alphabet_size - - # 1 is for the last element as the target needs to be shifted by 1 - residual_length = (len(self.sequence) - self.sequence_length - 1) - full_size = self.sequence_length * self.batch_size - - rest = residual_length % full_size - self.num_batches = residual_length // full_size - - if rest > 0: - self.sequence = np.pad(self.sequence, (0, full_size - rest), mode='constant') - self.num_batches += 1 - - def __iter__(self): - initial_offset = np.random.randint(self.sequence_length) - relevant_subsequence = self.sequence[ - # 1 is for the last element as the target needs to be shifted by 1 - initial_offset:self.num_batches * self.sequence_length * self.batch_size + initial_offset + 1 - ] - - return TextIterator( - relevant_subsequence, self.sequence_length, self.batch_size, - alphabet_size=self.alphabet_size, - num_batches=self.num_batches - ) - - def __len__(self): - """ Number of batches in this loader """ - return self.num_batches - - class TextUrlSource(Source): """ Download text from source and model it character by character """ - def __init__(self, url, absolute_data_path, sequence_length, batch_size, train_val_split=0.8): - super().__init__() + def __init__(self, url, absolute_data_path, train_val_split=0.8): self.url = url self.data_path = absolute_data_path - self.sequence_length = sequence_length - self.batch_size = batch_size self.train_val_split = train_val_split self.text_path = os.path.join(self.data_path, 'text.txt') @@ -99,55 +23,20 @@ def __init__(self, url, absolute_data_path, sequence_length, batch_size, train_v self.data_dict = self.download() content_encoded = self.data_dict['content_encoded'] - alphabet_size = len(self.data_dict['alphabet']) split_idx = int(len(content_encoded) * train_val_split) - self._train_loader = TextLoader( - sequence=content_encoded[:split_idx], - sequence_length=sequence_length, - batch_size=batch_size, - alphabet_size=alphabet_size, - ) - - self._val_loader = TextLoader( - 
sequence=content_encoded[split_idx:], - sequence_length=sequence_length, - batch_size=batch_size, - alphabet_size=alphabet_size, + super().__init__( + train=content_encoded[:split_idx], + validation=content_encoded[split_idx:], + metadata={ + 'alphabet': self.data_dict['alphabet'], + 'character_to_index': self.data_dict['character_to_index'], + 'index_to_character': self.data_dict['index_to_character'] + } ) - def encode_character(self, char): - return self.data_dict['character_to_index'][char] - - def decode_character(self, index): - return self.data_dict['index_to_character'][index] - - def train_loader(self): - """ PyTorch loader of training data """ - return self._train_loader - - def val_loader(self): - """ PyTorch loader of validation data """ - return self._val_loader - - def train_dataset(self): - """ Return the training dataset """ - return None - - def val_dataset(self): - """ Return the validation dataset """ - return None - - def train_iterations_per_epoch(self): - """ Return number of iterations per epoch """ - return len(self._train_loader) - - def val_iterations_per_epoch(self): - """ Return number of iterations per epoch - validation """ - return len(self._val_loader) - - def download(self): + def download(self) -> dict: """ Make sure data file is downloaded and stored properly """ if not os.path.exists(self.data_path): # Create if it doesn't exist @@ -188,7 +77,7 @@ def download(self): return data_dict -def create(model_config, url, local_dir, sequence_length=64, batch_size=64, train_val_split=0.8): +def create(model_config, url, local_dir, train_val_split=0.8): """ Vel factory function """ if not os.path.isabs(local_dir): local_dir = model_config.project_data_dir(local_dir) @@ -196,7 +85,5 @@ def create(model_config, url, local_dir, sequence_length=64, batch_size=64, trai return TextUrlSource( url, absolute_data_path=local_dir, - sequence_length=sequence_length, - batch_size=batch_size, train_val_split=train_val_split, - ) +) diff --git a/vel/data/text_character_loader.py b/vel/data/text_character_loader.py new file mode 100644 index 00000000..d193bbb8 --- /dev/null +++ b/vel/data/text_character_loader.py @@ -0,0 +1,105 @@ +import numpy as np +import torch + +from vel.api import Source + + +class TextIterator: + """ Iterator over a text dataset """ + def __init__(self, padded_sequence, sequence_length, batch_size, alphabet_size, num_batches): + self.sequence_length = sequence_length + self.batch_size = batch_size + self.num_batches = num_batches + self.alphabet_size = alphabet_size + + self.padded_sequence = padded_sequence[:-1].reshape(self.num_batches * self.batch_size, self.sequence_length) + self.padded_sequence_next = padded_sequence[1:].reshape( + self.num_batches * self.batch_size, self.sequence_length + ) + + self.sequence_indices = np.arange(self.num_batches * self.batch_size) + + np.random.shuffle(self.sequence_indices) + + self.sequence_indices = self.sequence_indices.reshape(self.num_batches, self.batch_size) + + self.batch_idx = 0 + + def __iter__(self): + return self + + def __next__(self): + if self.batch_idx == self.num_batches: + raise StopIteration + else: + input_data = torch.from_numpy(self.padded_sequence[self.sequence_indices[self.batch_idx]]) + target_data = torch.from_numpy(self.padded_sequence_next[self.sequence_indices[self.batch_idx]]) + + self.batch_idx += 1 + + return input_data.to(torch.long), target_data.to(torch.long) + + +class TextLoader: + """ Loader of sequential text data """ + def __init__(self, sequence, sequence_length, batch_size, 
alphabet_size): + self.sequence = sequence + self.sequence_length = sequence_length + self.batch_size = batch_size + self.alphabet_size = alphabet_size + + # 1 is for the last element as the target needs to be shifted by 1 + residual_length = (len(self.sequence) - self.sequence_length - 1) + full_size = self.sequence_length * self.batch_size + + rest = residual_length % full_size + self.num_batches = residual_length // full_size + + if rest > 0: + self.sequence = np.pad(self.sequence, (0, full_size - rest), mode='constant') + self.num_batches += 1 + + def __iter__(self): + initial_offset = np.random.randint(self.sequence_length) + relevant_subsequence = self.sequence[ + # 1 is for the last element as the target needs to be shifted by 1 + initial_offset:self.num_batches * self.sequence_length * self.batch_size + initial_offset + 1 + ] + + return TextIterator( + relevant_subsequence, self.sequence_length, self.batch_size, + alphabet_size=self.alphabet_size, + num_batches=self.num_batches + ) + + def __len__(self): + """ Number of batches in this loader """ + return self.num_batches + + +class TextCharacterLoader: + """ Loader for the text character data source """ + + def __init__(self, source, sequence_length: int, batch_size: int): + self.source = source + self.sequence_length = sequence_length + self.batch_size = batch_size + self.alphabet = self.source.metadata['alphabet'] + + self._loaders = { + 'train': TextLoader(self.source.train, self.sequence_length, self.batch_size, len(self.alphabet)) + } + + +def create(source: Source, sequence_length: int = 64, batch_size: int = 64): + """ Vel factory function """ + return TextCharacterLoader( + source=source, + sequence_length=sequence_length, + batch_size=batch_size + ) + + + + + diff --git a/vel/train/phase/generic.py b/vel/train/phase/generic.py index f57e9923..db790fed 100644 --- a/vel/train/phase/generic.py +++ b/vel/train/phase/generic.py @@ -1,5 +1,5 @@ from vel.api import TrainingInfo, EpochInfo -from vel.data import Loader +from vel.data import DatasetLoader from vel.train import TrainPhase @@ -18,7 +18,7 @@ def __init__(self, lr, epochs, optimizer_factory): def number_of_epochs(self) -> int: return self.epochs - def set_up_phase(self, training_info, model, loader: Loader): + def set_up_phase(self, training_info, model, loader: DatasetLoader): """ Prepare the phase for learning """ self._optimizer_instance = self.optimizer_factory.instantiate(model) self._loader = loader diff --git a/vel/train/train_phase.py b/vel/train/train_phase.py index 2515d4bb..daeb5733 100644 --- a/vel/train/train_phase.py +++ b/vel/train/train_phase.py @@ -1,7 +1,7 @@ from torch.optim import Optimizer from vel.api import TrainingInfo, EpochInfo, Model -from vel.data import Loader +from vel.data import DatasetLoader from .trainer import Trainer @@ -14,7 +14,7 @@ def number_of_epochs(self) -> int: """ How many epochs does this phase take """ raise NotImplementedError - def set_up_phase(self, training_info: TrainingInfo, model: Model, loader: Loader) -> Optimizer: + def set_up_phase(self, training_info: TrainingInfo, model: Model, loader: DatasetLoader) -> Optimizer: """ Prepare the phase for learning, returns phase optimizer """ pass diff --git a/vel/train/trainer.py b/vel/train/trainer.py index 89284147..9bddd3d5 100644 --- a/vel/train/trainer.py +++ b/vel/train/trainer.py @@ -5,7 +5,7 @@ import typing from vel.api import GradientModel, TrainingInfo, EpochInfo, BatchInfo -from vel.data import Loader +from vel.data import DatasetLoader from vel.util.tensor_util 
import to_device @@ -45,7 +45,7 @@ def initialize_training(self, training_info: TrainingInfo, model_state=None, hid else: self.model.load_state_dict(model_state) - def run_epoch(self, epoch_info: EpochInfo, loader: Loader): + def run_epoch(self, epoch_info: EpochInfo, loader: DatasetLoader): """ Run full epoch of learning """ epoch_info.on_epoch_begin() @@ -60,7 +60,7 @@ def run_epoch(self, epoch_info: EpochInfo, loader: Loader): epoch_info.on_epoch_end() - def train_epoch(self, epoch_info, loader: Loader, interactive=True): + def train_epoch(self, epoch_info, loader: DatasetLoader, interactive=True): """ Run a single training epoch """ self.train() @@ -79,7 +79,7 @@ def train_epoch(self, epoch_info, loader: Loader, interactive=True): iterator.set_postfix(loss=epoch_info.result_accumulator.intermediate_value('loss')) - def validation_epoch(self, epoch_info, loader: Loader, interactive=True): + def validation_epoch(self, epoch_info, loader: DatasetLoader, interactive=True): """ Run a single evaluation epoch """ self.eval() From 055c007c661f7909a44d2c83602501ce67c8fc95 Mon Sep 17 00:00:00 2001 From: Jerry Tworek Date: Wed, 26 Jun 2019 13:00:49 -0700 Subject: [PATCH 065/162] Finished fixing shakespeare text generation. --- .../nlp/generation/gen_shakespeare_gru.yaml | 16 +++++--- .../gen_shakespeare_gru_embedding.yaml | 16 +++++--- .../nlp/generation/gen_shakespeare_lstm.yaml | 16 +++++--- .../gen_shakespeare_lstm_embedding.yaml | 16 +++++--- vel/data/text_character_loader.py | 41 ++++++++++++++++--- vel/module/rnn_layer.py | 4 ++ vel/util/tensor_util.py | 2 + 7 files changed, 81 insertions(+), 30 deletions(-) diff --git a/examples-configs/nlp/generation/gen_shakespeare_gru.yaml b/examples-configs/nlp/generation/gen_shakespeare_gru.yaml index 4e54fb31..2ae82918 100644 --- a/examples-configs/nlp/generation/gen_shakespeare_gru.yaml +++ b/examples-configs/nlp/generation/gen_shakespeare_gru.yaml @@ -2,19 +2,23 @@ name: 'gen_shakespeare_gru' source: - name: vel.sources.nlp.text_url + name: vel.data.source.nlp.text_url # Andrej Karpathy built a small (4.4mb) file with combined all works of Shakespeare url: 'https://cs.stanford.edu/people/karpathy/char-rnn/shakespeare_input.txt' local_dir: './rnn_shakespeare' + + +loader: + name: vel.data.text_character_loader sequence_length: 128 batch_size: 64 model: - name: vel.models.rnn.multilayer_rnn_sequence_model + name: vel.model.rnn.multilayer_rnn_sequence_model input_block: - name: vel.modules.input.one_hot_encoding + name: vel.module.input.one_hot_encoding alphabet_size: 68 # Size of the alphabet + 1 hidden_layers: [512, 512, 512] @@ -24,19 +28,19 @@ model: optimizer: - name: vel.optimizers.adam + name: vel.optimizer.adam lr: 1.0e-3 epsilon: 1.0e-5 commands: train: - name: vel.commands.train_command + name: vel.command.train_command max_grad_norm: 0.5 epochs: 20 generate: - name: vel.commands.rnn.generate_text + name: vel.command.rnn.generate_text start_letter: !param start_letter = 'A' length: !param length = 500 temperature: !param temperature = 0.8 diff --git a/examples-configs/nlp/generation/gen_shakespeare_gru_embedding.yaml b/examples-configs/nlp/generation/gen_shakespeare_gru_embedding.yaml index 9603cb8d..70e1a961 100644 --- a/examples-configs/nlp/generation/gen_shakespeare_gru_embedding.yaml +++ b/examples-configs/nlp/generation/gen_shakespeare_gru_embedding.yaml @@ -2,19 +2,23 @@ name: 'gen_shakespeare_gru_embedding' source: - name: vel.sources.nlp.text_url + name: vel.data.source.nlp.text_url # Andrej Karpathy built a small (4.4mb) file with 
combined all works of Shakespeare url: 'https://cs.stanford.edu/people/karpathy/char-rnn/shakespeare_input.txt' local_dir: './rnn_shakespeare' + + +loader: + name: vel.data.text_character_loader sequence_length: 128 batch_size: 64 model: - name: vel.models.rnn.multilayer_rnn_sequence_model + name: vel.model.rnn.multilayer_rnn_sequence_model input_block: - name: vel.modules.input.embedding + name: vel.module.input.embedding alphabet_size: 68 # Size of the alphabet + 1 output_dim: 512 # Embedding dimension @@ -25,19 +29,19 @@ model: optimizer: - name: vel.optimizers.adam + name: vel.optimizer.adam lr: 1.0e-3 epsilon: 1.0e-5 commands: train: - name: vel.commands.train_command + name: vel.command.train_command max_grad_norm: 0.5 epochs: 20 generate: - name: vel.commands.rnn.generate_text + name: vel.command.rnn.generate_text start_letter: !param start_letter = 'A' length: !param length = 500 temperature: !param temperature = 0.8 diff --git a/examples-configs/nlp/generation/gen_shakespeare_lstm.yaml b/examples-configs/nlp/generation/gen_shakespeare_lstm.yaml index 56129c6f..0f06d487 100644 --- a/examples-configs/nlp/generation/gen_shakespeare_lstm.yaml +++ b/examples-configs/nlp/generation/gen_shakespeare_lstm.yaml @@ -2,19 +2,23 @@ name: 'gen_shakespeare_lstm' source: - name: vel.sources.nlp.text_url + name: vel.data.source.nlp.text_url # Andrej Karpathy built a small (4.4mb) file with combined all works of Shakespeare url: 'https://cs.stanford.edu/people/karpathy/char-rnn/shakespeare_input.txt' local_dir: './rnn_shakespeare' + + +loader: + name: vel.data.text_character_loader sequence_length: 128 batch_size: 64 model: - name: vel.models.rnn.multilayer_rnn_sequence_model + name: vel.model.rnn.multilayer_rnn_sequence_model input_block: - name: vel.modules.input.one_hot_encoding + name: vel.module.input.one_hot_encoding alphabet_size: 68 # Size of the alphabet + 1 hidden_layers: [512, 512, 512] @@ -24,19 +28,19 @@ model: optimizer: - name: vel.optimizers.adam + name: vel.optimizer.adam lr: 1.0e-3 epsilon: 1.0e-5 commands: train: - name: vel.commands.train_command + name: vel.command.train_command max_grad_norm: 0.5 epochs: 20 generate: - name: vel.commands.rnn.generate_text + name: vel.command.rnn.generate_text start_letter: !param start_letter = 'A' length: !param length = 500 temperature: !param temperature = 0.8 diff --git a/examples-configs/nlp/generation/gen_shakespeare_lstm_embedding.yaml b/examples-configs/nlp/generation/gen_shakespeare_lstm_embedding.yaml index 79a91d57..c84af0d0 100644 --- a/examples-configs/nlp/generation/gen_shakespeare_lstm_embedding.yaml +++ b/examples-configs/nlp/generation/gen_shakespeare_lstm_embedding.yaml @@ -2,19 +2,23 @@ name: 'gen_shakespeare_lstm_embedding' source: - name: vel.sources.nlp.text_url + name: vel.data.source.nlp.text_url # Andrej Karpathy built a small (4.4mb) file with combined all works of Shakespeare url: 'https://cs.stanford.edu/people/karpathy/char-rnn/shakespeare_input.txt' local_dir: './rnn_shakespeare' + + +loader: + name: vel.data.text_character_loader sequence_length: 128 batch_size: 64 model: - name: vel.models.rnn.multilayer_rnn_sequence_model + name: vel.model.rnn.multilayer_rnn_sequence_model input_block: - name: vel.modules.input.embedding + name: vel.module.input.embedding alphabet_size: 68 # Size of the alphabet + 1 output_dim: 512 # Embedding dimension @@ -25,19 +29,19 @@ model: optimizer: - name: vel.optimizers.adam + name: vel.optimizer.adam lr: 1.0e-3 epsilon: 1.0e-5 commands: train: - name: vel.commands.train_command + 
name: vel.command.train_command max_grad_norm: 0.5 epochs: 20 generate: - name: vel.commands.rnn.generate_text + name: vel.command.rnn.generate_text start_letter: !param start_letter = 'A' length: !param length = 500 temperature: !param temperature = 0.8 diff --git a/vel/data/text_character_loader.py b/vel/data/text_character_loader.py index d193bbb8..92f9d405 100644 --- a/vel/data/text_character_loader.py +++ b/vel/data/text_character_loader.py @@ -37,11 +37,11 @@ def __next__(self): self.batch_idx += 1 - return input_data.to(torch.long), target_data.to(torch.long) + return {'x': input_data.to(torch.long), 'y': target_data.to(torch.long)} class TextLoader: - """ Loader of sequential text data """ + """ Creates iterators over a sequential block of text """ def __init__(self, sequence, sequence_length, batch_size, alphabet_size): self.sequence = sequence self.sequence_length = sequence_length @@ -62,9 +62,9 @@ def __init__(self, sequence, sequence_length, batch_size, alphabet_size): def __iter__(self): initial_offset = np.random.randint(self.sequence_length) relevant_subsequence = self.sequence[ - # 1 is for the last element as the target needs to be shifted by 1 - initial_offset:self.num_batches * self.sequence_length * self.batch_size + initial_offset + 1 - ] + # 1 is for the last element as the target needs to be shifted by 1 + initial_offset:self.num_batches * self.sequence_length * self.batch_size + initial_offset + 1 + ] return TextIterator( relevant_subsequence, self.sequence_length, self.batch_size, @@ -86,10 +86,39 @@ def __init__(self, source, sequence_length: int, batch_size: int): self.batch_size = batch_size self.alphabet = self.source.metadata['alphabet'] + self.train_loader = TextLoader(self.source.train, self.sequence_length, self.batch_size, len(self.alphabet)) + self.val_loader = TextLoader(self.source.validation, self.sequence_length, self.batch_size, len(self.alphabet)) + + if self.source.test is None: + self.test_loader = None + else: + self.test_loader = TextLoader(self.source.test, self.sequence_length, self.batch_size, len(self.alphabet)) + self._loaders = { - 'train': TextLoader(self.source.train, self.sequence_length, self.batch_size, len(self.alphabet)) + 'train': self.train_loader, + 'val': self.val_loader, + 'test': self.test_loader + } + + self._loader_sizes = { + 'train': len(self.train_loader), + 'val': len(self.val_loader), + 'test': 0 if self.test_loader is None else len(self.test_loader) } + def __getitem__(self, item): + return self._loaders[item] + + @property + def loader(self): + """ Get a dict of loaders """ + return self._loaders + + @property + def size(self): + """ Get a dict of sizes of each loader """ + return self._loader_sizes + def create(source: Source, sequence_length: int = 64, batch_size: int = 64): """ Vel factory function """ diff --git a/vel/module/rnn_layer.py b/vel/module/rnn_layer.py index 54d61904..0c5b2c85 100644 --- a/vel/module/rnn_layer.py +++ b/vel/module/rnn_layer.py @@ -57,6 +57,10 @@ def state_dim(self) -> int: else: return self.hidden_size + def zero_state(self, batch_size): + """ State for the model """ + return torch.zeros(batch_size, self.state_dim) + def forward(self, input_data, state=None): if state is None: if self.bidirectional: diff --git a/vel/util/tensor_util.py b/vel/util/tensor_util.py index b0683099..554ce2d2 100644 --- a/vel/util/tensor_util.py +++ b/vel/util/tensor_util.py @@ -28,5 +28,7 @@ def to_device(tensor, device: torch.device): return {k: to_device(v, device) for k, v in tensor.items()} elif 
isinstance(tensor, list): return [to_device(v, device) for v in tensor] + elif isinstance(tensor, tuple): + return tuple(to_device(v, device) for v in tensor) else: raise NotImplementedError From 634fe77db6322e878c4a9e37fe5ce0b5fce214a6 Mon Sep 17 00:00:00 2001 From: Jerry Tworek Date: Thu, 27 Jun 2019 13:23:35 -0700 Subject: [PATCH 066/162] Fixed a few broken imports. --- vel/metric/base/__init__.py | 4 ++++ vel/rl/algo/policy_gradient/a2c.py | 4 ++-- vel/rl/algo/policy_gradient/acer.py | 2 +- vel/rl/algo/policy_gradient/ddpg.py | 2 +- vel/rl/algo/policy_gradient/ppo.py | 6 +++--- vel/rl/algo/policy_gradient/trpo.py | 4 ++-- vel/rl/command/rl_train_command.py | 4 ++-- vel/scheduler/linear_batch_scaler.py | 4 +++- 8 files changed, 18 insertions(+), 12 deletions(-) diff --git a/vel/metric/base/__init__.py b/vel/metric/base/__init__.py index e69de29b..20f27c6d 100644 --- a/vel/metric/base/__init__.py +++ b/vel/metric/base/__init__.py @@ -0,0 +1,4 @@ +from .averaging_metric import AveragingSupervisedMetric, AveragingNamedMetric, AveragingMetric +from .base_metric import BaseMetric, MetricKey +from .summing_metric import SummingMetric, SummingNamedMetric +from .value_metric import ValueMetric \ No newline at end of file diff --git a/vel/rl/algo/policy_gradient/a2c.py b/vel/rl/algo/policy_gradient/a2c.py index 86485184..fdbbbb61 100644 --- a/vel/rl/algo/policy_gradient/a2c.py +++ b/vel/rl/algo/policy_gradient/a2c.py @@ -1,8 +1,8 @@ import torch import torch.nn.functional as F -from vel.metric.averaging_metric import AveragingNamedMetric -from vel.math.function import explained_variance +from vel.metric.base import AveragingNamedMetric +from vel.calc.function import explained_variance from vel.rl.api import OptimizerAlgoBase, Rollout, Trajectories from vel.rl.discount_bootstrap import discount_bootstrap_gae diff --git a/vel/rl/algo/policy_gradient/acer.py b/vel/rl/algo/policy_gradient/acer.py index 9426957d..6d78a603 100644 --- a/vel/rl/algo/policy_gradient/acer.py +++ b/vel/rl/algo/policy_gradient/acer.py @@ -1,7 +1,7 @@ import torch import torch.nn.functional as F -from vel.metric.averaging_metric import AveragingNamedMetric +from vel.metric.base import AveragingNamedMetric from vel.rl.api import Trajectories, OptimizerAlgoBase diff --git a/vel/rl/algo/policy_gradient/ddpg.py b/vel/rl/algo/policy_gradient/ddpg.py index 2150cab4..1e47b5a0 100644 --- a/vel/rl/algo/policy_gradient/ddpg.py +++ b/vel/rl/algo/policy_gradient/ddpg.py @@ -4,7 +4,7 @@ import torch.nn.functional as F from vel.rl.api import OptimizerAlgoBase -from vel.metric.averaging_metric import AveragingNamedMetric +from vel.metric.base import AveragingNamedMetric class DeepDeterministicPolicyGradient(OptimizerAlgoBase): diff --git a/vel/rl/algo/policy_gradient/ppo.py b/vel/rl/algo/policy_gradient/ppo.py index d835cf6a..3ef76c72 100644 --- a/vel/rl/algo/policy_gradient/ppo.py +++ b/vel/rl/algo/policy_gradient/ppo.py @@ -2,11 +2,11 @@ import numbers -from vel.math.function import explained_variance -from vel.metric.averaging_metric import AveragingNamedMetric +from vel.calc.function import explained_variance +from vel.function.constant import ConstantSchedule +from vel.metric.base import AveragingNamedMetric from vel.rl.api import OptimizerAlgoBase, Rollout, Trajectories from vel.rl.discount_bootstrap import discount_bootstrap_gae -from vel.schedule.constant import ConstantSchedule class PpoPolicyGradient(OptimizerAlgoBase): diff --git a/vel/rl/algo/policy_gradient/trpo.py b/vel/rl/algo/policy_gradient/trpo.py index 2922e128..f4fa4206 
100644 --- a/vel/rl/algo/policy_gradient/trpo.py +++ b/vel/rl/algo/policy_gradient/trpo.py @@ -4,8 +4,8 @@ import torch.nn.functional as F import torch.nn.utils -from vel.metric.averaging_metric import AveragingNamedMetric -from vel.math.function import explained_variance +from vel.calc.function import explained_variance +from vel.metric.base import AveragingNamedMetric from vel.rl.api import AlgoBase, Rollout, Trajectories from vel.rl.discount_bootstrap import discount_bootstrap_gae diff --git a/vel/rl/command/rl_train_command.py b/vel/rl/command/rl_train_command.py index 0e852826..f0363cbb 100644 --- a/vel/rl/command/rl_train_command.py +++ b/vel/rl/command/rl_train_command.py @@ -18,14 +18,14 @@ def on_initialization(self, training_info: TrainingInfo): training_info['frames'] = 0 - def on_batch_begin(self, batch_info: BatchInfo): + def on_batch_begin(self, batch_info: BatchInfo, dataset: typing.Optional[str] = None): if 'total_frames' in batch_info.training_info: # Track progress during learning batch_info['progress'] = ( float(batch_info.training_info['frames']) / batch_info.training_info['total_frames'] ) - def on_batch_end(self, batch_info: BatchInfo): + def on_batch_end(self, batch_info: BatchInfo, dataset: typing.Optional[str] = None): batch_info.training_info['frames'] += batch_info['frames'] def write_state_dict(self, training_info: TrainingInfo, hidden_state_dict: dict): diff --git a/vel/scheduler/linear_batch_scaler.py b/vel/scheduler/linear_batch_scaler.py index 584251f4..203b0980 100644 --- a/vel/scheduler/linear_batch_scaler.py +++ b/vel/scheduler/linear_batch_scaler.py @@ -1,3 +1,5 @@ +import typing + import vel.api as base from vel.api import BatchInfo, TrainingInfo @@ -18,7 +20,7 @@ def write_state_dict(self, training_info: TrainingInfo, hidden_state_dict: dict) def load_state_dict(self, training_info: TrainingInfo, hidden_state_dict: dict): self.starting_lr = hidden_state_dict['linear_batch_scaler/starting_lr'] - def on_batch_begin(self, batch_info: BatchInfo): + def on_batch_begin(self, batch_info: BatchInfo, dataset: typing.Optional[str] = None): for starting_lr, param_group in zip(self.starting_lr, self.optimizer.param_groups): param_group['lr'] = starting_lr * (1.0 - batch_info['progress']) From 8c7aae17ae53ecb919301586bef09c9a413dc7c7 Mon Sep 17 00:00:00 2001 From: Million Integrals Date: Thu, 12 Sep 2019 12:49:18 -0700 Subject: [PATCH 067/162] Updated dependency in makefile. --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 3f2fc3d3..a7e44717 100644 --- a/Makefile +++ b/Makefile @@ -39,7 +39,7 @@ test: partestc: pytest -n 4 . 
-requirements.txt: +requirements.txt: requirements.in pip-compile requirements.in requpgrade: From eb3029992523dc6495c0d0449630f6e390577b39 Mon Sep 17 00:00:00 2001 From: Million Integrals Date: Thu, 12 Sep 2019 14:49:11 -0700 Subject: [PATCH 068/162] Updated to PyTorch 1.2 --- requirements.in | 2 +- requirements.txt | 58 ++++++++++++++++++++++++------------------------ 2 files changed, 30 insertions(+), 30 deletions(-) diff --git a/requirements.in b/requirements.in index a1fd3edb..6ffb91bd 100644 --- a/requirements.in +++ b/requirements.in @@ -6,7 +6,7 @@ opencv-python pandas pyyaml scikit-learn -torch~=1.1 +torch~=1.2 torchtext torchvision tqdm diff --git a/requirements.txt b/requirements.txt index a0cce232..fc2a4418 100644 --- a/requirements.txt +++ b/requirements.txt @@ -4,53 +4,53 @@ # # pip-compile # -atari-py==0.2.0 # via gym +atari-py==0.2.6 # via gym atomicwrites==1.3.0 # via pytest attrs==19.1.0 box2d-py==2.3.8 # via gym -certifi==2019.6.16 # via requests +certifi==2019.9.11 # via requests chardet==3.0.4 # via requests -cloudpickle==1.2.1 +cloudpickle==1.2.2 cycler==0.10.0 # via matplotlib dnspython==1.16.0 future==0.17.1 # via pyglet -gym[atari,box2d,classic_control]==0.13.0 +gym[atari,box2d,classic_control]==0.14.0 idna==2.8 # via requests -importlib-metadata==0.18 # via pluggy, pytest +importlib-metadata==0.22 # via pluggy, pytest joblib==0.13.2 # via scikit-learn kiwisolver==1.1.0 # via matplotlib -matplotlib==3.1.0 -more-itertools==7.0.0 # via pytest -numpy==1.16.4 -opencv-python==4.1.0.25 -packaging==19.0 # via pytest -pandas==0.24.2 -pillow==6.0.0 # via gym, torchvision, visdom -pluggy==0.12.0 # via pytest +matplotlib==3.1.1 +more-itertools==7.2.0 # via pytest, zipp +numpy==1.17.2 +opencv-python==4.1.1.26 +packaging==19.1 # via pytest +pandas==0.25.1 +pillow==6.1.0 # via gym, torchvision, visdom +pluggy==0.13.0 # via pytest py==1.8.0 # via pytest pyglet==1.3.2 # via gym -pymongo==3.8.0 -pyparsing==2.4.0 # via matplotlib, packaging -pytest==4.6.3 +pymongo==3.9.0 +pyparsing==2.4.2 # via matplotlib, packaging +pytest==5.1.2 python-dateutil==2.8.0 # via matplotlib, pandas -pytz==2019.1 # via pandas -pyyaml==5.1.1 -pyzmq==18.0.1 # via visdom +pytz==2019.2 # via pandas +pyyaml==5.1.2 +pyzmq==18.1.0 # via visdom requests==2.22.0 # via torchtext, visdom -scikit-learn==0.21.2 -scipy==1.3.0 # via gym, scikit-learn, visdom -six==1.12.0 # via atari-py, cycler, gym, packaging, pytest, python-dateutil, torchvision, visdom, websocket-client -torch==1.1.0 +scikit-learn==0.21.3 +scipy==1.3.1 # via gym, scikit-learn, visdom +six==1.12.0 # via atari-py, cycler, gym, packaging, python-dateutil, torchtext, torchvision, visdom, websocket-client +torch==1.2.0 torchfile==0.1.0 # via visdom -torchtext==0.3.1 -torchvision==0.3.0 -tornado==6.0.2 # via visdom -tqdm==4.32.2 +torchtext==0.4.0 +torchvision==0.4.0 +tornado==6.0.3 # via visdom +tqdm==4.35.0 urllib3==1.25.3 # via requests visdom==0.1.8.8 wcwidth==0.1.7 # via pytest websocket-client==0.56.0 # via visdom -zipp==0.5.1 # via importlib-metadata +zipp==0.6.0 # via importlib-metadata # The following packages are considered to be unsafe in a requirements file: -# setuptools==41.0.1 # via kiwisolver +# setuptools==41.2.0 # via kiwisolver From 6c8cd48d52241ec7b5cb83b1e2740fae6ebc387e Mon Sep 17 00:00:00 2001 From: Million Integrals Date: Thu, 12 Sep 2019 14:55:07 -0700 Subject: [PATCH 069/162] Removing parallel pytest. 
--- Makefile | 3 --- setup.py | 2 +- 2 files changed, 1 insertion(+), 4 deletions(-) diff --git a/Makefile b/Makefile index a7e44717..9725ace3 100644 --- a/Makefile +++ b/Makefile @@ -36,9 +36,6 @@ serve-visdom: test: pytest . -partestc: - pytest -n 4 . - requirements.txt: requirements.in pip-compile requirements.in diff --git a/setup.py b/setup.py index 0fd152f6..cdad2f8d 100644 --- a/setup.py +++ b/setup.py @@ -49,7 +49,7 @@ 'text': ['spacy'], 'all': [ 'visdom', 'pymongo', 'dnspython', 'gym[all]', 'pytest', 'spacy', 'ipython', 'jupyter', - 'pip-tools', 'flake8', 'pytest-xdist', 'tb-nightly' + 'pip-tools', 'flake8', 'tb-nightly' ] }, tests_require=[ From 57420cee58a98b476b46aa3f9ad6a3a3c2ec2a80 Mon Sep 17 00:00:00 2001 From: Million Integrals Date: Thu, 12 Sep 2019 15:03:04 -0700 Subject: [PATCH 070/162] Worked on fixing unit tests after the refactors. --- examples-configs/rl/atari/atari_ppo.yaml | 2 +- examples-configs/rl/atari/atari_ppo_gru.yaml | 2 +- vel/model/autoencoder/mnist_cnn_vae.py | 2 +- vel/rl/algo/distributional_dqn.py | 2 +- vel/rl/algo/dqn.py | 2 +- vel/rl/module/noise/eps_greedy.py | 2 +- vel/rl/module/noise/ou_noise.py | 2 +- vel/rl/test/test_integration.py | 4 ++-- 8 files changed, 9 insertions(+), 9 deletions(-) diff --git a/examples-configs/rl/atari/atari_ppo.yaml b/examples-configs/rl/atari/atari_ppo.yaml index 4850edba..013f8ef0 100644 --- a/examples-configs/rl/atari/atari_ppo.yaml +++ b/examples-configs/rl/atari/atari_ppo.yaml @@ -39,7 +39,7 @@ reinforcer: max_grad_norm: 0.5 # Gradient clipping parameter cliprange: - name: vel.schedule.linear + name: vel.function.linear initial_value: 0.1 final_value: 0.0 diff --git a/examples-configs/rl/atari/atari_ppo_gru.yaml b/examples-configs/rl/atari/atari_ppo_gru.yaml index 43244de9..27303c92 100644 --- a/examples-configs/rl/atari/atari_ppo_gru.yaml +++ b/examples-configs/rl/atari/atari_ppo_gru.yaml @@ -41,7 +41,7 @@ reinforcer: max_grad_norm: 0.5 # Gradient clipping parameter cliprange: - name: vel.schedule.linear + name: vel.function.linear initial_value: 0.1 final_value: 0.0 diff --git a/vel/model/autoencoder/mnist_cnn_vae.py b/vel/model/autoencoder/mnist_cnn_vae.py index 258294ff..5559bcfd 100644 --- a/vel/model/autoencoder/mnist_cnn_vae.py +++ b/vel/model/autoencoder/mnist_cnn_vae.py @@ -8,7 +8,7 @@ import vel.util.network as net_util from vel.api import GradientModel, ModelFactory -from vel.metric.averaging_metric import AveragingNamedMetric +from vel.metric import AveragingNamedMetric from vel.metric.loss_metric import Loss from vel.module.layers import Flatten, Reshape diff --git a/vel/rl/algo/distributional_dqn.py b/vel/rl/algo/distributional_dqn.py index 4b05ecf2..95ca440e 100644 --- a/vel/rl/algo/distributional_dqn.py +++ b/vel/rl/algo/distributional_dqn.py @@ -2,7 +2,7 @@ import torch.nn.utils from vel.api import ModelFactory -from vel.metric.averaging_metric import AveragingNamedMetric +from vel.metric import AveragingNamedMetric from vel.rl.api import OptimizerAlgoBase diff --git a/vel/rl/algo/dqn.py b/vel/rl/algo/dqn.py index 1437a062..bd3c355e 100644 --- a/vel/rl/algo/dqn.py +++ b/vel/rl/algo/dqn.py @@ -3,7 +3,7 @@ import torch.nn.utils from vel.api import ModelFactory -from vel.metric.averaging_metric import AveragingNamedMetric +from vel.metric import AveragingNamedMetric from vel.rl.api import OptimizerAlgoBase diff --git a/vel/rl/module/noise/eps_greedy.py b/vel/rl/module/noise/eps_greedy.py index 5764a489..0f3346eb 100644 --- a/vel/rl/module/noise/eps_greedy.py +++ b/vel/rl/module/noise/eps_greedy.py @@ 
-5,7 +5,7 @@ from vel.api import Schedule from vel.internal.generic_factory import GenericFactory -from vel.schedule.constant import ConstantSchedule +from vel.function.constant import ConstantSchedule class EpsGreedy(nn.Module): diff --git a/vel/rl/module/noise/ou_noise.py b/vel/rl/module/noise/ou_noise.py index a87f9786..d54a67e9 100644 --- a/vel/rl/module/noise/ou_noise.py +++ b/vel/rl/module/noise/ou_noise.py @@ -2,7 +2,7 @@ import numpy as np import torch.nn as nn -from vel.math.process import OrnsteinUhlenbeckNoiseProcess +from vel.calc.process import OrnsteinUhlenbeckNoiseProcess from vel.internal.generic_factory import GenericFactory diff --git a/vel/rl/test/test_integration.py b/vel/rl/test/test_integration.py index 912debf1..ed73a972 100644 --- a/vel/rl/test/test_integration.py +++ b/vel/rl/test/test_integration.py @@ -12,8 +12,8 @@ from vel.rl.metrics import EpisodeRewardMetric from vel.rl.module.noise.eps_greedy import EpsGreedy from vel.rl.module.noise.ou_noise import OuNoise -from vel.schedule.linear import LinearSchedule -from vel.schedule.linear_and_constant import LinearAndConstantSchedule +from vel.function.linear import LinearSchedule +from vel.function.linear_and_constant import LinearAndConstantSchedule from vel.util.random import set_seed from vel.rl.env.classic_atari import ClassicAtariEnv From e072337e8cd1fb8f01d6a313c36654540a94900a Mon Sep 17 00:00:00 2001 From: Million Integrals Date: Thu, 12 Sep 2019 22:48:33 -0700 Subject: [PATCH 071/162] Added more publications on optimizers to bibliography. --- docs/Bibliography.md | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/docs/Bibliography.md b/docs/Bibliography.md index 7e7303ea..808ff7e6 100644 --- a/docs/Bibliography.md +++ b/docs/Bibliography.md @@ -16,10 +16,26 @@ on this library: - (2012) **Lecture 6.5 -- RmsProp: Divide the gradient by a running average of its recent magnitude** Geoff Hinton http://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf + +- (Dec 2012) **ADADELTA: An Adaptive Learning Rate Method** + Matthew D. Zeiler + https://arxiv.org/abs/1212.5701 + +- (Dec 2014) **Adam: A Method for Stochastic Optimization** + Diederik P. Kingma, Jimmy Ba + https://arxiv.org/abs/1412.6980 - (Jun 2015) **Cyclical Learning Rates for Training Neural Networks** Leslie N. Smith https://arxiv.org/abs/1506.01186 + +- (Jul 2019) **Lookahead Optimizer: k steps forward, 1 step back** + Michael R. 
Zhang, James Lucas, Geoffrey Hinton, Jimmy Ba + https://arxiv.org/abs/1907.08610 + +- (Aug 2019) **On the Variance of the Adaptive Learning Rate and Beyond** + Liu, Liyuan and Jiang, Haoming and He, Pengcheng and Chen, Weizhu and Liu, Xiaodong and Gao, Jianfeng and Han, Jiawei + https://arxiv.org/abs/1908.03265 ### Residual Networks From 54bd258c54cad2725f5d3d04240f8932d3788c28 Mon Sep 17 00:00:00 2001 From: Million Integrals Date: Thu, 12 Sep 2019 22:48:44 -0700 Subject: [PATCH 072/162] New optimizers: RAdam + Ranger --- vel/optimizer/radam.py | 133 ++++++++++++++++++++++++ vel/optimizer/ranger.py | 218 ++++++++++++++++++++++++++++++++++++++++ 2 files changed, 351 insertions(+) create mode 100644 vel/optimizer/radam.py create mode 100644 vel/optimizer/ranger.py diff --git a/vel/optimizer/radam.py b/vel/optimizer/radam.py new file mode 100644 index 00000000..607b11e9 --- /dev/null +++ b/vel/optimizer/radam.py @@ -0,0 +1,133 @@ +""" +RAdam implementation from: https://github.com/LiyuanLucasLiu/RAdam/blob/master/cifar_imagenet/utils/radam.py +""" +import math +import collections +import torch.optim + +import vel.util.module_util as mu + +from vel.api import OptimizerFactory, Model + + +class RAdam(torch.optim.Optimizer): + def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0): + defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay) + self.buffer = [[None, None, None] for ind in range(10)] + super(RAdam, self).__init__(params, defaults) + + def __setstate__(self, state): + super(RAdam, self).__setstate__(state) + + def step(self, closure=None): + + loss = None + if closure is not None: + loss = closure() + + for group in self.param_groups: + + for p in group['params']: + if p.grad is None: + continue + grad = p.grad.data.float() + if grad.is_sparse: + raise RuntimeError('RAdam does not support sparse gradients') + + p_data_fp32 = p.data.float() + + state = self.state[p] + + if len(state) == 0: + state['step'] = 0 + state['exp_avg'] = torch.zeros_like(p_data_fp32) + state['exp_avg_sq'] = torch.zeros_like(p_data_fp32) + else: + state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32) + state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_data_fp32) + + exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq'] + beta1, beta2 = group['betas'] + + exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad) + exp_avg.mul_(beta1).add_(1 - beta1, grad) + + state['step'] += 1 + buffered = self.buffer[int(state['step'] % 10)] + if state['step'] == buffered[0]: + N_sma, step_size = buffered[1], buffered[2] + else: + buffered[0] = state['step'] + beta2_t = beta2 ** state['step'] + N_sma_max = 2 / (1 - beta2) - 1 + N_sma = N_sma_max - 2 * state['step'] * beta2_t / (1 - beta2_t) + buffered[1] = N_sma + + # more conservative since it's an approximated value + if N_sma >= 5: + step_size = group['lr'] * math.sqrt((1 - beta2_t) * (N_sma - 4) / (N_sma_max - 4) * (N_sma - 2) / N_sma * N_sma_max / (N_sma_max - 2)) / (1 - beta1 ** state['step']) + else: + step_size = group['lr'] / (1 - beta1 ** state['step']) + buffered[2] = step_size + + if group['weight_decay'] != 0: + p_data_fp32.add_(-group['weight_decay'] * group['lr'], p_data_fp32) + + # more conservative since it's an approximated value + if N_sma >= 5: + denom = exp_avg_sq.sqrt().add_(group['eps']) + p_data_fp32.addcdiv_(-step_size, exp_avg, denom) + else: + p_data_fp32.add_(-step_size, exp_avg) + + p.data.copy_(p_data_fp32) + + return loss + + +class RAdamFactory(OptimizerFactory): + """ RAdam optimizer factory 
""" + + def __init__(self, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, layer_groups=False): + self.lr = lr + self.betas = betas + self.eps = eps + self.weight_decay = weight_decay + self.layer_groups = layer_groups + + def instantiate(self, model: Model) -> RAdam: + if self.layer_groups: + parameters = mu.to_parameter_groups(model.get_layer_groups()) + + if isinstance(self.lr, collections.Sequence): + for idx, lr in enumerate(self.lr): + parameters[idx]['lr'] = lr + + default_lr = self.lr[0] + else: + default_lr = float(self.lr) + + if isinstance(self.weight_decay, collections.Sequence): + for idx, weight_decay in enumerate(self.weight_decay): + parameters[idx]['weight_decay'] = weight_decay + + default_weight_decay = self.weight_decay[0] + else: + default_weight_decay = self.weight_decay + + return RAdam( + parameters, + lr=default_lr, betas=self.betas, eps=self.eps, weight_decay=default_weight_decay, + ) + else: + parameters = filter(lambda p: p.requires_grad, model.parameters()) + + return RAdam( + parameters, + lr=self.lr, betas=self.betas, eps=self.eps, weight_decay=self.weight_decay, + ) + + +def create(lr, betas=(0.9, 0.999), weight_decay=0, epsilon=1e-8, layer_groups=False): + """ Vel factory function """ + return RAdamFactory(lr=lr, betas=betas, weight_decay=weight_decay, eps=epsilon, layer_groups=layer_groups) diff --git a/vel/optimizer/ranger.py b/vel/optimizer/ranger.py new file mode 100644 index 00000000..3ab7f896 --- /dev/null +++ b/vel/optimizer/ranger.py @@ -0,0 +1,218 @@ +#Ranger deep learning optimizer - RAdam + Lookahead combined. +#https://github.com/lessw2020/Ranger-Deep-Learning-Optimizer + +#Ranger has now been used to capture 12 records on the FastAI leaderboard. + +#This version = 9.3.19 + +#Credits: +#RAdam --> https://github.com/LiyuanLucasLiu/RAdam +#Lookahead --> rewritten by lessw2020, but big thanks to Github @LonePatient and @RWightman for ideas from their code. +#Lookahead paper --> MZhang,G Hinton https://arxiv.org/abs/1907.08610 + +#summary of changes: +#full code integration with all updates at param level instead of group, moves slow weights into state dict (from generic weights), +#supports group learning rates (thanks @SHolderbach), fixes sporadic load from saved model issues. +#changes 8/31/19 - fix references to *self*.N_sma_threshold; + #changed eps to 1e-5 as better default than 1e-8. + +import math +import torch +import collections + +from torch.optim.optimizer import Optimizer + + +import vel.util.module_util as mu + +from vel.api import OptimizerFactory, Model + + +class Ranger(Optimizer): + + def __init__(self, params, lr=1e-3, alpha=0.5, k=6, N_sma_threshhold=5, betas=(.95,0.999), eps=1e-5, weight_decay=0): + #parameter checks + if not 0.0 <= alpha <= 1.0: + raise ValueError(f'Invalid slow update rate: {alpha}') + if not 1 <= k: + raise ValueError(f'Invalid lookahead steps: {k}') + if not lr > 0: + raise ValueError(f'Invalid Learning Rate: {lr}') + if not eps > 0: + raise ValueError(f'Invalid eps: {eps}') + + #parameter comments: + # beta1 (momentum) of .95 seems to work better than .90... + #N_sma_threshold of 5 seems better in testing than 4. + #In both cases, worth testing on your dataset (.90 vs .95, 4 vs 5) to make sure which works best for you. 
+ + #prep defaults and init torch.optim base + defaults = dict(lr=lr, alpha=alpha, k=k, step_counter=0, betas=betas, N_sma_threshhold=N_sma_threshhold, eps=eps, weight_decay=weight_decay) + super().__init__(params,defaults) + + #adjustable threshold + self.N_sma_threshhold = N_sma_threshhold + + #now we can get to work... + #removed as we now use step from RAdam...no need for duplicate step counting + #for group in self.param_groups: + # group["step_counter"] = 0 + #print("group step counter init") + + #look ahead params + self.alpha = alpha + self.k = k + + #radam buffer for state + self.radam_buffer = [[None,None,None] for ind in range(10)] + + #self.first_run_check=0 + + #lookahead weights + #9/2/19 - lookahead param tensors have been moved to state storage. + #This should resolve issues with load/save where weights were left in GPU memory from first load, slowing down future runs. + + #self.slow_weights = [[p.clone().detach() for p in group['params']] + # for group in self.param_groups] + + #don't use grad for lookahead weights + #for w in it.chain(*self.slow_weights): + # w.requires_grad = False + + def __setstate__(self, state): + print("set state called") + super(Ranger, self).__setstate__(state) + + + def step(self, closure=None): + loss = None + #note - below is commented out b/c I have other work that passes back the loss as a float, and thus not a callable closure. + #Uncomment if you need to use the actual closure... + + #if closure is not None: + #loss = closure() + + #Evaluate averages and grad, update param tensors + for group in self.param_groups: + + for p in group['params']: + if p.grad is None: + continue + grad = p.grad.data.float() + if grad.is_sparse: + raise RuntimeError('Ranger optimizer does not support sparse gradients') + + p_data_fp32 = p.data.float() + + state = self.state[p] #get state dict for this param + + if len(state) == 0: #if first time to run...init dictionary with our desired entries + #if self.first_run_check==0: + #self.first_run_check=1 + #print("Initializing slow buffer...should not see this at load from saved model!") + state['step'] = 0 + state['exp_avg'] = torch.zeros_like(p_data_fp32) + state['exp_avg_sq'] = torch.zeros_like(p_data_fp32) + + #look ahead weight storage now in state dict + state['slow_buffer'] = torch.empty_like(p.data) + state['slow_buffer'].copy_(p.data) + + else: + state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32) + state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_data_fp32) + + #begin computations + exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq'] + beta1, beta2 = group['betas'] + + #compute variance mov avg + exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad) + #compute mean moving avg + exp_avg.mul_(beta1).add_(1 - beta1, grad) + + state['step'] += 1 + + + buffered = self.radam_buffer[int(state['step'] % 10)] + if state['step'] == buffered[0]: + N_sma, step_size = buffered[1], buffered[2] + else: + buffered[0] = state['step'] + beta2_t = beta2 ** state['step'] + N_sma_max = 2 / (1 - beta2) - 1 + N_sma = N_sma_max - 2 * state['step'] * beta2_t / (1 - beta2_t) + buffered[1] = N_sma + if N_sma > self.N_sma_threshhold: + step_size = math.sqrt((1 - beta2_t) * (N_sma - 4) / (N_sma_max - 4) * (N_sma - 2) / N_sma * N_sma_max / (N_sma_max - 2)) / (1 - beta1 ** state['step']) + else: + step_size = 1.0 / (1 - beta1 ** state['step']) + buffered[2] = step_size + + if group['weight_decay'] != 0: + p_data_fp32.add_(-group['weight_decay'] * group['lr'], p_data_fp32) + + if N_sma > self.N_sma_threshhold: + denom = 
exp_avg_sq.sqrt().add_(group['eps']) + p_data_fp32.addcdiv_(-step_size * group['lr'], exp_avg, denom) + else: + p_data_fp32.add_(-step_size * group['lr'], exp_avg) + + p.data.copy_(p_data_fp32) + + #integrated look ahead... + #we do it at the param level instead of group level + if state['step'] % group['k'] == 0: + slow_p = state['slow_buffer'] #get access to slow param tensor + slow_p.add_(self.alpha, p.data - slow_p) #(fast weights - slow weights) * alpha + p.data.copy_(slow_p) #copy interpolated weights to RAdam param tensor + + return loss + + +class RangerFactory(OptimizerFactory): + """ RAdam optimizer factory """ + + def __init__(self, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, layer_groups=False): + self.lr = lr + self.betas = betas + self.eps = eps + self.weight_decay = weight_decay + self.layer_groups = layer_groups + + def instantiate(self, model: Model) -> Ranger: + if self.layer_groups: + parameters = mu.to_parameter_groups(model.get_layer_groups()) + + if isinstance(self.lr, collections.Sequence): + for idx, lr in enumerate(self.lr): + parameters[idx]['lr'] = lr + + default_lr = self.lr[0] + else: + default_lr = float(self.lr) + + if isinstance(self.weight_decay, collections.Sequence): + for idx, weight_decay in enumerate(self.weight_decay): + parameters[idx]['weight_decay'] = weight_decay + + default_weight_decay = self.weight_decay[0] + else: + default_weight_decay = self.weight_decay + + return Ranger( + parameters, + lr=default_lr, betas=self.betas, eps=self.eps, weight_decay=default_weight_decay, + ) + else: + parameters = filter(lambda p: p.requires_grad, model.parameters()) + + return Ranger( + parameters, + lr=self.lr, betas=self.betas, eps=self.eps, weight_decay=self.weight_decay, + ) + + +def create(lr, betas=(0.9, 0.999), weight_decay=0, epsilon=1e-8, layer_groups=False): + """ Vel factory function """ + return RangerFactory(lr=lr, betas=betas, weight_decay=weight_decay, eps=epsilon, layer_groups=layer_groups) From a075293ce99dca656b3088e69250694c8b979a99 Mon Sep 17 00:00:00 2001 From: Million Integrals Date: Thu, 12 Sep 2019 22:49:16 -0700 Subject: [PATCH 073/162] API to transform only a single coordinate. 
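To illustrate the intent of this change: sometimes only one element of a datapoint (say, the image under 'x') needs to be brought back to a viewable form, without denormalizing the whole dictionary. Below is a minimal, self-contained sketch of that idea; the toy transformation classes and values are made up for illustration, and only the reverse-iteration-with-scope pattern mirrors the actual diff that follows.

    # Toy sketch (not vel code): undo transformations in reverse order, but only
    # those whose scope includes the requested coordinate of the datapoint.
    class ImageToUnitRange:
        scope = {'x'}
        def denormalize_item(self, item, coordinate):
            return item * 255.0 if coordinate in self.scope else item

    class LabelShift:
        scope = {'y'}
        def denormalize_item(self, item, coordinate):
            return item - 1 if coordinate in self.scope else item

    transformations = [ImageToUnitRange(), LabelShift()]

    def denormalize_item(item, coordinate):
        for t in reversed(transformations):
            item = t.denormalize_item(item, coordinate)
        return item

    print(denormalize_item(0.5, 'x'))  # -> 127.5, only the 'x'-scoped transform is reversed
    print(denormalize_item(7, 'y'))    # -> 6, only the 'y'-scoped transform is reversed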
--- vel/api/transformation.py | 11 +++++++++++ vel/data/dataflow.py | 7 +++++++ 2 files changed, 18 insertions(+) diff --git a/vel/api/transformation.py b/vel/api/transformation.py index 2bf960bf..42f1989c 100644 --- a/vel/api/transformation.py +++ b/vel/api/transformation.py @@ -18,6 +18,10 @@ def denormalize(self, datapoint): """ Operation reverse to normalization """ return datapoint + def denormalize_item(self, datapoint_item, coordinate): + """ Denormalize only a single item of the datapoint """ + return datapoint_item + class ScopedTransformation(Transformation): """ Transformation applied only to certain keys of the datapoint """ @@ -52,3 +56,10 @@ def denormalize(self, datapoint): datapoint[name] = self.denormalization_transform(datapoint[name]) return datapoint + + def denormalize_item(self, datapoint_item, coordinate): + """ Denormalize only a single item of the datapoint """ + if coordinate in self.scope: + return self.denormalization_transform(datapoint_item) + else: + return datapoint_item diff --git a/vel/data/dataflow.py b/vel/data/dataflow.py index b0731729..8d52ab0e 100644 --- a/vel/data/dataflow.py +++ b/vel/data/dataflow.py @@ -60,6 +60,13 @@ def denormalize(self, datapoint): return datapoint + def denormalize_item(self, datapoint_item, coordinate): + """ Perform a reverse normalization of a single item (for viewing) """ + for t in self.transformations[::-1]: + datapoint_item = t.denormalize_item(datapoint_item, coordinate) + + return datapoint_item + def __len__(self): """ Length of the dataset """ return len(self.dataset) From abf7cc72e7b10f3bb6e2e5d92bed3ef00b1d6e7e Mon Sep 17 00:00:00 2001 From: Million Integrals Date: Thu, 12 Sep 2019 22:50:14 -0700 Subject: [PATCH 074/162] Iterating on the MNIST VAE. --- .../autoencoder/mnist/mnist_cnn_vae.yaml | 14 +- .../autoencoder/mnist/mnist_fc_vae.yaml | 35 +++++ vel/model/autoencoder/mnist_cnn_vae.py | 2 - vel/model/autoencoder/mnist_fc_vae.py | 143 ++++++++++++++++++ 4 files changed, 185 insertions(+), 9 deletions(-) create mode 100644 examples-configs/autoencoder/mnist/mnist_fc_vae.yaml create mode 100644 vel/model/autoencoder/mnist_fc_vae.py diff --git a/examples-configs/autoencoder/mnist/mnist_cnn_vae.yaml b/examples-configs/autoencoder/mnist/mnist_cnn_vae.yaml index 043edd22..6dc6525f 100644 --- a/examples-configs/autoencoder/mnist/mnist_cnn_vae.yaml +++ b/examples-configs/autoencoder/mnist/mnist_cnn_vae.yaml @@ -1,4 +1,4 @@ -name: 'mnist_cnn_autoenoder' +name: 'mnist_cnn_vae' model: @@ -6,8 +6,8 @@ model: img_rows: 28 img_cols: 28 img_channels: 1 - channels: [8, 16, 16] - representation_length: 16 + channels: [64, 128, 256] + representation_length: 20 source: @@ -15,7 +15,7 @@ source: loader: name: vel.data.dataset_loader - batch_size: 128 + batch_size: 256 num_workers: 4 transformations: @@ -24,12 +24,12 @@ loader: optimizer: - name: vel.optimizer.adam - lr: 1.0e-3 + name: vel.optimizer.radam + lr: 1.0e-4 commands: train: name: vel.command.train_command - epochs: 12 + epochs: 200 diff --git a/examples-configs/autoencoder/mnist/mnist_fc_vae.yaml b/examples-configs/autoencoder/mnist/mnist_fc_vae.yaml new file mode 100644 index 00000000..a6422905 --- /dev/null +++ b/examples-configs/autoencoder/mnist/mnist_fc_vae.yaml @@ -0,0 +1,35 @@ +name: 'mnist_fc_vae' + + +model: + name: vel.model.autoencoder.mnist_fc_vae + img_rows: 28 + img_cols: 28 + img_channels: 1 + layers: [512, 256] + representation_length: 16 + + +source: + name: vel.data.source.vision.mnist + +loader: + name: vel.data.dataset_loader + batch_size: 256 + 
num_workers: 4 + + transformations: + - name: vel.data.transformation.image_to_tensor + - name: vel.data.transformation.unsupervised + + +optimizer: + name: vel.optimizer.radam + lr: 1.0e-3 + + +commands: + train: + name: vel.command.train_command + epochs: 100 + diff --git a/vel/model/autoencoder/mnist_cnn_vae.py b/vel/model/autoencoder/mnist_cnn_vae.py index 5559bcfd..b9222c76 100644 --- a/vel/model/autoencoder/mnist_cnn_vae.py +++ b/vel/model/autoencoder/mnist_cnn_vae.py @@ -21,8 +21,6 @@ class MnistCnnVAE(GradientModel): def __init__(self, img_rows, img_cols, img_channels, channels=None, representation_length=32): super(MnistCnnVAE, self).__init__() - assert representation_length % 2 == 0, "Representation length must be even" - if channels is None: channels = [16, 32, 32] diff --git a/vel/model/autoencoder/mnist_fc_vae.py b/vel/model/autoencoder/mnist_fc_vae.py new file mode 100644 index 00000000..fc1c327c --- /dev/null +++ b/vel/model/autoencoder/mnist_fc_vae.py @@ -0,0 +1,143 @@ +import itertools as it + +import torch +import torch.nn as nn +import torch.nn.init as init +import torch.nn.functional as F + +from vel.api import GradientModel, ModelFactory +from vel.metric import AveragingNamedMetric +from vel.metric.loss_metric import Loss +from vel.module.layers import Flatten, Reshape + + +class MnistCnnVAE(GradientModel): + """ + A simple MNIST variational autoencoder, containing 3 convolutional layers. + """ + + def __init__(self, img_rows, img_cols, img_channels, layers=None, representation_length=32): + super(MnistCnnVAE, self).__init__() + + if layers is None: + layers = [512, 256] + + self.representation_length = representation_length + + # self.final_width = net_util.convolutional_layer_series(img_rows, layer_series) + # self.final_height = net_util.convolutional_layer_series(img_cols, layer_series) + self.layers = layers + + input_length = img_rows * img_cols * img_channels + + self.encoder = nn.Sequential( + Flatten(), + nn.Linear(in_features=input_length, out_features=self.layers[0]), + nn.ReLU(True), + nn.Linear(in_features=self.layers[0], out_features=self.layers[1]), + nn.ReLU(True), + nn.Linear(self.layers[1], representation_length * 2) + ) + + self.decoder = nn.Sequential( + nn.Linear(representation_length, self.layers[1]), + nn.ReLU(True), + nn.Linear(self.layers[1], self.layers[0]), + nn.ReLU(True), + nn.Linear(self.layers[0], input_length), + Reshape(img_channels, img_rows, img_cols), + nn.Sigmoid() + ) + + @staticmethod + def _weight_initializer(tensor): + init.xavier_uniform_(tensor.weight, gain=init.calculate_gain('relu')) + init.constant_(tensor.bias, 0.0) + + def reset_weights(self): + for m in it.chain(self.encoder, self.decoder): + if isinstance(m, nn.Conv2d): + self._weight_initializer(m) + elif isinstance(m, nn.ConvTranspose2d): + self._weight_initializer(m) + elif isinstance(m, nn.Linear): + self._weight_initializer(m) + + def encode(self, sample): + encoding = self.encoder(sample) + + mu = encoding[:, :self.representation_length] + # I encode std directly as a softplus, rather than exp(logstd) + std = F.softplus(encoding[:, self.representation_length:]) + + return mu + torch.randn_like(std) * std + + def decode(self, sample): + return self.decoder(sample) + + def forward(self, sample): + encoding = self.encoder(sample) + + mu = encoding[:, :self.representation_length] + # I encode std directly as a softplus, rather than exp(logstd) + std = F.softplus(encoding[:, self.representation_length:]) + + z = mu + torch.randn_like(std) * std + + decoded = 
self.decoder(z) + + return { + 'decoded': decoded, + 'encoding': z, + 'mu': mu, + 'std': std + } + + def calculate_gradient(self, data): + """ Calculate a gradient of loss function """ + output = self(data['x']) + + y_pred = output['decoded'] + + mu = output['mu'] + std = output['std'] + var = std ** 2 + + kl_divergence = - 0.5 * (1 + torch.log(var) - mu ** 2 - var).sum(dim=1) + kl_divergence = kl_divergence.mean() + + # reconstruction = 0.5 * F.mse_loss(y_pred, y_true) + + # We must sum over all image axis and average only on minibatch axis + reconstruction = F.binary_cross_entropy(y_pred, data['y'], reduce=False).sum(1).sum(1).sum(1).mean() + loss = reconstruction + kl_divergence + + if self.training: + loss.backward() + + return { + 'loss': loss.item(), + 'reconstruction': reconstruction.item(), + 'kl_divergence': kl_divergence.item() + } + + def metrics(self): + """ Set of metrics for this model """ + return [ + Loss(), + AveragingNamedMetric('reconstruction', scope="train"), + AveragingNamedMetric('kl_divergence', scope="train") + ] + + +def create(img_rows, img_cols, img_channels, layers=None, representation_length=32): + """ Vel factory function """ + if layers is None: + layers = [512, 256] + + def instantiate(**_): + return MnistCnnVAE( + img_rows, img_cols, img_channels, layers=layers, representation_length=representation_length + ) + + return ModelFactory.generic(instantiate) From 6b46de800d1943dc42fce55564c6e16427cacf1e Mon Sep 17 00:00:00 2001 From: Million Integrals Date: Thu, 12 Sep 2019 22:51:23 -0700 Subject: [PATCH 075/162] Cleaned up README a bit. --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 5b39c7a7..f115666e 100644 --- a/README.md +++ b/README.md @@ -118,7 +118,7 @@ that are ready to run and easy to modify for other similar usecases: # Implemented models - Unsupervised learning -- Autoencoders and Variational autoencoders with examples on MNIST dataset. +- AutoEncoders (AE) and Variational AutoEncoders (VAE) with examples on MNIST dataset. # Examples From 4fa9ca453bb402c6a69449f2bc0a380bd1e67e4d Mon Sep 17 00:00:00 2001 From: Million Integrals Date: Thu, 12 Sep 2019 23:01:20 -0700 Subject: [PATCH 076/162] Fixed a warning in VAE code. 
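For context, the warning came from the deprecated reduce=False argument of F.binary_cross_entropy; newer PyTorch releases expect reduction='none' instead. A minimal sketch of the warning-free pattern follows (the tensors are dummy data, not taken from the model):

    import torch
    import torch.nn.functional as F

    y_pred = torch.rand(4, 1, 28, 28)                        # decoder output in [0, 1]
    y_true = torch.randint(0, 2, (4, 1, 28, 28)).float()     # binarized target image

    # Old form (emits a deprecation warning):
    #     F.binary_cross_entropy(y_pred, y_true, reduce=False)
    per_pixel = F.binary_cross_entropy(y_pred, y_true, reduction='none')

    # Sum over the image axes, average only over the minibatch axis
    reconstruction = per_pixel.sum(1).sum(1).sum(1).mean()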
--- vel/model/autoencoder/mnist_cnn_vae.py | 2 +- vel/model/autoencoder/mnist_fc_vae.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/vel/model/autoencoder/mnist_cnn_vae.py b/vel/model/autoencoder/mnist_cnn_vae.py index b9222c76..a9da7e37 100644 --- a/vel/model/autoencoder/mnist_cnn_vae.py +++ b/vel/model/autoencoder/mnist_cnn_vae.py @@ -122,7 +122,7 @@ def calculate_gradient(self, data): # reconstruction = 0.5 * F.mse_loss(y_pred, y_true) # We must sum over all image axis and average only on minibatch axis - reconstruction = F.binary_cross_entropy(y_pred, data['y'], reduce=False).sum(1).sum(1).sum(1).mean() + reconstruction = F.binary_cross_entropy(y_pred, data['y'], reduction='none').sum(1).sum(1).sum(1).mean() loss = reconstruction + kl_divergence if self.training: diff --git a/vel/model/autoencoder/mnist_fc_vae.py b/vel/model/autoencoder/mnist_fc_vae.py index fc1c327c..e79f114b 100644 --- a/vel/model/autoencoder/mnist_fc_vae.py +++ b/vel/model/autoencoder/mnist_fc_vae.py @@ -109,7 +109,7 @@ def calculate_gradient(self, data): # reconstruction = 0.5 * F.mse_loss(y_pred, y_true) # We must sum over all image axis and average only on minibatch axis - reconstruction = F.binary_cross_entropy(y_pred, data['y'], reduce=False).sum(1).sum(1).sum(1).mean() + reconstruction = F.binary_cross_entropy(y_pred, data['y'], reduction='none').sum(1).sum(1).sum(1).mean() loss = reconstruction + kl_divergence if self.training: From b579864e8cd9ded14f94e0ca76abb2de6e909d9b Mon Sep 17 00:00:00 2001 From: Million Integrals Date: Thu, 12 Sep 2019 23:01:30 -0700 Subject: [PATCH 077/162] Implement "convert warnings to errors" option. --- vel/launcher.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/vel/launcher.py b/vel/launcher.py index 3f800638..7dfa94eb 100644 --- a/vel/launcher.py +++ b/vel/launcher.py @@ -17,6 +17,7 @@ def main(): parser.add_argument('-r', '--run_number', type=int, default=0, help="A run number") parser.add_argument('-d', '--device', default='cuda', help="A device to run the model on") parser.add_argument('-s', '--seed', type=int, default=None, help="Random seed for the project") + parser.add_argument('--werr', action='store_true', default=False, help="Convert warnings to errors") parser.add_argument( '-p', '--param', type=str, metavar='NAME=VALUE', action='append', default=[], help="Configuration parameters" @@ -30,6 +31,11 @@ def main(): args = parser.parse_args() + if args.werr: + import warnings + warnings.filterwarnings('error', module='vel.*') + warnings.filterwarnings('error', module='torch\\..*') + model_config = ModelConfig.from_file( args.config, args.run_number, continue_training=getattr(args, 'continue'), device=args.device, seed=args.seed, params={k: v for (k, v) in (Parser.parse_equality(eq) for eq in args.param)} From feb2efba557c4cc1e201f7408ff1b82126ba1ce6 Mon Sep 17 00:00:00 2001 From: Million Integrals Date: Sat, 14 Sep 2019 20:33:24 -0700 Subject: [PATCH 078/162] Expanding bibliography. 
--- docs/Bibliography.md | 19 +++++++++++++------ 1 file changed, 13 insertions(+), 6 deletions(-) diff --git a/docs/Bibliography.md b/docs/Bibliography.md index 808ff7e6..8ed79213 100644 --- a/docs/Bibliography.md +++ b/docs/Bibliography.md @@ -4,13 +4,18 @@ Below, I present a most likely incomplete list of works I referred to when I was on this library: -### Autoencoders +### Latent variable models - (Dec 2013) **Auto-Encoding Variational Bayes** Diederik P Kingma, Max Welling https://arxiv.org/abs/1312.6114 +- (Sep 2015) **Importance Weighted Autoencoders** + Yuri Burda, Roger Grosse, Ruslan Salakhutdinov + https://arxiv.org/abs/1509.00519 + + ### Learning rate and optimization - (2012) **Lecture 6.5 -- RmsProp: Divide the gradient by a running average of its recent magnitude** @@ -143,11 +148,13 @@ on this library: ### Open source repositories This repository contains various parts of functionality derived from open source code -in the following repositories: +in the following repositories (in alphabetical order): +- https://github.com/Kaixhin/Rainbow +- https://github.com/Khrylx/PyTorch-RL +- https://github.com/LiyuanLucasLiu/RAdam - https://github.com/fastai/fastai -- https://github.com/pytorch/pytorch +- https://github.com/lessw2020/Ranger-Deep-Learning-Optimizer - https://github.com/openai/baselines -- https://github.com/tensorflow/tensorflow -- https://github.com/Kaixhin/Rainbow -- https://github.com/Khrylx/PyTorch-RL \ No newline at end of file +- https://github.com/pytorch/pytorch +- https://github.com/tensorflow/tensorflow \ No newline at end of file From cc53ae525e5ed0ee75ec83ef12bda70e0666a379 Mon Sep 17 00:00:00 2001 From: Million Integrals Date: Sat, 14 Sep 2019 22:17:35 -0700 Subject: [PATCH 079/162] Added a few extra interactive options to dataflow. --- vel/data/dataflow.py | 30 +++++++++++++++++++++++++++++- 1 file changed, 29 insertions(+), 1 deletion(-) diff --git a/vel/data/dataflow.py b/vel/data/dataflow.py index 8d52ab0e..de7fb4ab 100644 --- a/vel/data/dataflow.py +++ b/vel/data/dataflow.py @@ -1,5 +1,7 @@ -import typing +import collections +import torch import torch.utils.data as data +import typing from vel.api import Source, Transformation @@ -44,6 +46,32 @@ def get_raw(self, index): """ Get raw data point """ return pre_map(self.dataset[index]) + def get_batch(self, batch_idx, batch_size): + """ + Simple method to get a batch of data, mainly for interactive purposes. + For training, a DataLoader should be used. + """ + + start_idx = batch_idx * batch_size + end_idx = min(start_idx + batch_size, len(self)) + + buffer = collections.defaultdict(list) + + for i in range(start_idx, end_idx): + datapoint = self[i] + + for k, v in datapoint.items(): + buffer[k].append(v) + + return { + k: torch.stack(v, dim=0) for k, v in buffer.items() + } + + def num_batches(self, batch_size): + """ Number of batches of given batch size """ + length = len(self) + return (length + (batch_size - 1)) // batch_size + def __getitem__(self, index): """ Get data point from the dataset """ datapoint = self.get_raw(index) From 67d9d4b1569283dc4b19447509dbca1f0ffb5964 Mon Sep 17 00:00:00 2001 From: Million Integrals Date: Sat, 14 Sep 2019 22:18:03 -0700 Subject: [PATCH 080/162] Proper NLL estimation using importance sampling. 
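Rough idea of the estimator introduced here, ahead of the implementation below: draw K posterior samples z_k ~ q(z | x), compute the per-sample ELBO term log p(x | z_k) + log p(z_k) - log q(z_k | x), and combine them with a log-mean-exp. By Jensen's inequality this importance-weighted average is a tighter lower bound on log p(x) than the plain ELBO, and it approaches log p(x) as K grows, so its negation can be reported as an NLL estimate. A standalone numerical sketch (the ELBO values are toy numbers):

    import torch

    def log_mean_exp(values, dim=-1):
        # log((1/K) * sum_k exp(values_k)), computed in a numerically stable way
        value_max = values.max(dim, keepdim=True)[0]
        return (values - value_max).exp().mean(dim).log() + value_max.squeeze(dim)

    # Five per-sample ELBO terms for one datapoint, z_k ~ q(z | x) (made-up values)
    elbo_samples = torch.tensor([[-90.3, -88.7, -91.2, -89.0, -88.9]])

    nll_estimate = -log_mean_exp(elbo_samples, dim=-1)   # importance-weighted bound
    naive_bound = -elbo_samples.mean(dim=-1)             # plain averaged ELBO
    print(nll_estimate <= naive_bound)                   # tensor([True])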
--- vel/model/autoencoder/mnist_cnn_vae.py | 82 +++++++++++++++++++++-----
 1 file changed, 68 insertions(+), 14 deletions(-)

diff --git a/vel/model/autoencoder/mnist_cnn_vae.py b/vel/model/autoencoder/mnist_cnn_vae.py
index a9da7e37..9cfa95a8 100644
--- a/vel/model/autoencoder/mnist_cnn_vae.py
+++ b/vel/model/autoencoder/mnist_cnn_vae.py
@@ -76,27 +76,28 @@ def reset_weights(self):
         elif isinstance(m, nn.Linear):
             self._weight_initializer(m)

-    def encode(self, sample):
+    def encoder_distribution(self, sample):
         encoding = self.encoder(sample)
-
         mu = encoding[:, :self.representation_length]
         # I encode std directly as a softplus, rather than exp(logstd)
         std = F.softplus(encoding[:, self.representation_length:])

+        return mu, std
+
+    def encode(self, sample):
+        mu, std = self.encoder_distribution(sample)
+        # Sample z
         return mu + torch.randn_like(std) * std

     def decode(self, sample):
+        # We don't sample here, because decoder is so weak it doesn't make sense
         return self.decoder(sample)

     def forward(self, sample):
-        encoding = self.encoder(sample)
-
-        mu = encoding[:, :self.representation_length]
-        # I encode std directly as a softplus, rather than exp(logstd)
-        std = F.softplus(encoding[:, self.representation_length:])
+        mu, std = self.encoder_distribution(sample)
+        # Sample z
         z = mu + torch.randn_like(std) * std
-
         decoded = self.decoder(z)

         return {
@@ -110,30 +111,83 @@ def calculate_gradient(self, data):
         """ Calculate a gradient of loss function """
         output = self(data['x'])

+        # ELBO is E_q log p(x, z) / q(z | x)
+        # Which can be expressed in many equivalent forms:
+        # (1) E_q log p(x | z) + log p(z) - log q(z | x)
+        # (2) E_q log p(x | z) - D_KL(q(z | x) || p(z))
+        # (3) E_q log p(x) - D_KL(q(z | x) || p(z | x))
+
+        # Form 3 is interesting from a theoretical standpoint, but is intractable to compute directly,
+        # while forms (1) and (2) can be computed directly.
+        # Positive aspect of form (2) is that the KL divergence can be calculated analytically,
+        # further reducing the variance of the gradient
+
         y_pred = output['decoded']

         mu = output['mu']
         std = output['std']
         var = std ** 2

+        # Analytical solution of KL divergence
         kl_divergence = - 0.5 * (1 + torch.log(var) - mu ** 2 - var).sum(dim=1)
         kl_divergence = kl_divergence.mean()

-        # reconstruction = 0.5 * F.mse_loss(y_pred, y_true)
+        # Diag-gaussian likelihood
+        # likelihood = 0.5 * F.mse_loss(y_pred, y_true)

         # We must sum over all image axis and average only on minibatch axis
-        reconstruction = F.binary_cross_entropy(y_pred, data['y'], reduction='none').sum(1).sum(1).sum(1).mean()
-        loss = reconstruction + kl_divergence
+        # Log prob p(x | z) in the case where the output distribution is Bernoulli(p)
+        likelihood = F.binary_cross_entropy(y_pred, data['y'], reduction='none').sum((1, 2, 3)).mean()
+
+        elbo = likelihood + kl_divergence
+
+        nll = self.nll(data['x'], num_posterior_samples=5)

         if self.training:
-            loss.backward()
+            elbo.backward()

         return {
-            'loss': loss.item(),
-            'reconstruction': reconstruction.item(),
+            'loss': elbo.item(),
+            'nll': nll.mean().item(),
+            'reconstruction': likelihood.item(),
             'kl_divergence': kl_divergence.item()
         }

+    def logmeanexp(self, inputs, dim=1):
+        if inputs.size(dim) == 1:
+            return inputs
+        else:
+            input_max = inputs.max(dim, keepdim=True)[0]
+            return (inputs - input_max).exp().mean(dim).log() + input_max.squeeze(dim=dim)
+
+    @torch.no_grad()
+    def nll(self, data_sample, num_posterior_samples: int = 1):
+        """
+        Upper bound on negative log-likelihood of supplied data.
+        If num samples goes to infinity, the nll of data should
+ If num samples goes to infinity, the nll of data should + approach true value + """ + assert num_posterior_samples >= 1, "Need at least one posterior sample" + + buffer = [] + + mu, std = self.encoder_distribution(data_sample) + var = std ** 2 + + kl_divergence = - 0.5 * (1 + torch.log(var) - mu ** 2 - var).sum(dim=1) + + for i in range(num_posterior_samples): + z = mu + torch.randn_like(std) * std + y_pred = self.decoder(z) + + likelihood = F.binary_cross_entropy(y_pred, data_sample, reduction='none').sum((1, 2, 3)) + elbo = likelihood + kl_divergence + + buffer.append(-elbo) + + averaged = self.logmeanexp(torch.stack(buffer, dim=-1), dim=-1) + return -averaged + def metrics(self): """ Set of metrics for this model """ return [ From b7f66673ce0d164d75ad8f072495654957a7fe56 Mon Sep 17 00:00:00 2001 From: Million Integrals Date: Thu, 19 Sep 2019 15:17:54 -0700 Subject: [PATCH 081/162] Minor dependency update. --- requirements.txt | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/requirements.txt b/requirements.txt index fc2a4418..9629fe23 100644 --- a/requirements.txt +++ b/requirements.txt @@ -16,14 +16,16 @@ dnspython==1.16.0 future==0.17.1 # via pyglet gym[atari,box2d,classic_control]==0.14.0 idna==2.8 # via requests -importlib-metadata==0.22 # via pluggy, pytest +importlib-metadata==0.23 # via pluggy, pytest joblib==0.13.2 # via scikit-learn +jsonpatch==1.24 # via visdom +jsonpointer==2.0 # via jsonpatch kiwisolver==1.1.0 # via matplotlib matplotlib==3.1.1 more-itertools==7.2.0 # via pytest, zipp numpy==1.17.2 opencv-python==4.1.1.26 -packaging==19.1 # via pytest +packaging==19.2 # via pytest pandas==0.25.1 pillow==6.1.0 # via gym, torchvision, visdom pluggy==0.13.0 # via pytest @@ -45,9 +47,9 @@ torchfile==0.1.0 # via visdom torchtext==0.4.0 torchvision==0.4.0 tornado==6.0.3 # via visdom -tqdm==4.35.0 -urllib3==1.25.3 # via requests -visdom==0.1.8.8 +tqdm==4.36.1 +urllib3==1.25.4 # via requests +visdom==0.1.8.9 wcwidth==0.1.7 # via pytest websocket-client==0.56.0 # via visdom zipp==0.6.0 # via importlib-metadata From 53628e470bcd7f5730b1be8f50dea2d6fb4b27ba Mon Sep 17 00:00:00 2001 From: Million Integrals Date: Thu, 19 Sep 2019 20:49:21 -0700 Subject: [PATCH 082/162] Implemented fully connected MNIST VAE with results matching the IWAE paper. 
--- .../autoencoder/mnist/mnist_fc_vae.yaml | 59 ++++++- vel/data/augmentation/scale_min_size.py | 2 +- vel/data/dataset_loader.py | 10 +- vel/data/transformation/binarize_image.py | 16 ++ vel/data/transformation/pil_resize.py | 18 ++ vel/model/autoencoder/fc_vae.py | 106 +++++++++++ vel/model/autoencoder/mnist_fc_vae.py | 143 --------------- vel/model/autoencoder/vae_base.py | 164 ++++++++++++++++++ vel/scheduler/multi_step.py | 52 +++--- vel/storage/backend/mongodb.py | 2 +- 10 files changed, 397 insertions(+), 175 deletions(-) create mode 100644 vel/data/transformation/binarize_image.py create mode 100644 vel/data/transformation/pil_resize.py create mode 100644 vel/model/autoencoder/fc_vae.py delete mode 100644 vel/model/autoencoder/mnist_fc_vae.py create mode 100644 vel/model/autoencoder/vae_base.py diff --git a/examples-configs/autoencoder/mnist/mnist_fc_vae.yaml b/examples-configs/autoencoder/mnist/mnist_fc_vae.yaml index a6422905..647094f4 100644 --- a/examples-configs/autoencoder/mnist/mnist_fc_vae.yaml +++ b/examples-configs/autoencoder/mnist/mnist_fc_vae.yaml @@ -2,12 +2,14 @@ name: 'mnist_fc_vae' model: - name: vel.model.autoencoder.mnist_fc_vae + name: vel.model.autoencoder.fc_vae img_rows: 28 img_cols: 28 img_channels: 1 - layers: [512, 256] - representation_length: 16 + layers: [200, 200] + representation_length: 50 + max_grad_norm: 1.0 + analytical_kl_div: true source: @@ -15,10 +17,13 @@ source: loader: name: vel.data.dataset_loader - batch_size: 256 + batch_size: 128 num_workers: 4 + pin_memory: true transformations: + - name: vel.data.transformation.to_array + - name: vel.data.transformation.binarize_image - name: vel.data.transformation.image_to_tensor - name: vel.data.transformation.unsupervised @@ -26,10 +31,54 @@ loader: optimizer: name: vel.optimizer.radam lr: 1.0e-3 + eps: 1.0e-4 + + +scheduler: + name: vel.scheduler.multi_step + gamma: 0.71968 # 10 * (-1/7) + milestones: [ 1, 4, 13, 40, 121, 364, 1093, 3280] commands: train: name: vel.command.train_command - epochs: 100 + epochs: 3280 +# train: +# name: vel.command.phase_train_command +# phases: +# - name: vel.train.phase.cycle +# max_lr: 0.001 +# min_lr: 0.000 +# interpolate: 'cosine' +# cycles: 20 +# cycle_len: 1 +# - name: vel.train.phase.cycle +# max_lr: 0.001 +# min_lr: 0.0000 +# interpolate: 'cosine' +# cycles: 7 +# cycle_len: 1 +# cycle_mult: 2 +# - name: vel.train.phase.cycle +# max_lr: 0.0001 +# min_lr: 0.0000 +# interpolate: 'cosine' +# cycles: 8 +# cycle_len: 1 +# cycle_mult: 2 +# - name: vel.train.phase.cycle +# max_lr: 0.00003 +# min_lr: 0.0000 +# interpolate: 'cosine' +# cycles: 9 +# cycle_len: 1 +# cycle_mult: 2 +# - name: vel.train.phase.cycle +# max_lr: 0.00001 +# min_lr: 0.0000 +# interpolate: 'cosine' +# cycles: 10 +# cycle_len: 1 +# cycle_mult: 2 diff --git a/vel/data/augmentation/scale_min_size.py b/vel/data/augmentation/scale_min_size.py index d6ecd06b..88554a09 100644 --- a/vel/data/augmentation/scale_min_size.py +++ b/vel/data/augmentation/scale_min_size.py @@ -15,7 +15,7 @@ def __init__(self, size, scope='x', tags=None): self.size = size def transform(self, x_data): - return op.scale_min(x_data, self.size, Image.BILINEAR) + return op.scale_min(x_data, self.size) def create(size, scope='x', tags=None): diff --git a/vel/data/dataset_loader.py b/vel/data/dataset_loader.py index 8228cf52..59be7841 100644 --- a/vel/data/dataset_loader.py +++ b/vel/data/dataset_loader.py @@ -10,11 +10,12 @@ class DatasetLoader: """ Loads data from a data source to serve it to the model """ def __init__(self, 
source: Source, batch_size: int, num_workers: int, - transformations: typing.Optional[list] = None): + transformations: typing.Optional[list] = None, pin_memory=False): self.source = source self.batch_size = batch_size self.num_workers = num_workers self.transformations = transformations + self.pin_memory = pin_memory if transformations is not None: self.transformed_source = DataFlow.transform(self.source, transformations) @@ -23,11 +24,12 @@ def __init__(self, source: Source, batch_size: int, num_workers: int, self.train_loader = data.DataLoader( self.transformed_source.train, batch_size=batch_size, shuffle=True, num_workers=num_workers, - drop_last=True + pin_memory=pin_memory, drop_last=True ) self.val_loader = data.DataLoader( self.transformed_source.validation, batch_size=batch_size, shuffle=False, num_workers=num_workers, + pin_memory=pin_memory ) if self.transformed_source.test is not None: @@ -63,11 +65,13 @@ def size(self): return self._loader_sizes -def create(source: Source, batch_size: int, num_workers: int = 0, transformations: typing.Optional[list] = None): +def create(source: Source, batch_size: int, num_workers: int = 0, transformations: typing.Optional[list] = None, + pin_memory=False): """ Vel factory function """ return DatasetLoader( source=source, batch_size=batch_size, + pin_memory=pin_memory, num_workers=num_workers, transformations=transformations ) diff --git a/vel/data/transformation/binarize_image.py b/vel/data/transformation/binarize_image.py new file mode 100644 index 00000000..38c13610 --- /dev/null +++ b/vel/data/transformation/binarize_image.py @@ -0,0 +1,16 @@ +import numpy as np + +import vel.api as api + + +class BinarizeImage(api.ScopedTransformation): + """ Convert [0,1] image into a binary {0, 1} representation """ + + def transform(self, x_data): + # Sample image from a Bernoulli distribution + return np.random.binomial(1, x_data).astype(np.float32) + + +def create(scope='x', tags=None): + """ Vel factory function """ + return BinarizeImage(scope, tags) diff --git a/vel/data/transformation/pil_resize.py b/vel/data/transformation/pil_resize.py new file mode 100644 index 00000000..326a34b2 --- /dev/null +++ b/vel/data/transformation/pil_resize.py @@ -0,0 +1,18 @@ +import PIL.Image as Image + +import vel.api as api + + +class PilResize(api.ScopedTransformation): + """ Scale the PIL image """ + def __init__(self, shape, scope='x', tags=None): + super().__init__(scope, tags) + self.shape = shape + + def transform(self, x_data): + return x_data.resize(self.shape, Image.LANCZOS) + + +def create(shape, scope='x', tags=None): + """ Vel factory function """ + return PilResize(shape, scope, tags) diff --git a/vel/model/autoencoder/fc_vae.py b/vel/model/autoencoder/fc_vae.py new file mode 100644 index 00000000..49e91fbd --- /dev/null +++ b/vel/model/autoencoder/fc_vae.py @@ -0,0 +1,106 @@ +import torch +import torch.distributions as dist +import torch.nn as nn +import torch.nn.functional as F +import torch.nn.init as init + +from vel.api import ModelFactory +from vel.module.layers import Flatten, Reshape + +from vel.model.autoencoder.vae_base import VaeBase + + +class FcVae(VaeBase): + """ + A simple variational autoencoder, containing 2 fully connected layers. 
+ """ + + def __init__(self, img_rows, img_cols, img_channels, layers=None, representation_length=32, + analytical_kl_div=False, max_grad_norm=None): + super().__init__(analytical_kl_div=analytical_kl_div, max_grad_norm=max_grad_norm) + + if layers is None: + layers = [512, 256] + + self.representation_length = representation_length + self.layers = layers + + input_length = img_rows * img_cols * img_channels + + self.encoder = nn.Sequential( + Flatten(), + nn.Linear(in_features=input_length, out_features=self.layers[0]), + nn.Tanh(), + nn.Linear(in_features=self.layers[0], out_features=self.layers[1]), + nn.Tanh(), + nn.Linear(self.layers[1], representation_length * 2) + ) + + self.decoder = nn.Sequential( + nn.Linear(representation_length, self.layers[1]), + nn.Tanh(), + nn.Linear(in_features=self.layers[1], out_features=self.layers[0]), + nn.Tanh(), + nn.Linear(in_features=self.layers[0], out_features=input_length), + Reshape(img_channels, img_rows, img_cols), + nn.Sigmoid() + ) + + self.register_buffer('prior_mean', torch.tensor([[0.0]])) + self.register_buffer('prior_std', torch.tensor([[1.0]])) + + def encoder_network(self, sample: torch.Tensor) -> torch.Tensor: + """ Transform input sample into an encoded representation """ + return self.encoder(sample) + + def encoder_distribution(self, encoded: torch.Tensor) -> dist.Distribution: + """ Create a pytorch distribution object representing the encoder distribution (approximate posterior) """ + mu = encoded[:, :self.representation_length] + std = F.softplus(encoded[:, self.representation_length:]) + + return dist.Independent(dist.Normal(mu, std), 1) + + def decoder_network(self, z: torch.Tensor) -> torch.Tensor: + """ Transform encoded value into a decoded representation """ + return self.decoder(z) + + def decoder_distribution(self, decoded: torch.Tensor) -> dist.Distribution: + """ Create a pytorch distribution object representing the decoder distribution (likelihood) """ + return dist.Independent(dist.Bernoulli(probs=decoded), 3) + + def prior_distribution(self) -> dist.Distribution: + """ Return a prior distribution object """ + return dist.Independent(dist.Normal(self.prior_mean, self.prior_std), 1) + + def decoder_sample(self, decoded: torch.Tensor) -> torch.Tensor: + """ Sample from a decoder distribution - we ignore that since it's so weak in this case """ + return self.decoder_network(decoded) + +# @staticmethod +# def _weight_initializer(tensor): +# init.xavier_uniform_(tensor.weight, gain=init.calculate_gain('tanh')) +# init.constant_(tensor.bias, 0.01) +# +# def reset_weights(self): +# for m in it.chain(self.encoder, self.decoder): +# if isinstance(m, nn.Conv2d): +# self._weight_initializer(m) +# elif isinstance(m, nn.ConvTranspose2d): +# self._weight_initializer(m) +# elif isinstance(m, nn.Linear): +# self._weight_initializer(m) + + +def create(img_rows, img_cols, img_channels, layers=None, representation_length=32, max_grad_norm=None, + analytical_kl_div=True): + """ Vel factory function """ + if layers is None: + layers = [512, 256] + + def instantiate(**_): + return FcVae( + img_rows, img_cols, img_channels, layers=layers, representation_length=representation_length, + max_grad_norm=max_grad_norm, analytical_kl_div=analytical_kl_div + ) + + return ModelFactory.generic(instantiate) diff --git a/vel/model/autoencoder/mnist_fc_vae.py b/vel/model/autoencoder/mnist_fc_vae.py deleted file mode 100644 index e79f114b..00000000 --- a/vel/model/autoencoder/mnist_fc_vae.py +++ /dev/null @@ -1,143 +0,0 @@ -import itertools as it - 
-import torch -import torch.nn as nn -import torch.nn.init as init -import torch.nn.functional as F - -from vel.api import GradientModel, ModelFactory -from vel.metric import AveragingNamedMetric -from vel.metric.loss_metric import Loss -from vel.module.layers import Flatten, Reshape - - -class MnistCnnVAE(GradientModel): - """ - A simple MNIST variational autoencoder, containing 3 convolutional layers. - """ - - def __init__(self, img_rows, img_cols, img_channels, layers=None, representation_length=32): - super(MnistCnnVAE, self).__init__() - - if layers is None: - layers = [512, 256] - - self.representation_length = representation_length - - # self.final_width = net_util.convolutional_layer_series(img_rows, layer_series) - # self.final_height = net_util.convolutional_layer_series(img_cols, layer_series) - self.layers = layers - - input_length = img_rows * img_cols * img_channels - - self.encoder = nn.Sequential( - Flatten(), - nn.Linear(in_features=input_length, out_features=self.layers[0]), - nn.ReLU(True), - nn.Linear(in_features=self.layers[0], out_features=self.layers[1]), - nn.ReLU(True), - nn.Linear(self.layers[1], representation_length * 2) - ) - - self.decoder = nn.Sequential( - nn.Linear(representation_length, self.layers[1]), - nn.ReLU(True), - nn.Linear(self.layers[1], self.layers[0]), - nn.ReLU(True), - nn.Linear(self.layers[0], input_length), - Reshape(img_channels, img_rows, img_cols), - nn.Sigmoid() - ) - - @staticmethod - def _weight_initializer(tensor): - init.xavier_uniform_(tensor.weight, gain=init.calculate_gain('relu')) - init.constant_(tensor.bias, 0.0) - - def reset_weights(self): - for m in it.chain(self.encoder, self.decoder): - if isinstance(m, nn.Conv2d): - self._weight_initializer(m) - elif isinstance(m, nn.ConvTranspose2d): - self._weight_initializer(m) - elif isinstance(m, nn.Linear): - self._weight_initializer(m) - - def encode(self, sample): - encoding = self.encoder(sample) - - mu = encoding[:, :self.representation_length] - # I encode std directly as a softplus, rather than exp(logstd) - std = F.softplus(encoding[:, self.representation_length:]) - - return mu + torch.randn_like(std) * std - - def decode(self, sample): - return self.decoder(sample) - - def forward(self, sample): - encoding = self.encoder(sample) - - mu = encoding[:, :self.representation_length] - # I encode std directly as a softplus, rather than exp(logstd) - std = F.softplus(encoding[:, self.representation_length:]) - - z = mu + torch.randn_like(std) * std - - decoded = self.decoder(z) - - return { - 'decoded': decoded, - 'encoding': z, - 'mu': mu, - 'std': std - } - - def calculate_gradient(self, data): - """ Calculate a gradient of loss function """ - output = self(data['x']) - - y_pred = output['decoded'] - - mu = output['mu'] - std = output['std'] - var = std ** 2 - - kl_divergence = - 0.5 * (1 + torch.log(var) - mu ** 2 - var).sum(dim=1) - kl_divergence = kl_divergence.mean() - - # reconstruction = 0.5 * F.mse_loss(y_pred, y_true) - - # We must sum over all image axis and average only on minibatch axis - reconstruction = F.binary_cross_entropy(y_pred, data['y'], reduction='none').sum(1).sum(1).sum(1).mean() - loss = reconstruction + kl_divergence - - if self.training: - loss.backward() - - return { - 'loss': loss.item(), - 'reconstruction': reconstruction.item(), - 'kl_divergence': kl_divergence.item() - } - - def metrics(self): - """ Set of metrics for this model """ - return [ - Loss(), - AveragingNamedMetric('reconstruction', scope="train"), - 
AveragingNamedMetric('kl_divergence', scope="train") - ] - - -def create(img_rows, img_cols, img_channels, layers=None, representation_length=32): - """ Vel factory function """ - if layers is None: - layers = [512, 256] - - def instantiate(**_): - return MnistCnnVAE( - img_rows, img_cols, img_channels, layers=layers, representation_length=representation_length - ) - - return ModelFactory.generic(instantiate) diff --git a/vel/model/autoencoder/vae_base.py b/vel/model/autoencoder/vae_base.py new file mode 100644 index 00000000..0ff0de27 --- /dev/null +++ b/vel/model/autoencoder/vae_base.py @@ -0,0 +1,164 @@ +import torch +import torch.distributions as dist + +from vel.api import GradientModel +from vel.metric import AveragingNamedMetric +from vel.metric.loss_metric import Loss + + +class VaeBase(GradientModel): + """ Base module for variational auto-encoder implementations """ + def __init__(self, analytical_kl_div=True, max_grad_norm=1.0): + super().__init__() + + self.analytical_kl_div = analytical_kl_div + self.max_grad_norm = max_grad_norm + + #################################################################################################################### + # Interface methods + def encoder_network(self, sample: torch.Tensor) -> torch.Tensor: + """ Transform input sample into an encoded representation """ + raise NotImplementedError + + def encoder_distribution(self, encoded: torch.Tensor) -> dist.Distribution: + """ Create a pytorch distribution object representing the encoder distribution (approximate posterior) """ + raise NotImplementedError + + def decoder_network(self, z: torch.Tensor) -> torch.Tensor: + """ Transform encoded value into a decoded representation """ + raise NotImplementedError + + def decoder_distribution(self, decoded: torch.Tensor) -> dist.Distribution: + """ Create a pytorch distribution object representing the decoder distribution (likelihood) """ + raise NotImplementedError + + def prior_distribution(self) -> dist.Distribution: + """ Return a prior distribution object """ + raise NotImplementedError + + #################################################################################################################### + # Other useful methods + def encode(self, sample: torch.Tensor) -> torch.Tensor: + """ Encode incoming data into a latent representation """ + encoded = self.encoder_network(sample) + return self.encoder_rsample(encoded) + + def encoder_rsample(self, encoded: torch.Tensor) -> torch.Tensor: + """ Sample with "reparametrization trick" encoder sample """ + return self.encoder_distribution(encoded).rsample() + + def decoder_sample(self, decoded: torch.Tensor) -> torch.Tensor: + """ Sample from a decoder distribution """ + return self.decoder_distribution(decoded).sample() + + def forward(self, sample: torch.Tensor) -> torch.Tensor: + """ Simple forward pass through the module """ + encoded = self.encoder_network(sample) + z = self.encoder_rsample(encoded) + decoded = self.decoder_sample(z) + return decoded + + def calculate_gradient(self, data: dict) -> dict: + """ Calculate model gradient for given data sample """ + encoded = self.encoder_network(data['x']) + z_dist = self.encoder_distribution(encoded) + z = z_dist.rsample() + + decoded = self.decoder_network(z) + x_dist = self.decoder_distribution(decoded) + prior = self.prior_distribution() + + kl_divergence = self.kl_divergence(z, z_dist, prior).mean() + reconstruction = x_dist.log_prob(data['y']).mean() + + elbo = reconstruction - kl_divergence + + loss = -elbo + + if self.training: + 
loss.backward() + + if self.max_grad_norm is not None: + grad_norm = torch.nn.utils.clip_grad_norm_( + filter(lambda p: p.requires_grad, self.parameters()), + max_norm=self.max_grad_norm + ) + else: + grad_norm = 0.0 + else: + grad_norm = 0.0 + + return { + 'loss': loss.item(), + + 'grad_norm': grad_norm, + 'reconstruction': -reconstruction.item(), + 'kl_divergence': kl_divergence.item() + } + + def kl_divergence(self, z, encoder_distribution, prior) -> torch.Tensor: + """ Calculate the kl divergence between q(z|x) and p(z) """ + if self.analytical_kl_div: + kl_divergence = dist.kl_divergence(encoder_distribution, prior) + else: + lpz = prior.log_prob(z) + lqzx = encoder_distribution.log_prob(z) + kl_divergence = -lpz + lqzx + + return kl_divergence + + def metrics(self): + """ Set of metrics for this model """ + return [ + Loss(), + AveragingNamedMetric('reconstruction', scope="train"), + AveragingNamedMetric('kl_divergence', scope="train"), + AveragingNamedMetric('grad_norm', scope="train") + ] + + @torch.no_grad() + def nll(self, sample: torch.Tensor, num_posterior_samples: int = 1): + """ + Upper bound on negative log-likelihood of supplied data. + If num samples goes to infinity, the nll of data should + approach true value + """ + assert num_posterior_samples >= 1, "Need at least one posterior sample" + + buffer = [] + + encoded = self.encoder_network(sample) + z_dist = self.encoder_distribution(encoded) + prior = self.prior_distribution() + + if self.analytical_kl_div: + kl_divergence = dist.kl_divergence(z_dist, prior) + + for i in range(num_posterior_samples): + z = z_dist.rsample() + decoded = self.decoder_network(z) + x_dist = self.decoder_distribution(decoded) + + if not self.analytical_kl_div: + lpz = prior.log_prob(z) + lqzx = z_dist.log_prob(z) + kl_divergence = -lpz + lqzx + + likelihood = x_dist.log_prob(sample) + elbo = likelihood - kl_divergence + + buffer.append(elbo) + + averaged = self.log_mean_exp(torch.stack(buffer, dim=-1), dim=-1) + + return -averaged + + #################################################################################################################### + # Utility methods + def log_mean_exp(self, inputs, dim=1): + """ Perform log(mean(exp(data))) in a numerically stable way """ + if inputs.size(dim) == 1: + return inputs + else: + input_max = inputs.max(dim, keepdim=True)[0] + return (inputs - input_max).exp().mean(dim).log() + input_max.squeeze(dim=dim) diff --git a/vel/scheduler/multi_step.py b/vel/scheduler/multi_step.py index eeeb735d..6ab8110b 100644 --- a/vel/scheduler/multi_step.py +++ b/vel/scheduler/multi_step.py @@ -1,22 +1,30 @@ -# import torch.optim.lr_scheduler as scheduler - -# class MultiStepScheduler: -# def __init__(self, optimizer, milestones, gamma, last_epoch): -# self.scheduler = scheduler.MultiStepLR(optimizer, milestones, gamma, last_epoch=last_epoch) -# -# def get_lr(self): -# return self.scheduler.get_lr() -# -# def pre_epoch_step(self, epoch_idx): -# self.scheduler.step() -# -# def post_epoch_step(self, epoch_idx, metrics): -# pass -# -# -# def create(milestones, gamma=0.1): -# """ Create a multi-step scheduler """ -# def scheduler_fn(optimizer, last_epoch=-1): -# return MultiStepScheduler(optimizer, milestones, gamma, last_epoch=last_epoch) -# -# return scheduler_fn +import torch.optim.lr_scheduler as scheduler + +from vel.api import Callback, SchedulerFactory, EpochInfo + + +class MultiStepScheduler(Callback): + """ Scheduler multiplying the learning rate given number after given number of epochs """ + def 
__init__(self, optimizer, milestones, gamma, last_epoch): + self.scheduler = scheduler.MultiStepLR(optimizer, milestones, gamma, last_epoch=last_epoch) + + def get_lr(self): + return self.scheduler.get_lr() + + def on_epoch_end(self, epoch_info: EpochInfo) -> None: + self.scheduler.step(epoch=epoch_info.global_epoch_idx) + + +class MultiStepSchedulerFactory(SchedulerFactory): + """ Factory class for ladder scheduler """ + def __init__(self, milestones, gamma): + self.milestones = milestones + self.gamma = gamma + + def instantiate(self, optimizer, last_epoch=-1) -> MultiStepScheduler: + return MultiStepScheduler(optimizer, self.milestones, self.gamma, last_epoch) + + +def create(milestones, gamma=0.1): + """ Create a multi-step scheduler """ + return MultiStepSchedulerFactory(milestones, gamma) diff --git a/vel/storage/backend/mongodb.py b/vel/storage/backend/mongodb.py index ff663737..77b8f6da 100644 --- a/vel/storage/backend/mongodb.py +++ b/vel/storage/backend/mongodb.py @@ -34,7 +34,7 @@ def get_frame(self): return pd.DataFrame(metric_items).drop(['_id', 'model_name'], axis=1).set_index('epoch_idx') def store(self, metrics): - augmented_metrics = metrics.copy() + augmented_metrics = {'{}/{}/{}'.format(k.scope, k.dataset, k.name): v for k, v in metrics.items()} model_name = self.model_config.name run_name = self.model_config.run_name From 00178dd8a987d7db62de2eda444d2d9519f1d227 Mon Sep 17 00:00:00 2001 From: Million Integrals Date: Fri, 20 Sep 2019 10:34:18 -0700 Subject: [PATCH 083/162] Implemented CNN-VAE for MNIST. --- .../autoencoder/mnist/mnist_cnn_ae.yaml | 2 +- .../autoencoder/mnist/mnist_cnn_vae.yaml | 40 +++- .../autoencoder/mnist/mnist_fc_vae.yaml | 40 +--- vel/model/autoencoder/cnn_vae.py | 158 +++++++++++++ vel/model/autoencoder/mnist_cnn_vae.py | 210 ------------------ vel/model/autoencoder/vae_base.py | 10 + 6 files changed, 205 insertions(+), 255 deletions(-) create mode 100644 vel/model/autoencoder/cnn_vae.py delete mode 100644 vel/model/autoencoder/mnist_cnn_vae.py diff --git a/examples-configs/autoencoder/mnist/mnist_cnn_ae.yaml b/examples-configs/autoencoder/mnist/mnist_cnn_ae.yaml index d4ce36c7..2591cc04 100644 --- a/examples-configs/autoencoder/mnist/mnist_cnn_ae.yaml +++ b/examples-configs/autoencoder/mnist/mnist_cnn_ae.yaml @@ -2,7 +2,7 @@ name: 'mnist_cnn_ae' model: - name: vel.model.autoencoder.mnist_cnn_autoencoder + name: vel.model.autoencoder.cnn_autoencoder img_rows: 28 img_cols: 28 img_channels: 1 diff --git a/examples-configs/autoencoder/mnist/mnist_cnn_vae.yaml b/examples-configs/autoencoder/mnist/mnist_cnn_vae.yaml index 6dc6525f..8994602b 100644 --- a/examples-configs/autoencoder/mnist/mnist_cnn_vae.yaml +++ b/examples-configs/autoencoder/mnist/mnist_cnn_vae.yaml @@ -2,34 +2,64 @@ name: 'mnist_cnn_vae' model: - name: vel.model.autoencoder.mnist_cnn_vae + name: vel.model.autoencoder.cnn_vae img_rows: 28 img_cols: 28 img_channels: 1 channels: [64, 128, 256] - representation_length: 20 + representation_length: 50 + max_grad_norm: 1.0 + analytical_kl_div: true source: name: vel.data.source.vision.mnist + loader: name: vel.data.dataset_loader batch_size: 256 num_workers: 4 + pin_memory: true transformations: + - name: vel.data.transformation.to_array +# - name: vel.data.augmentation.random_scale +# tags: train +# size: 28 +# max_zoom: 1.1 +# - name: vel.data.augmentation.random_rotate +# tags: train +# deg: 15.0 +# - name: vel.data.augmentation.random_crop +# tags: train +# width: 28 +# height: 28 +# padding: 3 +# padding_mode: 'constant' + - name: 
vel.data.transformation.binarize_image - name: vel.data.transformation.image_to_tensor - name: vel.data.transformation.unsupervised optimizer: name: vel.optimizer.radam - lr: 1.0e-4 + lr: 1.0e-3 + eps: 1.0e-4 + + +scheduler: + name: vel.scheduler.multi_step + gamma: 0.71968 # 10 * (-1/7) + milestones: [ 1, 4, 13, 40, 121, 364, 1093, 3280] commands: + augvis: + name: vel.command.augvis_command + samples: 10 + cases: 5 + train: name: vel.command.train_command - epochs: 200 - + epochs: 3280 diff --git a/examples-configs/autoencoder/mnist/mnist_fc_vae.yaml b/examples-configs/autoencoder/mnist/mnist_fc_vae.yaml index 647094f4..ed0977ae 100644 --- a/examples-configs/autoencoder/mnist/mnist_fc_vae.yaml +++ b/examples-configs/autoencoder/mnist/mnist_fc_vae.yaml @@ -43,42 +43,4 @@ scheduler: commands: train: name: vel.command.train_command - epochs: 3280 - -# train: -# name: vel.command.phase_train_command -# phases: -# - name: vel.train.phase.cycle -# max_lr: 0.001 -# min_lr: 0.000 -# interpolate: 'cosine' -# cycles: 20 -# cycle_len: 1 -# - name: vel.train.phase.cycle -# max_lr: 0.001 -# min_lr: 0.0000 -# interpolate: 'cosine' -# cycles: 7 -# cycle_len: 1 -# cycle_mult: 2 -# - name: vel.train.phase.cycle -# max_lr: 0.0001 -# min_lr: 0.0000 -# interpolate: 'cosine' -# cycles: 8 -# cycle_len: 1 -# cycle_mult: 2 -# - name: vel.train.phase.cycle -# max_lr: 0.00003 -# min_lr: 0.0000 -# interpolate: 'cosine' -# cycles: 9 -# cycle_len: 1 -# cycle_mult: 2 -# - name: vel.train.phase.cycle -# max_lr: 0.00001 -# min_lr: 0.0000 -# interpolate: 'cosine' -# cycles: 10 -# cycle_len: 1 -# cycle_mult: 2 + epochs: 3280 \ No newline at end of file diff --git a/vel/model/autoencoder/cnn_vae.py b/vel/model/autoencoder/cnn_vae.py new file mode 100644 index 00000000..da57eacd --- /dev/null +++ b/vel/model/autoencoder/cnn_vae.py @@ -0,0 +1,158 @@ +import itertools as it + +import torch +import torch.nn as nn +import torch.nn.init as init +import torch.nn.functional as F +import torch.distributions as dist + +import vel.util.network as net_util + +from vel.api import ModelFactory +from vel.module.layers import Flatten, Reshape +from vel.model.autoencoder.vae_base import VaeBase + + +class CnnVAE(VaeBase): + """ + A simple variational autoencoder, containing 3 convolutional layers. 
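The `multi_step` scheduler settings in the configs above use `gamma: 0.71968`, which is 10 ** (-1/7) (the inline comment's `10 * (-1/7)` reads as that power), so the learning rate drops by a factor of ten after every seven milestones. A quick standalone check of that arithmetic:

base_lr = 1.0e-3
gamma = 10 ** (-1.0 / 7.0)                        # ~0.719686, the value written in the yaml files
milestones = [1, 4, 13, 40, 121, 364, 1093, 3280]

lr = base_lr
for milestone in milestones:
    lr *= gamma
    print(f"after epoch {milestone:>4}: lr ~ {lr:.2e}")
# seven decays multiply the base learning rate by 10 ** (-1) = 0.1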
+ """ + + def __init__(self, img_rows, img_cols, img_channels, channels=None, representation_length=32, + analytical_kl_div=True, max_grad_norm=0.5): + super().__init__(analytical_kl_div=analytical_kl_div, max_grad_norm=max_grad_norm) + + if channels is None: + channels = [16, 32, 32] + + layer_series = [ + (3, 1, 1), + (3, 1, 2), + (3, 1, 2), + ] + + self.representation_length = representation_length + + self.final_width = net_util.convolutional_layer_series(img_rows, layer_series) + self.final_height = net_util.convolutional_layer_series(img_cols, layer_series) + self.channels = channels + + self.encoder = nn.Sequential( + nn.Conv2d(in_channels=img_channels, out_channels=channels[0], kernel_size=(3, 3), padding=1), + # nn.ReLU(True), + nn.SELU(True), + nn.LayerNorm([ + channels[0], + net_util.convolutional_layer_series(img_rows, layer_series[:1]), + net_util.convolutional_layer_series(img_cols, layer_series[:1]), + ]), + nn.Conv2d(in_channels=channels[0], out_channels=channels[1], kernel_size=(3, 3), stride=2, padding=1), + # nn.ReLU(True), + nn.SELU(True), + nn.LayerNorm([ + channels[1], + net_util.convolutional_layer_series(img_rows, layer_series[:2]), + net_util.convolutional_layer_series(img_cols, layer_series[:2]), + ]), + nn.Conv2d(in_channels=channels[1], out_channels=channels[2], kernel_size=(3, 3), stride=2, padding=1), + # nn.ReLU(True), + nn.SELU(True), + nn.LayerNorm([ + channels[2], + net_util.convolutional_layer_series(img_rows, layer_series), + net_util.convolutional_layer_series(img_cols, layer_series), + ]), + Flatten(), + nn.Linear(self.final_width * self.final_height * channels[2], representation_length * 2) + ) + + self.decoder = nn.Sequential( + nn.Linear(representation_length, self.final_width * self.final_height * channels[2]), + # nn.ReLU(True), + nn.SELU(True), + Reshape(channels[2], self.final_width, self.final_height), + nn.LayerNorm([ + channels[2], + net_util.convolutional_layer_series(img_rows, layer_series), + net_util.convolutional_layer_series(img_cols, layer_series), + ]), + nn.ConvTranspose2d( + in_channels=channels[2], out_channels=channels[1], kernel_size=3, stride=2, padding=1, output_padding=1 + ), + # nn.ReLU(True), + nn.SELU(True), + nn.LayerNorm([ + channels[1], + net_util.convolutional_layer_series(img_rows, layer_series[:2]), + net_util.convolutional_layer_series(img_cols, layer_series[:2]), + ]), + nn.ConvTranspose2d( + in_channels=channels[1], out_channels=channels[0], kernel_size=3, stride=2, padding=1, output_padding=1 + ), + # nn.ReLU(True), + nn.SELU(True), + nn.LayerNorm([ + channels[0], + net_util.convolutional_layer_series(img_rows, layer_series[:1]), + net_util.convolutional_layer_series(img_cols, layer_series[:1]), + ]), + nn.ConvTranspose2d(in_channels=channels[0], out_channels=img_channels, kernel_size=3, padding=1), + nn.Sigmoid() + ) + + self.register_buffer('prior_mean', torch.tensor([[0.0]])) + self.register_buffer('prior_std', torch.tensor([[1.0]])) + + @staticmethod + def _weight_initializer(tensor): + init.xavier_uniform_(tensor.weight, gain=init.calculate_gain('relu')) + init.constant_(tensor.bias, 0.0) + + def reset_weights(self): + for m in it.chain(self.encoder, self.decoder): + if isinstance(m, nn.Conv2d): + self._weight_initializer(m) + elif isinstance(m, nn.ConvTranspose2d): + self._weight_initializer(m) + elif isinstance(m, nn.Linear): + self._weight_initializer(m) + + def encoder_network(self, sample: torch.Tensor) -> torch.Tensor: + """ Transform input sample into an encoded representation """ + return 
self.encoder(sample) + + def encoder_distribution(self, encoded: torch.Tensor) -> dist.Distribution: + """ Create a pytorch distribution object representing the encoder distribution (approximate posterior) """ + mu = encoded[:, :self.representation_length] + std = F.softplus(encoded[:, self.representation_length:]) + + return dist.Independent(dist.Normal(mu, std), 1) + + def decoder_network(self, z: torch.Tensor) -> torch.Tensor: + """ Transform encoded value into a decoded representation """ + return self.decoder(z) + + def decoder_distribution(self, decoded: torch.Tensor) -> dist.Distribution: + """ Create a pytorch distribution object representing the decoder distribution (likelihood) """ + return dist.Independent(dist.Bernoulli(probs=decoded), 3) + + def prior_distribution(self) -> dist.Distribution: + """ Return a prior distribution object """ + return dist.Independent(dist.Normal(self.prior_mean, self.prior_std), 1) + + def decoder_sample(self, decoded: torch.Tensor) -> torch.Tensor: + """ Sample from a decoder distribution - we ignore that since it's so weak in this case """ + return self.decoder_network(decoded) + + +def create(img_rows, img_cols, img_channels, channels=None, representation_length=32): + """ Vel factory function """ + if channels is None: + channels = [16, 32, 32] + + def instantiate(**_): + return CnnVAE( + img_rows, img_cols, img_channels, channels=channels, representation_length=representation_length + ) + + return ModelFactory.generic(instantiate) diff --git a/vel/model/autoencoder/mnist_cnn_vae.py b/vel/model/autoencoder/mnist_cnn_vae.py deleted file mode 100644 index 9cfa95a8..00000000 --- a/vel/model/autoencoder/mnist_cnn_vae.py +++ /dev/null @@ -1,210 +0,0 @@ -import itertools as it - -import torch -import torch.nn as nn -import torch.nn.init as init -import torch.nn.functional as F - -import vel.util.network as net_util - -from vel.api import GradientModel, ModelFactory -from vel.metric import AveragingNamedMetric -from vel.metric.loss_metric import Loss -from vel.module.layers import Flatten, Reshape - - -class MnistCnnVAE(GradientModel): - """ - A simple MNIST variational autoencoder, containing 3 convolutional layers. 
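The `layer_series` tuples passed to `net_util.convolutional_layer_series` describe each convolution, read here as (kernel size, padding, stride); the exact signature of the vel helper is an assumption of this sketch. Standard convolution arithmetic then explains why a 28x28 input ends up as a 7x7 feature map before the final linear layer:

def conv_output_size(size, kernel, padding, stride):
    # floor((size + 2 * padding - kernel) / stride) + 1
    return (size + 2 * padding - kernel) // stride + 1

def series_output_size(size, layer_series):
    # illustrative stand-in for net_util.convolutional_layer_series
    for kernel, padding, stride in layer_series:
        size = conv_output_size(size, kernel, padding, stride)
    return size

layer_series = [(3, 1, 1), (3, 1, 2), (3, 1, 2)]
assert series_output_size(28, layer_series) == 7   # 28 -> 28 -> 14 -> 7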
- """ - - def __init__(self, img_rows, img_cols, img_channels, channels=None, representation_length=32): - super(MnistCnnVAE, self).__init__() - - if channels is None: - channels = [16, 32, 32] - - layer_series = [ - (3, 1, 1), - (3, 1, 2), - (3, 1, 2), - ] - - self.representation_length = representation_length - - self.final_width = net_util.convolutional_layer_series(img_rows, layer_series) - self.final_height = net_util.convolutional_layer_series(img_cols, layer_series) - self.channels = channels - - self.encoder = nn.Sequential( - nn.Conv2d(in_channels=img_channels, out_channels=channels[0], kernel_size=(3, 3), padding=1), - nn.ReLU(True), - nn.Conv2d(in_channels=channels[0], out_channels=channels[1], kernel_size=(3, 3), stride=2, padding=1), - nn.ReLU(True), - nn.Conv2d(in_channels=channels[1], out_channels=channels[2], kernel_size=(3, 3), stride=2, padding=1), - Flatten(), - nn.Linear(self.final_width * self.final_height * channels[2], representation_length * 2) - ) - - self.decoder = nn.Sequential( - nn.Linear(representation_length, self.final_width * self.final_height * channels[2]), - nn.ReLU(True), - Reshape(channels[2], self.final_width, self.final_height), - nn.ConvTranspose2d( - in_channels=channels[2], out_channels=channels[1], kernel_size=3, stride=2, padding=1, output_padding=1 - ), - nn.ReLU(True), - nn.ConvTranspose2d( - in_channels=channels[1], out_channels=channels[0], kernel_size=3, stride=2, padding=1, output_padding=1 - ), - nn.ReLU(True), - nn.ConvTranspose2d(in_channels=channels[0], out_channels=img_channels, kernel_size=3, padding=1), - nn.Sigmoid() - ) - - @staticmethod - def _weight_initializer(tensor): - init.xavier_uniform_(tensor.weight, gain=init.calculate_gain('relu')) - init.constant_(tensor.bias, 0.0) - - def reset_weights(self): - for m in it.chain(self.encoder, self.decoder): - if isinstance(m, nn.Conv2d): - self._weight_initializer(m) - elif isinstance(m, nn.ConvTranspose2d): - self._weight_initializer(m) - elif isinstance(m, nn.Linear): - self._weight_initializer(m) - - def encoder_distribution(self, sample): - encoding = self.encoder(sample) - mu = encoding[:, :self.representation_length] - # I encode std directly as a softplus, rather than exp(logstd) - std = F.softplus(encoding[:, self.representation_length:]) - - return mu, std - - def encode(self, sample): - mu, std = self.encoder_distribution(sample) - # Sample z - return mu + torch.randn_like(std) * std - - def decode(self, sample): - # We don't sample here, because decoder is so weak it doesn't make sense - return self.decoder(sample) - - def forward(self, sample): - mu, std = self.encoder_distribution(sample) - - # Sample z - z = mu + torch.randn_like(std) * std - decoded = self.decoder(z) - - return { - 'decoded': decoded, - 'encoding': z, - 'mu': mu, - 'std': std - } - - def calculate_gradient(self, data): - """ Calculate a gradient of loss function """ - output = self(data['x']) - - # ELBO is E_q log p(x, z) / q(z | x) - # Which can be expressed in many equivalent forms: - # (1) E_q log p(x | z) + log p(z) - log q(z | x) - # (2) E_q log p(x | z) - D_KL(p(z) || q(z | x)) - # (3) E_q log p(x) - D_KL(p(z | x) || q(z | x)Biblio) - - # Form 3 is interesting from a theoretical standpoint, but is intractable to compute directly - # While forms (1) and (2) can be computed directly. 
- # Positive aspect of form (2) is that KL divergence can be calculated analytically - # further reducing the variance of the gradient - - y_pred = output['decoded'] - - mu = output['mu'] - std = output['std'] - var = std ** 2 - - # Analytical solution of KL divergence - kl_divergence = - 0.5 * (1 + torch.log(var) - mu ** 2 - var).sum(dim=1) - kl_divergence = kl_divergence.mean() - - # Diag-gaussian likelihood - # likelihood = 0.5 * F.mse_loss(y_pred, y_true) - - # We must sum over all image axis and average only on minibatch axis - # Log prob p(x | z) in the case where the output distribution is Bernoulli(p) - likelihood = F.binary_cross_entropy(y_pred, data['y'], reduction='none').sum((1, 2, 3)).mean() - - elbo = likelihood + kl_divergence - - nll = self.nll(data['x'], num_posterior_samples=5) - - if self.training: - elbo.backward() - - return { - 'loss': elbo.item(), - 'nll': nll.mean().item(), - 'reconstruction': likelihood.item(), - 'kl_divergence': kl_divergence.item() - } - - def logmeanexp(self, inputs, dim=1): - if inputs.size(dim) == 1: - return inputs - else: - input_max = inputs.max(dim, keepdim=True)[0] - return (inputs - input_max).exp().mean(dim).log() + input_max.squeeze(dim=dim) - - @torch.no_grad() - def nll(self, data_sample, num_posterior_samples: int = 1): - """ - Upper bound on negative log-likelihood of supplied data. - If num samples goes to infinity, the nll of data should - approach true value - """ - assert num_posterior_samples >= 1, "Need at least one posterior sample" - - buffer = [] - - mu, std = self.encoder_distribution(data_sample) - var = std ** 2 - - kl_divergence = - 0.5 * (1 + torch.log(var) - mu ** 2 - var).sum(dim=1) - - for i in range(num_posterior_samples): - z = mu + torch.randn_like(std) * std - y_pred = self.decoder(z) - - likelihood = F.binary_cross_entropy(y_pred, data_sample, reduction='none').sum((1, 2, 3)) - elbo = likelihood + kl_divergence - - buffer.append(-elbo) - - averaged = self.logmeanexp(torch.stack(buffer, dim=-1), dim=-1) - return -averaged - - def metrics(self): - """ Set of metrics for this model """ - return [ - Loss(), - AveragingNamedMetric('reconstruction', scope="train"), - AveragingNamedMetric('kl_divergence', scope="train") - ] - - -def create(img_rows, img_cols, img_channels, channels=None, representation_length=32): - """ Vel factory function """ - if channels is None: - channels = [16, 32, 32] - - def instantiate(**_): - return MnistCnnVAE( - img_rows, img_cols, img_channels, channels=channels, representation_length=representation_length - ) - - return ModelFactory.generic(instantiate) diff --git a/vel/model/autoencoder/vae_base.py b/vel/model/autoencoder/vae_base.py index 0ff0de27..efaf62ab 100644 --- a/vel/model/autoencoder/vae_base.py +++ b/vel/model/autoencoder/vae_base.py @@ -71,6 +71,16 @@ def calculate_gradient(self, data: dict) -> dict: kl_divergence = self.kl_divergence(z, z_dist, prior).mean() reconstruction = x_dist.log_prob(data['y']).mean() + # ELBO is E_q log p(x, z) / q(z | x) + # Which can be expressed in many equivalent forms: + # (1) E_q log p(x | z) + log p(z) - log q(z | x) + # (2) E_q log p(x | z) - D_KL(p(z) || q(z | x)) + # (3) E_q log p(x) - D_KL(p(z | x) || q(z | x)Biblio) + + # Form 3 is interesting from a theoretical standpoint, but is intractable to compute directly + # While forms (1) and (2) can be computed directly. 
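The reconstruction term is likewise equivalent in both styles: the per-image binary cross-entropy summed over channel, height and width (as in the code removed above) is the negative of the log-probability returned by the `Independent` Bernoulli decoder distribution that the new models use. A standalone check on random data:

import torch
import torch.distributions as dist
import torch.nn.functional as F

torch.manual_seed(0)
probs = torch.rand(8, 1, 28, 28).clamp(1e-4, 1 - 1e-4)   # stand-in for the decoder output
target = (torch.rand(8, 1, 28, 28) > 0.5).float()        # binarized target images

bce = F.binary_cross_entropy(probs, target, reduction='none').sum((1, 2, 3))
log_prob = dist.Independent(dist.Bernoulli(probs=probs), 3).log_prob(target)

assert torch.allclose(bce, -log_prob, atol=1e-3)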
+ # Positive aspect of form (2) is that KL divergence can be calculated analytically + # further reducing the variance of the gradient elbo = reconstruction - kl_divergence loss = -elbo From b61bbcd0a658228d72d54204e06bbdb80b8c6ce1 Mon Sep 17 00:00:00 2001 From: Million Integrals Date: Fri, 20 Sep 2019 20:57:17 -0700 Subject: [PATCH 084/162] Added omniglot dataset. --- vel/data/source/vision/mnist.py | 2 +- vel/data/source/vision/omniglot.py | 16 ++++++++++++++++ 2 files changed, 17 insertions(+), 1 deletion(-) create mode 100644 vel/data/source/vision/omniglot.py diff --git a/vel/data/source/vision/mnist.py b/vel/data/source/vision/mnist.py index 16640cac..58f8ee90 100644 --- a/vel/data/source/vision/mnist.py +++ b/vel/data/source/vision/mnist.py @@ -4,7 +4,7 @@ def create(model_config): - """ Create a MNIST dataset, normalized """ + """ Create a MNIST dataset """ path = model_config.data_dir('mnist') train_dataset = datasets.MNIST(path, train=True, download=True) diff --git a/vel/data/source/vision/omniglot.py b/vel/data/source/vision/omniglot.py new file mode 100644 index 00000000..659f53b4 --- /dev/null +++ b/vel/data/source/vision/omniglot.py @@ -0,0 +1,16 @@ +from torchvision import datasets + +from vel.api import Source + + +def create(model_config): + """ Create an Omniglot dataset """ + path = model_config.data_dir('omniglot') + + train_dataset = datasets.Omniglot(path, background=True, download=True) + test_dataset = datasets.Omniglot(path, background=False, download=True) + + return Source( + train=train_dataset, + validation=test_dataset, + ) From b39347090b13bccafa0c93fba2383275a9f08385 Mon Sep 17 00:00:00 2001 From: Million Integrals Date: Fri, 20 Sep 2019 20:57:34 -0700 Subject: [PATCH 085/162] Added omniglot VAE configs. --- .../omniglot/omniglot_cnn_vae.yaml | 49 +++++++++++++++++ .../autoencoder/omniglot/omniglot_fc_vae.yaml | 54 +++++++++++++++++++ 2 files changed, 103 insertions(+) create mode 100644 examples-configs/autoencoder/omniglot/omniglot_cnn_vae.yaml create mode 100644 examples-configs/autoencoder/omniglot/omniglot_fc_vae.yaml diff --git a/examples-configs/autoencoder/omniglot/omniglot_cnn_vae.yaml b/examples-configs/autoencoder/omniglot/omniglot_cnn_vae.yaml new file mode 100644 index 00000000..dae887c8 --- /dev/null +++ b/examples-configs/autoencoder/omniglot/omniglot_cnn_vae.yaml @@ -0,0 +1,49 @@ +name: 'omniglot_cnn_vae' + + +model: + name: vel.model.autoencoder.cnn_vae + img_rows: 28 + img_cols: 28 + img_channels: 1 + channels: [64, 128, 256] + representation_length: 50 + max_grad_norm: 1.0 + analytical_kl_div: true + + +source: + name: vel.data.source.vision.omniglot + + +loader: + name: vel.data.dataset_loader + batch_size: 128 + num_workers: 4 + pin_memory: true + + transformations: + - name: vel.data.transformation.pil_resize + shape: [28, 28] + - name: vel.data.transformation.to_array + - name: vel.data.transformation.binarize_image + - name: vel.data.transformation.image_to_tensor + - name: vel.data.transformation.unsupervised + + +optimizer: + name: vel.optimizer.radam + lr: 1.0e-3 + eps: 1.0e-4 + + +commands: + augvis: + name: vel.command.augvis_command + samples: 5 + cases: 3 + + train: + name: vel.command.train_command + epochs: 3280 + diff --git a/examples-configs/autoencoder/omniglot/omniglot_fc_vae.yaml b/examples-configs/autoencoder/omniglot/omniglot_fc_vae.yaml new file mode 100644 index 00000000..1b627400 --- /dev/null +++ b/examples-configs/autoencoder/omniglot/omniglot_fc_vae.yaml @@ -0,0 +1,54 @@ +name: 'omniglot_fc_vae' + + +model: + 
name: vel.model.autoencoder.fc_vae + img_rows: 28 + img_cols: 28 + img_channels: 1 + layers: [200, 200] + representation_length: 50 + max_grad_norm: 1.0 + analytical_kl_div: true + + +source: + name: vel.data.source.vision.omniglot + + +loader: + name: vel.data.dataset_loader + batch_size: 128 + num_workers: 4 + pin_memory: true + + transformations: + - name: vel.data.transformation.pil_resize + shape: [28, 28] + - name: vel.data.transformation.to_array + - name: vel.data.transformation.binarize_image + - name: vel.data.transformation.image_to_tensor + - name: vel.data.transformation.unsupervised + + +optimizer: + name: vel.optimizer.radam + lr: 1.0e-3 + eps: 1.0e-4 + + +scheduler: + name: vel.scheduler.multi_step + gamma: 0.71968 # 10 * (-1/7) + milestones: [ 1, 4, 13, 40, 121, 364, 1093, 3280] + + +commands: + augvis: + name: vel.command.augvis_command + samples: 5 + cases: 3 + + train: + name: vel.command.train_command + epochs: 3280 From 509e2c7cdef52962197a3a492638a03c26fb840d Mon Sep 17 00:00:00 2001 From: Million Integrals Date: Sun, 22 Sep 2019 21:42:24 -0700 Subject: [PATCH 086/162] Renamed cnn autoencode. --- .../autoencoder/{mnist_cnn_autoencoder.py => cnn_autoencoder.py} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename vel/model/autoencoder/{mnist_cnn_autoencoder.py => cnn_autoencoder.py} (100%) diff --git a/vel/model/autoencoder/mnist_cnn_autoencoder.py b/vel/model/autoencoder/cnn_autoencoder.py similarity index 100% rename from vel/model/autoencoder/mnist_cnn_autoencoder.py rename to vel/model/autoencoder/cnn_autoencoder.py From d0c56e8fcb64d87f899515f814744e4ac5170a85 Mon Sep 17 00:00:00 2001 From: Million Integrals Date: Sun, 22 Sep 2019 21:47:00 -0700 Subject: [PATCH 087/162] Reorganized latent variable models. --- .../{autoencoder => latent}/mnist/mnist_cnn_vae.yaml | 2 +- .../{autoencoder => latent}/mnist/mnist_fc_vae.yaml | 2 +- .../{autoencoder => latent}/omniglot/omniglot_cnn_vae.yaml | 2 +- .../{autoencoder => latent}/omniglot/omniglot_fc_vae.yaml | 2 +- vel/model/{autoencoder => latent}/cnn_vae.py | 0 vel/model/{autoencoder => latent}/fc_vae.py | 0 vel/model/{autoencoder => latent}/vae_base.py | 0 7 files changed, 4 insertions(+), 4 deletions(-) rename examples-configs/{autoencoder => latent}/mnist/mnist_cnn_vae.yaml (97%) rename examples-configs/{autoencoder => latent}/mnist/mnist_fc_vae.yaml (95%) rename examples-configs/{autoencoder => latent}/omniglot/omniglot_cnn_vae.yaml (95%) rename examples-configs/{autoencoder => latent}/omniglot/omniglot_fc_vae.yaml (96%) rename vel/model/{autoencoder => latent}/cnn_vae.py (100%) rename vel/model/{autoencoder => latent}/fc_vae.py (100%) rename vel/model/{autoencoder => latent}/vae_base.py (100%) diff --git a/examples-configs/autoencoder/mnist/mnist_cnn_vae.yaml b/examples-configs/latent/mnist/mnist_cnn_vae.yaml similarity index 97% rename from examples-configs/autoencoder/mnist/mnist_cnn_vae.yaml rename to examples-configs/latent/mnist/mnist_cnn_vae.yaml index 8994602b..76cc3f90 100644 --- a/examples-configs/autoencoder/mnist/mnist_cnn_vae.yaml +++ b/examples-configs/latent/mnist/mnist_cnn_vae.yaml @@ -2,7 +2,7 @@ name: 'mnist_cnn_vae' model: - name: vel.model.autoencoder.cnn_vae + name: vel.model.latent.cnn_vae img_rows: 28 img_cols: 28 img_channels: 1 diff --git a/examples-configs/autoencoder/mnist/mnist_fc_vae.yaml b/examples-configs/latent/mnist/mnist_fc_vae.yaml similarity index 95% rename from examples-configs/autoencoder/mnist/mnist_fc_vae.yaml rename to examples-configs/latent/mnist/mnist_fc_vae.yaml 
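For orientation, the Omniglot loader configuration above resizes the 105x105 glyphs to 28x28, binarizes them and converts them to tensors. A rough torchvision-only equivalent is sketched below; the data path and the exact binarization rule are assumptions, and vel's own transformation classes may behave differently:

from torchvision import datasets, transforms

transform = transforms.Compose([
    transforms.Resize((28, 28)),                      # roughly vel.data.transformation.pil_resize
    transforms.ToTensor(),                            # roughly to_array + image_to_tensor
    transforms.Lambda(lambda x: (x > 0.5).float()),   # roughly binarize_image (simple thresholding assumed)
])

train_set = datasets.Omniglot('data/omniglot', background=True, download=True, transform=transform)
image, _ = train_set[0]
print(image.shape)   # torch.Size([1, 28, 28]), values in {0.0, 1.0}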
index ed0977ae..96653a6e 100644 --- a/examples-configs/autoencoder/mnist/mnist_fc_vae.yaml +++ b/examples-configs/latent/mnist/mnist_fc_vae.yaml @@ -2,7 +2,7 @@ name: 'mnist_fc_vae' model: - name: vel.model.autoencoder.fc_vae + name: vel.model.latent.fc_vae img_rows: 28 img_cols: 28 img_channels: 1 diff --git a/examples-configs/autoencoder/omniglot/omniglot_cnn_vae.yaml b/examples-configs/latent/omniglot/omniglot_cnn_vae.yaml similarity index 95% rename from examples-configs/autoencoder/omniglot/omniglot_cnn_vae.yaml rename to examples-configs/latent/omniglot/omniglot_cnn_vae.yaml index dae887c8..2df6f80b 100644 --- a/examples-configs/autoencoder/omniglot/omniglot_cnn_vae.yaml +++ b/examples-configs/latent/omniglot/omniglot_cnn_vae.yaml @@ -2,7 +2,7 @@ name: 'omniglot_cnn_vae' model: - name: vel.model.autoencoder.cnn_vae + name: vel.model.latent.cnn_vae img_rows: 28 img_cols: 28 img_channels: 1 diff --git a/examples-configs/autoencoder/omniglot/omniglot_fc_vae.yaml b/examples-configs/latent/omniglot/omniglot_fc_vae.yaml similarity index 96% rename from examples-configs/autoencoder/omniglot/omniglot_fc_vae.yaml rename to examples-configs/latent/omniglot/omniglot_fc_vae.yaml index 1b627400..263a72eb 100644 --- a/examples-configs/autoencoder/omniglot/omniglot_fc_vae.yaml +++ b/examples-configs/latent/omniglot/omniglot_fc_vae.yaml @@ -2,7 +2,7 @@ name: 'omniglot_fc_vae' model: - name: vel.model.autoencoder.fc_vae + name: vel.model.latent.fc_vae img_rows: 28 img_cols: 28 img_channels: 1 diff --git a/vel/model/autoencoder/cnn_vae.py b/vel/model/latent/cnn_vae.py similarity index 100% rename from vel/model/autoencoder/cnn_vae.py rename to vel/model/latent/cnn_vae.py diff --git a/vel/model/autoencoder/fc_vae.py b/vel/model/latent/fc_vae.py similarity index 100% rename from vel/model/autoencoder/fc_vae.py rename to vel/model/latent/fc_vae.py diff --git a/vel/model/autoencoder/vae_base.py b/vel/model/latent/vae_base.py similarity index 100% rename from vel/model/autoencoder/vae_base.py rename to vel/model/latent/vae_base.py From b988946389274ed0fb33d6d0d186faf0c38aacdd Mon Sep 17 00:00:00 2001 From: Million Integrals Date: Sun, 22 Sep 2019 22:46:22 -0700 Subject: [PATCH 088/162] Make Reshape more flexible. --- vel/module/layers.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/vel/module/layers.py b/vel/module/layers.py index 08fadb0a..9f9de68d 100644 --- a/vel/module/layers.py +++ b/vel/module/layers.py @@ -41,13 +41,18 @@ def forward(self, x): class Reshape(nn.Module): """ Flatten input vector """ - def __init__(self, *sizes): + def __init__(self, *sizes, batch_dims=1): super().__init__() self.sizes = sizes + self.batch_dims = batch_dims def forward(self, x): - return x.view(x.size(0), *self.sizes) + return x.view(x.shape[:self.batch_dims] + self.sizes) + + def extra_repr(self) -> str: + """ Extra representation of this module """ + return f"sizes={self.sizes}, batch_dims={self.batch_dims}" class OneHotEncode(nn.Module): From e51146215a6a60d16ca251c0350270db79f8df4a Mon Sep 17 00:00:00 2001 From: Million Integrals Date: Sun, 22 Sep 2019 22:46:37 -0700 Subject: [PATCH 089/162] Clean up VAE implementation. 
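The `batch_dims` argument added to `Reshape` above lets the same module handle tensors with extra leading dimensions (for instance a leading sample dimension) as well as ordinary batches. A minimal copy of the changed module with a usage example:

import torch
import torch.nn as nn

class Reshape(nn.Module):
    """ Minimal copy of the module as changed above """
    def __init__(self, *sizes, batch_dims=1):
        super().__init__()
        self.sizes = sizes
        self.batch_dims = batch_dims

    def forward(self, x):
        return x.view(x.shape[:self.batch_dims] + self.sizes)

flat = torch.randn(6, 784)
print(Reshape(1, 28, 28)(flat).shape)                   # torch.Size([6, 1, 28, 28])

stacked = torch.randn(5, 6, 784)                        # e.g. (samples, batch, features)
print(Reshape(1, 28, 28, batch_dims=2)(stacked).shape)  # torch.Size([5, 6, 1, 28, 28])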
--- vel/model/latent/vae_base.py | 23 +++++++++++++++-------- 1 file changed, 15 insertions(+), 8 deletions(-) diff --git a/vel/model/latent/vae_base.py b/vel/model/latent/vae_base.py index efaf62ab..51bbb070 100644 --- a/vel/model/latent/vae_base.py +++ b/vel/model/latent/vae_base.py @@ -38,11 +38,6 @@ def prior_distribution(self) -> dist.Distribution: #################################################################################################################### # Other useful methods - def encode(self, sample: torch.Tensor) -> torch.Tensor: - """ Encode incoming data into a latent representation """ - encoded = self.encoder_network(sample) - return self.encoder_rsample(encoded) - def encoder_rsample(self, encoded: torch.Tensor) -> torch.Tensor: """ Sample with "reparametrization trick" encoder sample """ return self.encoder_distribution(encoded).rsample() @@ -51,11 +46,23 @@ def decoder_sample(self, decoded: torch.Tensor) -> torch.Tensor: """ Sample from a decoder distribution """ return self.decoder_distribution(decoded).sample() + def encode(self, sample: torch.Tensor) -> torch.Tensor: + """ Encode incoming data into a latent representation """ + encoded = self.encoder_network(sample) + return self.encoder_rsample(encoded) + + def decode(self, z: torch.Tensor) -> torch.Tensor: + """ + Decode latent representation back into data domain. + Sample from p(x | z) + """ + decoded = self.decoder_network(z) + return self.decoder_sample(decoded) + def forward(self, sample: torch.Tensor) -> torch.Tensor: """ Simple forward pass through the module """ - encoded = self.encoder_network(sample) - z = self.encoder_rsample(encoded) - decoded = self.decoder_sample(z) + z = self.encode(sample) + decoded = self.decode(z) return decoded def calculate_gradient(self, data: dict) -> dict: From 28bc997c4153b85527a3231090ac15652a787e79 Mon Sep 17 00:00:00 2001 From: Million Integrals Date: Sun, 22 Sep 2019 22:47:47 -0700 Subject: [PATCH 090/162] IWAE implementation. --- README.md | 5 +- .../latent/mnist/mnist_fc_iwae.yaml | 47 ++++++++ vel/model/latent/cnn_vae.py | 4 +- vel/model/latent/fc_iwae.py | 106 ++++++++++++++++++ vel/model/latent/fc_vae.py | 6 +- vel/model/latent/iwae.py | 73 ++++++++++++ 6 files changed, 235 insertions(+), 6 deletions(-) create mode 100644 examples-configs/latent/mnist/mnist_fc_iwae.yaml create mode 100644 vel/model/latent/fc_iwae.py create mode 100644 vel/model/latent/iwae.py diff --git a/README.md b/README.md index f115666e..ffed0177 100644 --- a/README.md +++ b/README.md @@ -118,7 +118,10 @@ that are ready to run and easy to modify for other similar usecases: # Implemented models - Unsupervised learning -- AutoEncoders (AE) and Variational AutoEncoders (VAE) with examples on MNIST dataset. +- A simple AutoEncoder (AE) with example on MNIST dataset. 
+- Latent variable models: + - Variational AutoEncoders (VAE) + - Importance Weighted AutoEncoder (IWAE) # Examples diff --git a/examples-configs/latent/mnist/mnist_fc_iwae.yaml b/examples-configs/latent/mnist/mnist_fc_iwae.yaml new file mode 100644 index 00000000..29979c21 --- /dev/null +++ b/examples-configs/latent/mnist/mnist_fc_iwae.yaml @@ -0,0 +1,47 @@ +name: 'mnist_fc_vae' + + +model: + name: vel.model.latent.fc_iwae + img_rows: 28 + img_cols: 28 + img_channels: 1 + layers: [200, 200] + representation_length: 50 + max_grad_norm: 1.0 + analytical_kl_div: true + k: 5 + + +source: + name: vel.data.source.vision.mnist + +loader: + name: vel.data.dataset_loader + batch_size: 128 + num_workers: 4 + pin_memory: true + + transformations: + - name: vel.data.transformation.to_array + - name: vel.data.transformation.binarize_image + - name: vel.data.transformation.image_to_tensor + - name: vel.data.transformation.unsupervised + + +optimizer: + name: vel.optimizer.radam + lr: 1.0e-3 + eps: 1.0e-4 + + +scheduler: + name: vel.scheduler.multi_step + gamma: 0.71968 # 10 * (-1/7) + milestones: [ 1, 4, 13, 40, 121, 364, 1093, 3280] + + +commands: + train: + name: vel.command.train_command + epochs: 3280 \ No newline at end of file diff --git a/vel/model/latent/cnn_vae.py b/vel/model/latent/cnn_vae.py index da57eacd..958877bf 100644 --- a/vel/model/latent/cnn_vae.py +++ b/vel/model/latent/cnn_vae.py @@ -10,7 +10,7 @@ from vel.api import ModelFactory from vel.module.layers import Flatten, Reshape -from vel.model.autoencoder.vae_base import VaeBase +from vel.model.latent.vae_base import VaeBase class CnnVAE(VaeBase): @@ -142,7 +142,7 @@ def prior_distribution(self) -> dist.Distribution: def decoder_sample(self, decoded: torch.Tensor) -> torch.Tensor: """ Sample from a decoder distribution - we ignore that since it's so weak in this case """ - return self.decoder_network(decoded) + return decoded def create(img_rows, img_cols, img_channels, channels=None, representation_length=32): diff --git a/vel/model/latent/fc_iwae.py b/vel/model/latent/fc_iwae.py new file mode 100644 index 00000000..ab6d8602 --- /dev/null +++ b/vel/model/latent/fc_iwae.py @@ -0,0 +1,106 @@ +import torch +import torch.distributions as dist +import torch.nn as nn +import torch.nn.functional as F +import torch.nn.init as init + +from vel.api import ModelFactory +from vel.module.layers import Flatten, Reshape + +from vel.model.latent.iwae import IWAE + + +class FcIwae(IWAE): + """ + A simple IWAE, containing 2 fully connected layers. 
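The importance-weighted models configured above combine per-sample log-weights with a log-mean-exp rather than a plain mean, which is why `vae_base.py` carries the `log_mean_exp` helper: shifting by the maximum before exponentiating keeps the result finite even for very negative log-weights. A standalone comparison with the naive computation, on toy numbers:

import torch

def log_mean_exp(inputs, dim=1):
    # same max-shift trick as VaeBase.log_mean_exp
    input_max = inputs.max(dim, keepdim=True)[0]
    return (inputs - input_max).exp().mean(dim).log() + input_max.squeeze(dim=dim)

log_w = torch.tensor([[-1000.0, -1001.0, -1002.0]])   # log importance weights, extremely small
print(log_w.exp().mean(dim=1).log())                  # tensor([-inf]), the naive version underflows
print(log_mean_exp(log_w, dim=1))                     # ~ tensor([-1000.69]), finite and correct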
+ """ + + def __init__(self, img_rows, img_cols, img_channels, k=5, layers=None, representation_length=32, + analytical_kl_div=False, max_grad_norm=None): + super().__init__(k=k, analytical_kl_div=analytical_kl_div, max_grad_norm=max_grad_norm) + + if layers is None: + layers = [512, 256] + + self.representation_length = representation_length + self.layers = layers + + input_length = img_rows * img_cols * img_channels + + self.encoder = nn.Sequential( + Flatten(), + nn.Linear(in_features=input_length, out_features=self.layers[0]), + nn.Tanh(), + nn.Linear(in_features=self.layers[0], out_features=self.layers[1]), + nn.Tanh(), + nn.Linear(self.layers[1], representation_length * 2) + ) + + self.decoder = nn.Sequential( + nn.Linear(in_features=representation_length, out_features=self.layers[1]), + nn.Tanh(), + nn.Linear(in_features=self.layers[1], out_features=self.layers[0]), + nn.Tanh(), + nn.Linear(in_features=self.layers[0], out_features=input_length), + Reshape(img_channels, img_rows, img_cols), + nn.Sigmoid() + ) + + self.register_buffer('prior_mean', torch.tensor([[0.0]])) + self.register_buffer('prior_std', torch.tensor([[1.0]])) + + def encoder_network(self, sample: torch.Tensor) -> torch.Tensor: + """ Transform input sample into an encoded representation """ + return self.encoder(sample) + + def encoder_distribution(self, encoded: torch.Tensor) -> dist.Distribution: + """ Create a pytorch distribution object representing the encoder distribution (approximate posterior) """ + mu = encoded[:, :self.representation_length] + std = F.softplus(encoded[:, self.representation_length:]) + + return dist.Independent(dist.Normal(mu, std), 1) + + def decoder_network(self, z: torch.Tensor) -> torch.Tensor: + """ Transform encoded value into a decoded representation """ + return self.decoder(z) + + def decoder_distribution(self, decoded: torch.Tensor) -> dist.Distribution: + """ Create a pytorch distribution object representing the decoder distribution (likelihood) """ + return dist.Independent(dist.Bernoulli(probs=decoded), 3) + + def prior_distribution(self) -> dist.Distribution: + """ Return a prior distribution object """ + return dist.Independent(dist.Normal(self.prior_mean, self.prior_std), 1) + + def decoder_sample(self, decoded: torch.Tensor) -> torch.Tensor: + """ Sample from a decoder distribution - we ignore that since it's so weak in this case """ + return decoded + +# @staticmethod +# def _weight_initializer(tensor): +# init.xavier_uniform_(tensor.weight, gain=init.calculate_gain('tanh')) +# init.constant_(tensor.bias, 0.01) +# +# def reset_weights(self): +# for m in it.chain(self.encoder, self.decoder): +# if isinstance(m, nn.Conv2d): +# self._weight_initializer(m) +# elif isinstance(m, nn.ConvTranspose2d): +# self._weight_initializer(m) +# elif isinstance(m, nn.Linear): +# self._weight_initializer(m) + + +def create(img_rows, img_cols, img_channels, k=5, layers=None, representation_length=32, max_grad_norm=None, + analytical_kl_div=True): + """ Vel factory function """ + if layers is None: + layers = [512, 256] + + def instantiate(**_): + return FcIwae( + img_rows, img_cols, img_channels, k=k, layers=layers, representation_length=representation_length, + max_grad_norm=max_grad_norm, analytical_kl_div=analytical_kl_div + ) + + return ModelFactory.generic(instantiate) diff --git a/vel/model/latent/fc_vae.py b/vel/model/latent/fc_vae.py index 49e91fbd..50d7d99c 100644 --- a/vel/model/latent/fc_vae.py +++ b/vel/model/latent/fc_vae.py @@ -7,7 +7,7 @@ from vel.api import ModelFactory from 
vel.module.layers import Flatten, Reshape -from vel.model.autoencoder.vae_base import VaeBase +from vel.model.latent.vae_base import VaeBase class FcVae(VaeBase): @@ -37,7 +37,7 @@ def __init__(self, img_rows, img_cols, img_channels, layers=None, representation ) self.decoder = nn.Sequential( - nn.Linear(representation_length, self.layers[1]), + nn.Linear(in_features=representation_length, out_features=self.layers[1]), nn.Tanh(), nn.Linear(in_features=self.layers[1], out_features=self.layers[0]), nn.Tanh(), @@ -74,7 +74,7 @@ def prior_distribution(self) -> dist.Distribution: def decoder_sample(self, decoded: torch.Tensor) -> torch.Tensor: """ Sample from a decoder distribution - we ignore that since it's so weak in this case """ - return self.decoder_network(decoded) + return decoded # @staticmethod # def _weight_initializer(tensor): diff --git a/vel/model/latent/iwae.py b/vel/model/latent/iwae.py new file mode 100644 index 00000000..46359c41 --- /dev/null +++ b/vel/model/latent/iwae.py @@ -0,0 +1,73 @@ +import torch.nn.utils + +from vel.model.latent.vae_base import VaeBase + + +class IWAE(VaeBase): + """ + Importance-Weighted Auto-Encoder https://arxiv.org/abs/1509.00519 + """ + + def __init__(self, k: int = 5, analytical_kl_div=True, max_grad_norm=1.0): + super().__init__(analytical_kl_div=analytical_kl_div, max_grad_norm=max_grad_norm) + + self.k = k + + def calculate_gradient(self, data: dict) -> dict: + """ Calculate model gradient for given data sample """ + encoded = self.encoder_network(data['x']) + z_dist = self.encoder_distribution(encoded) + + bs = encoded.size(0) + # Encode importance samples into batch dimension for the decoded network + z = z_dist.rsample([self.k]).reshape([bs * self.k, -1]) + + decoded = self.decoder_network(z) + decoded = decoded.reshape([self.k, bs] + list(decoded.shape[1:])) + + # Unpack to make distribution efficient for broadcasting + x_dist = self.decoder_distribution(decoded) + prior = self.prior_distribution() + + kl_divergence = self.kl_divergence(z, z_dist, prior) + reconstruction = x_dist.log_prob(data['y']) + + # ELBO is E_q log p(x, z) / q(z | x) + # Which can be expressed in many equivalent forms: + # (1) E_q log p(x | z) + log p(z) - log q(z | x) + # (2) E_q log p(x | z) - D_KL(p(z) || q(z | x)) + # (3) E_q log p(x) - D_KL(p(z | x) || q(z | x)Biblio) + + # Form 3 is interesting from a theoretical standpoint, but is intractable to compute directly + # While forms (1) and (2) can be computed directly. + # Positive aspect of form (2) is that KL divergence can be calculated analytically + # further reducing the variance of the gradient + elbo = reconstruction - kl_divergence + + # Perform log-mean-exp on the axis of importance samples + # Then mean across batch + elbo = self.log_mean_exp(elbo, 0).mean() + + loss = -elbo + + if self.training: + loss.backward() + + if self.max_grad_norm is not None: + grad_norm = torch.nn.utils.clip_grad_norm_( + filter(lambda p: p.requires_grad, self.parameters()), + max_norm=self.max_grad_norm + ) + else: + grad_norm = 0.0 + else: + grad_norm = 0.0 + + with torch.no_grad(): + return { + 'loss': loss.item(), + + 'grad_norm': grad_norm, + 'reconstruction': -reconstruction.mean().item(), + 'kl_divergence': kl_divergence.mean().item() + } From 6ffbb4cb29270123ce1403ca79dcdb166f3d3ef0 Mon Sep 17 00:00:00 2001 From: Million Integrals Date: Tue, 24 Sep 2019 20:35:33 -0700 Subject: [PATCH 091/162] IWAE implementation. 
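`IWAE.calculate_gradient` above implements the k-sample bound from the paper linked in the docstring: averaging importance weights inside the logarithm gives an objective that tightens monotonically towards log p(x) as k grows. A toy one-dimensional illustration (everything here is made up; this toy model's marginal likelihood has a closed form, so the bounds can be compared against it):

import torch
import torch.distributions as dist

torch.manual_seed(0)
x = torch.tensor(1.5)

prior = dist.Normal(0.0, 1.0)                             # p(z)
proposal = dist.Normal(0.0, 1.0)                          # a deliberately crude q(z|x)
exact_log_px = dist.Normal(0.0, 2.0 ** 0.5).log_prob(x)   # z plus unit noise gives x ~ N(0, sqrt(2))

def iwae_bound(k, repeats=20000):
    z = proposal.sample((repeats, k))
    log_w = dist.Normal(z, 1.0).log_prob(x) + prior.log_prob(z) - proposal.log_prob(z)
    return (torch.logsumexp(log_w, dim=1) - torch.log(torch.tensor(float(k)))).mean()

for k in (1, 5, 50):
    print(k, iwae_bound(k).item())        # increases with k, approaching the true value
print('log p(x) =', exact_log_px.item())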
--- .../latent/mnist/mnist_cnn_iwae.yaml | 48 ++++++ .../latent/mnist/mnist_fc_iwae.yaml | 5 +- vel/api/model.py | 6 +- vel/model/latent/cnn_iwae.py | 158 ++++++++++++++++++ vel/model/latent/vae_base.py | 1 + 5 files changed, 213 insertions(+), 5 deletions(-) create mode 100644 examples-configs/latent/mnist/mnist_cnn_iwae.yaml create mode 100644 vel/model/latent/cnn_iwae.py diff --git a/examples-configs/latent/mnist/mnist_cnn_iwae.yaml b/examples-configs/latent/mnist/mnist_cnn_iwae.yaml new file mode 100644 index 00000000..df3164fe --- /dev/null +++ b/examples-configs/latent/mnist/mnist_cnn_iwae.yaml @@ -0,0 +1,48 @@ +name: 'mnist_cnn_iwae' + + +model: + name: vel.model.latent.cnn_iwae + img_rows: 28 + img_cols: 28 + img_channels: 1 + channels: [64, 128, 256] + representation_length: 50 + max_grad_norm: 1.0 + analytical_kl_div: true + k: 5 # It's hard to sample many samples for this slightly larger network + + +source: + name: vel.data.source.vision.mnist + + +loader: + name: vel.data.dataset_loader + batch_size: 128 + num_workers: 4 + pin_memory: true + + transformations: + - name: vel.data.transformation.to_array + - name: vel.data.transformation.binarize_image + - name: vel.data.transformation.image_to_tensor + - name: vel.data.transformation.unsupervised + + +optimizer: + name: vel.optimizer.radam + lr: 1.0e-3 + eps: 1.0e-4 + + +scheduler: + name: vel.scheduler.multi_step + gamma: 0.71968 # 10 * (-1/7) + milestones: [ 1, 4, 13, 40, 121, 364, 1093, 3280] + + +commands: + train: + name: vel.command.train_command + epochs: 3280 \ No newline at end of file diff --git a/examples-configs/latent/mnist/mnist_fc_iwae.yaml b/examples-configs/latent/mnist/mnist_fc_iwae.yaml index 29979c21..e4ca4abb 100644 --- a/examples-configs/latent/mnist/mnist_fc_iwae.yaml +++ b/examples-configs/latent/mnist/mnist_fc_iwae.yaml @@ -1,4 +1,4 @@ -name: 'mnist_fc_vae' +name: 'mnist_fc_iwae' model: @@ -10,12 +10,13 @@ model: representation_length: 50 max_grad_norm: 1.0 analytical_kl_div: true - k: 5 + k: 50 # Because it's such a small network we can try many importance samples source: name: vel.data.source.vision.mnist + loader: name: vel.data.dataset_loader batch_size: 128 diff --git a/vel/api/model.py b/vel/api/model.py index 53406a3e..9cb31443 100644 --- a/vel/api/model.py +++ b/vel/api/model.py @@ -15,7 +15,7 @@ def metrics(self) -> list: return [] def train(self, mode=True): - r""" + """ Sets the module in training mode. This has any effect only on certain modules. See documentations of @@ -68,8 +68,8 @@ class GradientModel(Model): def calculate_gradient(self, data: dict) -> dict: """ - Calculate gradient for given batch of supervised learning. - Returns a dictionary of metrics + Calculate gradient for given batch of training data. + Returns a dictionary of metrics. 
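In `IWAE.calculate_gradient` the k importance samples are folded into the batch axis before the decoder runs and unfolded again afterwards, so an ordinary decoder (such as the convolutional one added below) never sees the extra sample dimension; the target images then broadcast across the leading sample axis when the decoder log-probability is evaluated. A shape-only sketch of that round trip:

import torch

k, bs, latent = 5, 4, 32
z = torch.randn(k, bs, latent)                 # k importance samples per batch element

folded = z.reshape(k * bs, latent)             # fold samples into the batch axis for the decoder
decoded = folded.view(k * bs, 1, 1, latent)    # stand-in for decoder_network(folded)
unfolded = decoded.reshape([k, bs] + list(decoded.shape[1:]))

assert torch.equal(unfolded[2, 3].flatten(), z[2, 3])   # the (sample, batch) pairing is preserved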
""" raise NotImplementedError diff --git a/vel/model/latent/cnn_iwae.py b/vel/model/latent/cnn_iwae.py new file mode 100644 index 00000000..c4b79ded --- /dev/null +++ b/vel/model/latent/cnn_iwae.py @@ -0,0 +1,158 @@ +import itertools as it + +import torch +import torch.nn as nn +import torch.nn.init as init +import torch.nn.functional as F +import torch.distributions as dist + +import vel.util.network as net_util + +from vel.api import ModelFactory +from vel.module.layers import Flatten, Reshape +from vel.model.latent.iwae import IWAE + + +class CnnIWAE(IWAE): + """ + A simple IWAE, containing 3 convolutional layers + """ + + def __init__(self, img_rows, img_cols, img_channels, k=5, channels=None, representation_length=32, + analytical_kl_div=True, max_grad_norm=0.5): + super().__init__(k=k, analytical_kl_div=analytical_kl_div, max_grad_norm=max_grad_norm) + + if channels is None: + channels = [16, 32, 32] + + layer_series = [ + (3, 1, 1), + (3, 1, 2), + (3, 1, 2), + ] + + self.representation_length = representation_length + + self.final_width = net_util.convolutional_layer_series(img_rows, layer_series) + self.final_height = net_util.convolutional_layer_series(img_cols, layer_series) + self.channels = channels + + self.encoder = nn.Sequential( + nn.Conv2d(in_channels=img_channels, out_channels=channels[0], kernel_size=(3, 3), padding=1), + # nn.ReLU(True), + nn.SELU(True), + nn.LayerNorm([ + channels[0], + net_util.convolutional_layer_series(img_rows, layer_series[:1]), + net_util.convolutional_layer_series(img_cols, layer_series[:1]), + ]), + nn.Conv2d(in_channels=channels[0], out_channels=channels[1], kernel_size=(3, 3), stride=2, padding=1), + # nn.ReLU(True), + nn.SELU(True), + nn.LayerNorm([ + channels[1], + net_util.convolutional_layer_series(img_rows, layer_series[:2]), + net_util.convolutional_layer_series(img_cols, layer_series[:2]), + ]), + nn.Conv2d(in_channels=channels[1], out_channels=channels[2], kernel_size=(3, 3), stride=2, padding=1), + # nn.ReLU(True), + nn.SELU(True), + nn.LayerNorm([ + channels[2], + net_util.convolutional_layer_series(img_rows, layer_series), + net_util.convolutional_layer_series(img_cols, layer_series), + ]), + Flatten(), + nn.Linear(self.final_width * self.final_height * channels[2], representation_length * 2) + ) + + self.decoder = nn.Sequential( + nn.Linear(representation_length, self.final_width * self.final_height * channels[2]), + # nn.ReLU(True), + nn.SELU(True), + Reshape(channels[2], self.final_width, self.final_height), + nn.LayerNorm([ + channels[2], + net_util.convolutional_layer_series(img_rows, layer_series), + net_util.convolutional_layer_series(img_cols, layer_series), + ]), + nn.ConvTranspose2d( + in_channels=channels[2], out_channels=channels[1], kernel_size=3, stride=2, padding=1, output_padding=1 + ), + # nn.ReLU(True), + nn.SELU(True), + nn.LayerNorm([ + channels[1], + net_util.convolutional_layer_series(img_rows, layer_series[:2]), + net_util.convolutional_layer_series(img_cols, layer_series[:2]), + ]), + nn.ConvTranspose2d( + in_channels=channels[1], out_channels=channels[0], kernel_size=3, stride=2, padding=1, output_padding=1 + ), + # nn.ReLU(True), + nn.SELU(True), + nn.LayerNorm([ + channels[0], + net_util.convolutional_layer_series(img_rows, layer_series[:1]), + net_util.convolutional_layer_series(img_cols, layer_series[:1]), + ]), + nn.ConvTranspose2d(in_channels=channels[0], out_channels=img_channels, kernel_size=3, padding=1), + nn.Sigmoid() + ) + + self.register_buffer('prior_mean', torch.tensor([[0.0]])) + 
self.register_buffer('prior_std', torch.tensor([[1.0]])) + + @staticmethod + def _weight_initializer(tensor): + init.xavier_uniform_(tensor.weight, gain=init.calculate_gain('relu')) + init.constant_(tensor.bias, 0.0) + + def reset_weights(self): + for m in it.chain(self.encoder, self.decoder): + if isinstance(m, nn.Conv2d): + self._weight_initializer(m) + elif isinstance(m, nn.ConvTranspose2d): + self._weight_initializer(m) + elif isinstance(m, nn.Linear): + self._weight_initializer(m) + + def encoder_network(self, sample: torch.Tensor) -> torch.Tensor: + """ Transform input sample into an encoded representation """ + return self.encoder(sample) + + def encoder_distribution(self, encoded: torch.Tensor) -> dist.Distribution: + """ Create a pytorch distribution object representing the encoder distribution (approximate posterior) """ + mu = encoded[:, :self.representation_length] + std = F.softplus(encoded[:, self.representation_length:]) + + return dist.Independent(dist.Normal(mu, std), 1) + + def decoder_network(self, z: torch.Tensor) -> torch.Tensor: + """ Transform encoded value into a decoded representation """ + return self.decoder(z) + + def decoder_distribution(self, decoded: torch.Tensor) -> dist.Distribution: + """ Create a pytorch distribution object representing the decoder distribution (likelihood) """ + return dist.Independent(dist.Bernoulli(probs=decoded), 3) + + def prior_distribution(self) -> dist.Distribution: + """ Return a prior distribution object """ + return dist.Independent(dist.Normal(self.prior_mean, self.prior_std), 1) + + def decoder_sample(self, decoded: torch.Tensor) -> torch.Tensor: + """ Sample from a decoder distribution - we ignore that since it's so weak in this case """ + return decoded + + +def create(img_rows, img_cols, img_channels, k=5, channels=None, representation_length=32): + """ Vel factory function """ + if channels is None: + channels = [16, 32, 32] + + def instantiate(**_): + return CnnIWAE( + img_rows, img_cols, img_channels, k=k, channels=channels, representation_length=representation_length + ) + + return ModelFactory.generic(instantiate) diff --git a/vel/model/latent/vae_base.py b/vel/model/latent/vae_base.py index 51bbb070..03fa88f7 100644 --- a/vel/model/latent/vae_base.py +++ b/vel/model/latent/vae_base.py @@ -8,6 +8,7 @@ class VaeBase(GradientModel): """ Base module for variational auto-encoder implementations """ + def __init__(self, analytical_kl_div=True, max_grad_norm=1.0): super().__init__() From 0b9d7313dcd8159d0f565313a435713b73bb08d2 Mon Sep 17 00:00:00 2001 From: Million Integrals Date: Tue, 24 Sep 2019 21:16:25 -0700 Subject: [PATCH 092/162] Added VQ-VAE repo to bibliograpgy. --- docs/Bibliography.md | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/Bibliography.md b/docs/Bibliography.md index 8ed79213..2490513a 100644 --- a/docs/Bibliography.md +++ b/docs/Bibliography.md @@ -157,4 +157,5 @@ in the following repositories (in alphabetical order): - https://github.com/lessw2020/Ranger-Deep-Learning-Optimizer - https://github.com/openai/baselines - https://github.com/pytorch/pytorch +- https://github.com/ritheshkumar95/pytorch-vqvae/ - https://github.com/tensorflow/tensorflow \ No newline at end of file From 5a7c2980db697ee8bff5fcd57567caf1921ab8f6 Mon Sep 17 00:00:00 2001 From: Million Integrals Date: Tue, 24 Sep 2019 21:16:39 -0700 Subject: [PATCH 093/162] Run tag support, and cleaned up training info. 
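The VAE and IWAE configs in this series set `analytical_kl_div: true`; when it is false, `VaeBase.kl_divergence` instead uses the single-sample estimate log q(z|x) - log p(z), which matches the analytic value only in expectation. A standalone illustration of that equivalence with made-up values:

import torch
import torch.distributions as dist

torch.manual_seed(0)
mu, std = torch.randn(4, 16), torch.rand(4, 16) + 0.5
q_z = dist.Independent(dist.Normal(mu, std), 1)
prior = dist.Independent(dist.Normal(torch.zeros_like(mu), torch.ones_like(std)), 1)

z = q_z.sample((50000,))                                # many single-sample estimates at once
kl_mc = (q_z.log_prob(z) - prior.log_prob(z)).mean(0)   # average of the non-analytic estimator
kl_exact = dist.kl_divergence(q_z, prior)               # the analytic path

print((kl_mc - kl_exact).abs().max())   # small, up to Monte-Carlo noise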
--- README.md | 14 +-- examples-scripts/rl/atari/a2c/breakout_a2c.py | 101 ---------------- .../rl/atari/a2c/breakout_a2c_evaluate.py | 66 ---------- examples-scripts/rl/atari/ppo/qbert_ppo.py | 114 ------------------ .../rl/mujoco/ddpg/half_cheetah_ddpg.py | 110 ----------------- vel/api/info.py | 3 +- vel/api/model_config.py | 114 ++++++++++++++---- vel/command/phase_train_command.py | 1 - vel/command/rnn/generate_text.py | 3 +- vel/command/train_command.py | 1 - vel/data/source/nlp/text_url.py | 2 +- vel/internal/provider.py | 3 +- vel/launcher.py | 3 +- vel/notebook/loader.py | 10 +- vel/rl/command/enjoy.py | 3 +- vel/rl/command/evaluate_env_command.py | 2 +- vel/rl/command/record_movie_command.py | 3 +- vel/storage/streaming/stdout.py | 16 ++- 18 files changed, 122 insertions(+), 447 deletions(-) delete mode 100644 examples-scripts/rl/atari/a2c/breakout_a2c.py delete mode 100644 examples-scripts/rl/atari/a2c/breakout_a2c_evaluate.py delete mode 100644 examples-scripts/rl/atari/ppo/qbert_ppo.py delete mode 100644 examples-scripts/rl/mujoco/ddpg/half_cheetah_ddpg.py diff --git a/README.md b/README.md index ffed0177..5f678ffd 100644 --- a/README.md +++ b/README.md @@ -9,8 +9,7 @@ Bring **velocity** to deep-learning research. This project hosts a collection of **highly modular** deep learning components that are tested to be working well together. -A simple yaml-based system ties these modules together declaratively using configuration files, -but everything that can be defined using config files can be coded directly in the python script as well. +A simple yaml-based system ties these modules together declaratively using configuration files. This is still an early version and a hobby project so documentation is unfortunately nonexistent. I've tried to make the @@ -33,7 +32,7 @@ into a structure that is designed to be reused rather than copied over. As a goal, it should be enough to write a config file that wires existing components together and defines their hyperparameters for most common applications. -If that's not the case few bits of custom glue code should do the jobatari. +If that's not the case few bits of custom glue code should do the job. This repository is still in an early stage of that journey but it will grow @@ -54,7 +53,7 @@ pip install -e . ``` from the repository root directory. -This project requires Python at least 3.6 and PyTorch 1.1. +This project requires Python at least 3.6 and PyTorch 1.2. If you want to run YAML config examples, you'll also need a **project configuration file** `.velproject.yaml`. An example is included in this repository. @@ -86,7 +85,7 @@ To use it, just rename it to `.velproject.yaml`. 
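As a rough illustration of what such a project file contains, below is a minimal sketch assembled only from the `.velproject.yaml` fields visible later in this patch series; the actual example file shipped with the repository may define additional settings.

```yaml
# Minimal illustrative .velproject.yaml; fields copied from diffs later in this
# series, a real project will likely need more settings than shown here.
visdom_settings:
  server: 'http://localhost'
  port: 8097

# Commands shared among all models in this project (added later in this series)
global_commands:
  list:
    name: vel.command.list_command
```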
Several models are already implemented in the framework and have example config files that are ready to run and easy to modify for other similar usecases: -- State-of-the art results on Cifar10 dataset using residual networks +- Residual networks (resnets) trained on Cifar10 dataset replicating published performance - Cats vs dogs classification using transfer learning from a resnet34 model pretrained on ImageNet @@ -99,8 +98,8 @@ that are ready to run and easy to modify for other similar usecases: # Implemented models - Reinforcement learning - Continuous and discrete action spaces -- Basic support for LSTM policies for A2C and PPO -- Following published policy gradient reinforcement learning algorithms: +- Basic support for recurrent policies for A2C and PPO +- Following policy gradient reinforcement learning algorithms: - Advantage Actor-Critic (A2C) - Deep Deterministic Policy Gradient (DDPG) - Proximal Policy Optimization (PPO) @@ -122,6 +121,7 @@ that are ready to run and easy to modify for other similar usecases: - Latent variable models: - Variational AutoEncoders (VAE) - Importance Weighted AutoEncoder (IWAE) + - Vector-Quantised Variational AutoEncoder (VQ-VAE) # Examples diff --git a/examples-scripts/rl/atari/a2c/breakout_a2c.py b/examples-scripts/rl/atari/a2c/breakout_a2c.py deleted file mode 100644 index 4cb9560a..00000000 --- a/examples-scripts/rl/atari/a2c/breakout_a2c.py +++ /dev/null @@ -1,101 +0,0 @@ -import torch -import torch.optim as optim - -from vel.rl.metrics import EpisodeRewardMetric -from vel.storage.streaming.stdout import StdoutStreaming -from vel.util.random import set_seed - -from vel.rl.env.classic_atari import ClassicAtariEnv -from vel.rl.vecenv.subproc import SubprocVecEnvWrapper - -from vel.module.input.image_to_tensor import ImageToTensorFactory -from vel.rl.policy.stochastic_policy import StochasticPolicy -from vel.rl.backbone.nature_cnn import NatureCnnFactory - - -from vel.rl.reinforcer.on_policy_iteration_reinforcer import ( - OnPolicyIterationReinforcer, OnPolicyIterationReinforcerSettings -) - -from vel.rl.algo.policy_gradient.a2c import A2CPolicyGradient -from vel.rl.env_roller.step_env_roller import StepEnvRoller - -from vel.api.info import TrainingInfo, EpochInfo - - -def breakout_a2c(): - device = torch.device('cuda:0') - seed = 1001 - - # Set random seed in python std lib, numpy and pytorch - set_seed(seed) - - # Create 16 environments evaluated in parallel in sub processess with all usual DeepMind wrappers - # These are just helper functions for that - vec_env = SubprocVecEnvWrapper( - ClassicAtariEnv('BreakoutNoFrameskip-v4'), frame_history=4 - ).instantiate(parallel_envs=16, seed=seed) - - # Again, use a helper to create a model - # But because model is owned by the reinforcer, model should not be accessed using this variable - # but from reinforcer.model property - model = StochasticPolicy( - input_block=ImageToTensorFactory(), - backbone=NatureCnnFactory(input_width=84, input_height=84, input_channels=4) - ).instantiate(action_space=vec_env.action_space) - - # Reinforcer - an object managing the learning process - reinforcer = OnPolicyIterationReinforcer( - device=device, - settings=OnPolicyIterationReinforcerSettings( - batch_size=256, - number_of_steps=5, - ), - model=model, - algo=A2CPolicyGradient( - entropy_coefficient=0.01, - value_coefficient=0.5, - max_grad_norm=0.5, - discount_factor=0.99, - ), - env_roller=StepEnvRoller( - environment=vec_env, - device=device, - ) - ) - - # Model optimizer - optimizer = 
optim.RMSprop(reinforcer.model.parameters(), lr=7.0e-4, eps=1e-3) - - # Overall information store for training information - training_info = TrainingInfo( - metrics=[ - EpisodeRewardMetric('episode_rewards'), # Calculate average reward from episode - ], - callbacks=[StdoutStreaming()] # Print live metrics every epoch to standard output - ) - - # A bit of training initialization bookkeeping... - training_info.initialize() - reinforcer.initialize_training(training_info) - training_info.on_train_begin() - - # Let's make 100 batches per epoch to average metrics nicely - num_epochs = int(1.1e7 / (5 * 16) / 100) - - # Normal handrolled training loop - for i in range(1, num_epochs+1): - epoch_info = EpochInfo( - training_info=training_info, - global_epoch_idx=i, - batches_per_epoch=100, - optimizer=optimizer - ) - - reinforcer.train_epoch(epoch_info) - - training_info.on_train_end() - - -if __name__ == '__main__': - breakout_a2c() diff --git a/examples-scripts/rl/atari/a2c/breakout_a2c_evaluate.py b/examples-scripts/rl/atari/a2c/breakout_a2c_evaluate.py deleted file mode 100644 index 3c31f6ab..00000000 --- a/examples-scripts/rl/atari/a2c/breakout_a2c_evaluate.py +++ /dev/null @@ -1,66 +0,0 @@ -import torch -import pandas as pd -import numpy as np - -from vel.modules.input.image_to_tensor import ImageToTensorFactory -from vel.openai.baselines.common.atari_wrappers import FrameStack -from vel.rl.env.classic_atari import ClassicAtariEnv -from vel.rl.models.backbone.nature_cnn import NatureCnnFactory -from vel.rl.models.stochastic_policy_model import StochasticPolicyModelFactory - - -def breakout_a2c_evaluate(checkpoint_file_path, takes=10): - model_checkpoint = torch.load(checkpoint_file_path) - device = torch.device('cuda:0') - - env = FrameStack( - ClassicAtariEnv('BreakoutNoFrameskip-v4').instantiate(preset='record'), k=4 - ) - - model = StochasticPolicyModelFactory( - input_block=ImageToTensorFactory(), - backbone=NatureCnnFactory(input_width=84, input_height=84, input_channels=4) - ).instantiate(action_space=env.action_space) - - model.load_state_dict(model_checkpoint) - model = model.to(device) - - model.eval() - - rewards = [] - lengths = [] - - for i in range(takes): - result = record_take(model, env, device) - rewards.append(result['r']) - lengths.append(result['l']) - - print(pd.DataFrame({'lengths': lengths, 'rewards': rewards}).describe()) - - -@torch.no_grad() -def record_take(model, env_instance, device): - frames = [] - - observation = env_instance.reset() - - frames.append(env_instance.render('rgb_array')) - - print("Evaluating environment...") - - while True: - observation_array = np.expand_dims(np.array(observation), axis=0) - observation_tensor = torch.from_numpy(observation_array).to(device) - actions = model.step(observation_tensor, deterministic=True)['actions'] - - observation, reward, done, epinfo = env_instance.step(actions.item()) - - frames.append(env_instance.render('rgb_array')) - - if 'episode' in epinfo: - # End of an episode - return epinfo['episode'] - - -if __name__ == '__main__': - breakout_a2c_evaluate("checkpoint_00001375.data", takes=2) diff --git a/examples-scripts/rl/atari/ppo/qbert_ppo.py b/examples-scripts/rl/atari/ppo/qbert_ppo.py deleted file mode 100644 index 98388dd1..00000000 --- a/examples-scripts/rl/atari/ppo/qbert_ppo.py +++ /dev/null @@ -1,114 +0,0 @@ -import torch -import torch.optim as optim - -from vel.rl.metrics import EpisodeRewardMetric -from vel.storage.streaming.stdout import StdoutStreaming -from vel.util.random import set_seed -from 
vel.api.info import TrainingInfo, EpochInfo - -from vel.modules.input.image_to_tensor import ImageToTensorFactory -from vel.rl.env.classic_atari import ClassicAtariEnv -from vel.rl.vecenv.subproc import SubprocVecEnvWrapper -from vel.rl.models.stochastic_policy_model import StochasticPolicyModelFactory -from vel.rl.models.backbone.nature_cnn import NatureCnnFactory - -from vel.rl.reinforcers.on_policy_iteration_reinforcer import ( - OnPolicyIterationReinforcer, OnPolicyIterationReinforcerSettings -) - -from vel.rl.algo.policy_gradient.ppo import PpoPolicyGradient -from vel.rl.env_roller.step_env_roller import StepEnvRoller -from vel.rl.commands.rl_train_command import FrameTracker - -from vel.schedules.linear import LinearSchedule - - -def qbert_ppo(): - device = torch.device('cuda:0') - seed = 1001 - - # Set random seed in python std lib, numpy and pytorch - set_seed(seed) - - # Create 16 environments evaluated in parallel in sub processess with all usual DeepMind wrappers - # These are just helper functions for that - vec_env = SubprocVecEnvWrapper( - ClassicAtariEnv('QbertNoFrameskip-v4'), frame_history=4 - ).instantiate(parallel_envs=8, seed=seed) - - # Again, use a helper to create a model - # But because model is owned by the reinforcer, model should not be accessed using this variable - # but from reinforcer.model property - model = StochasticPolicyModelFactory( - input_block=ImageToTensorFactory(), - backbone=NatureCnnFactory(input_width=84, input_height=84, input_channels=4) - ).instantiate(action_space=vec_env.action_space) - - # Set schedule for gradient clipping. - cliprange = LinearSchedule( - initial_value=0.1, - final_value=0.0 - ) - - # Reinforcer - an object managing the learning process - reinforcer = OnPolicyIterationReinforcer( - device=device, - settings=OnPolicyIterationReinforcerSettings( - batch_size=256, - experience_replay=4, - number_of_steps=128 - ), - model=model, - algo=PpoPolicyGradient( - entropy_coefficient=0.01, - value_coefficient=0.5, - max_grad_norm=0.5, - discount_factor=0.99, - gae_lambda=0.95, - cliprange=cliprange - ), - env_roller=StepEnvRoller( - environment=vec_env, - device=device, - ) - ) - - # Model optimizer - optimizer = optim.Adam(reinforcer.model.parameters(), lr=2.5e-4, eps=1.0e-5) - - # Overall information store for training information - training_info = TrainingInfo( - metrics=[ - EpisodeRewardMetric('episode_rewards'), # Calculate average reward from episode - ], - callbacks=[ - StdoutStreaming(), # Print live metrics every epoch to standard output - FrameTracker(1.1e7) # We need frame tracker to track the progress of learning - ] - ) - - # A bit of training initialization bookkeeping... 
- training_info.initialize() - reinforcer.initialize_training(training_info) - training_info.on_train_begin() - - # Let's make 10 batches per epoch to average metrics nicely - # Rollout size is 8 environments times 128 steps - num_epochs = int(1.1e7 / (128 * 8) / 10) - - # Normal handrolled training loop - for i in range(1, num_epochs+1): - epoch_info = EpochInfo( - training_info=training_info, - global_epoch_idx=i, - batches_per_epoch=10, - optimizer=optimizer - ) - - reinforcer.train_epoch(epoch_info) - - training_info.on_train_end() - - -if __name__ == '__main__': - qbert_ppo() diff --git a/examples-scripts/rl/mujoco/ddpg/half_cheetah_ddpg.py b/examples-scripts/rl/mujoco/ddpg/half_cheetah_ddpg.py deleted file mode 100644 index 49900c0e..00000000 --- a/examples-scripts/rl/mujoco/ddpg/half_cheetah_ddpg.py +++ /dev/null @@ -1,110 +0,0 @@ -import torch -import torch.optim - -from vel.api import TrainingInfo, EpochInfo -from vel.module.input.normalize_observations import NormalizeObservationsFactory -from vel.rl.buffer.circular_replay_buffer import CircularReplayBuffer -from vel.rl.env_roller.transition_replay_env_roller import TransitionReplayEnvRoller -from vel.rl.metrics import EpisodeRewardMetric -from vel.rl.module.noise.ou_noise import OuNoise -from vel.storage.streaming.stdout import StdoutStreaming -from vel.util.random import set_seed -from vel.rl.env.mujoco import MujocoEnv -from vel.rl.model.deterministic_policy_model import DeterministicPolicyModelFactory -from vel.rl.backbone.mlp import MLPFactory -from vel.rl.reinforcer.buffered_off_policy_iteration_reinforcer import ( - BufferedOffPolicyIterationReinforcer, BufferedOffPolicyIterationReinforcerSettings -) -from vel.rl.algo.policy_gradient.ddpg import DeepDeterministicPolicyGradient -from vel.rl.vecenv.dummy import DummyVecEnvWrapper -from vel.optimizer.adam import AdamFactory - - -def half_cheetah_ddpg(): - device = torch.device('cuda:0') - seed = 1002 - - # Set random seed in python std lib, numpy and pytorch - set_seed(seed) - - vec_env = DummyVecEnvWrapper( - MujocoEnv('HalfCheetah-v2') - ).instantiate(parallel_envs=1, seed=seed) - - model_factory = DeterministicPolicyModelFactory( - input_block=NormalizeObservationsFactory(input_shape=17), - policy_backbone=MLPFactory(input_length=17, hidden_layers=[64, 64], activation='tanh'), - value_backbone=MLPFactory(input_length=23, hidden_layers=[64, 64], activation='tanh'), - ) - - model = model_factory.instantiate(action_space=vec_env.action_space) - - reinforcer = BufferedOffPolicyIterationReinforcer( - device=device, - environment=vec_env, - settings=BufferedOffPolicyIterationReinforcerSettings( - rollout_steps=2, - training_steps=64, - ), - model=model, - algo=DeepDeterministicPolicyGradient( - model_factory=model_factory, - discount_factor=0.99, - tau=0.01, - ), - env_roller=TransitionReplayEnvRoller( - environment=vec_env, - device=device, - action_noise=OuNoise(std_dev=0.2, environment=vec_env), - replay_buffer=CircularReplayBuffer( - buffer_capacity=1_000_000, - buffer_initial_size=2_000, - num_envs=vec_env.num_envs, - observation_space=vec_env.observation_space, - action_space=vec_env.action_space - ), - normalize_returns=True, - discount_factor=0.99 - ), - ) - - # Optimizer helper - A weird regularization settings I've copied from OpenAI code - adam_optimizer = AdamFactory( - lr=[1.0e-4, 1.0e-3, 1.0e-3], - weight_decay=[0.0, 0.0, 0.001], - eps=1.0e-4, - layer_groups=True - ).instantiate(model) - - # Overall information store for training information - training_info = 
TrainingInfo( - metrics=[ - EpisodeRewardMetric('episode_rewards'), # Calculate average reward from episode - ], - callbacks=[StdoutStreaming()] # Print live metrics every epoch to standard output - ) - - # A bit of training initialization bookkeeping... - training_info.initialize() - reinforcer.initialize_training(training_info) - training_info.on_train_begin() - - # Let's make 20 batches per epoch to average metrics nicely - num_epochs = int(1.0e6 / 2 / 1000) - - # Normal handrolled training loop - for i in range(1, num_epochs+1): - epoch_info = EpochInfo( - training_info=training_info, - global_epoch_idx=i, - batches_per_epoch=1000, - optimizer=adam_optimizer - ) - - reinforcer.train_epoch(epoch_info) - - training_info.on_train_end() - - -if __name__ == '__main__': - half_cheetah_ddpg() diff --git a/vel/api/info.py b/vel/api/info.py index 4e5957d4..11544cff 100644 --- a/vel/api/info.py +++ b/vel/api/info.py @@ -33,13 +33,12 @@ class TrainingInfo(abc.MutableMapping): Data dict is any extra information processes may want to store """ - def __init__(self, start_epoch_idx=0, run_name: typing.Optional[str] = None, metrics=None, callbacks=None): + def __init__(self, start_epoch_idx=0, metrics=None, callbacks=None): self.data_dict = {} self.start_epoch_idx = start_epoch_idx self.metrics = metrics if metrics is not None else [] self.callbacks = callbacks if callbacks is not None else [] - self.run_name = run_name self.history = TrainingHistory() self.optimizer_initial_state = None diff --git a/vel/api/model_config.py b/vel/api/model_config.py index bd100eeb..b593e878 100644 --- a/vel/api/model_config.py +++ b/vel/api/model_config.py @@ -1,5 +1,7 @@ import datetime as dtm +import json import os.path +import pathlib import typing from vel.exception import VelInitializationException @@ -16,6 +18,7 @@ class ModelConfig: """ PROJECT_FILE_NAME = '.velproject.yaml' + META_FILE_NAME = 'meta.json' @staticmethod def find_project_directory(start_path) -> str: @@ -39,7 +42,7 @@ def from_project_directory(path) -> str: @classmethod def from_file(cls, filename: str, run_number: int = 1, continue_training: bool = False, seed: int = None, - device: str = 'cuda', params=None): + device: str = 'cuda', parameters: typing.Optional[dict] = None, tag: typing.Optional[str] = None): """ Create model config from file """ with open(filename, 'r') as fp: model_config_contents = Parser.parse(fp) @@ -62,12 +65,14 @@ def from_file(cls, filename: str, run_number: int = 1, continue_training: bool = continue_training=continue_training, seed=seed, device=device, - parameters=params + parameters=parameters, + tag=tag ) @classmethod def script(cls, model_name: str = 'script', configuration: typing.Optional[dict] = None, run_number: int = 1, - continue_training=False, seed: int = None, device: str = 'cuda', params=None): + continue_training=False, seed: int = None, device: str = 'cuda', + parameters: typing.Optional[dict] = None, tag: typing.Optional[str] = None): """ Create model config from supplied data """ if configuration is None: configuration = {} @@ -92,11 +97,13 @@ def script(cls, model_name: str = 'script', configuration: typing.Optional[dict] continue_training=continue_training, seed=seed, device=device, - parameters=params + parameters=parameters, + tag=tag ) def __init__(self, filename: str, configuration: dict, run_number: int, project_dir: str, - continue_training=False, seed: int = None, device: str = 'cuda', parameters=None): + continue_training=False, seed: int = None, device: str = 'cuda', + parameters: 
typing.Optional[dict] = None, tag: typing.Optional[str] = None): self.filename = filename self.device = device self.continue_training = continue_training @@ -121,13 +128,48 @@ def __init__(self, filename: str, configuration: dict, run_number: int, project_ self._model_name = self.provider.get("name") + if continue_training: + self._meta = self._load_meta() + + if tag is None: + self._tag = self._meta['tag'] + else: + if self._tag != self._meta['tag']: + raise VelInitializationException("Model tag mismatch") + else: + self._tag = tag + self._meta = self._create_meta() + self._write_meta() + + #################################################################################################################### + # INTERNAL FUNCTIONS def _prepare_environment(self) -> dict: """ Return full environment for dependency injection """ return {**self.contents, 'run_number': self.run_number} - def render_configuration(self) -> dict: - """ Return a nice and picklable run configuration """ - return self.provider.render_configuration() + def _load_meta(self) -> dict: + """ Load previously written metadata about the project """ + if not os.path.exists(self.meta_dir(self.META_FILE_NAME)): + raise VelInitializationException("Previous run does not exist") + + with open(self.meta_dir(self.META_FILE_NAME), 'rt') as fp: + return json.load(fp) + + def _write_meta(self) -> None: + """ Write metadata to a file """ + pathlib.Path(self.meta_dir()).mkdir(parents=True, exist_ok=True) + + with open(self.meta_dir(self.META_FILE_NAME), 'wt') as fp: + return json.dump(self.meta, fp) + + def _create_meta(self) -> dict: + """ Metadata for this model/config """ + return { + 'run_name': self.run_name, + 'tag': self.tag, + 'created': dtm.datetime.now().strftime("%Y/%m/%d - %H:%M:%S"), + 'config': self.render_configuration() + } #################################################################################################################### # COMMAND UTILITIES @@ -142,29 +184,29 @@ def run_command(self, command_name, varargs): #################################################################################################################### # MODEL DIRECTORIES - def checkpoint_dir(self, *args) -> str: - """ Return checkpoint directory for this model """ - return self.output_dir('checkpoints', self.run_name, *args) + def project_top_dir(self, *args) -> str: + """ Project top-level directory """ + return os.path.join(self.project_dir, *args) - def data_dir(self, *args) -> str: - """ Return data directory for given dataset """ - return self.project_data_dir(*args) + def output_dir(self, *args) -> str: + """ Directory where to store output """ + return os.path.join(self.project_dir, self.output_directory_name, *args) - def openai_dir(self) -> str: + def meta_dir(self, *args) -> str: """ Return directory for openai output files for this model """ - return self.output_dir('openai', self.run_name) + return self.output_dir('meta', self.run_name, *args) - def project_data_dir(self, *args) -> str: + def data_dir(self, *args) -> str: """ Directory where to store data """ return os.path.normpath(os.path.join(self.project_dir, 'data', *args)) - def output_dir(self, *args) -> str: - """ Directory where to store output """ - return os.path.join(self.project_dir, self.output_directory_name, *args) + def checkpoint_dir(self, *args) -> str: + """ Return checkpoint directory for this model """ + return self.output_dir('checkpoints', self.run_name, *args) - def project_top_dir(self, *args) -> str: - """ Project top-level directory """ - return 
os.path.join(self.project_dir, *args) + def openai_dir(self, *args) -> str: + """ Return directory for openai output files for this model """ + return self.output_dir('openai', self.run_name, *args) #################################################################################################################### # NAME UTILITIES @@ -178,6 +220,16 @@ def name(self) -> str: """ Return name of the model """ return self._model_name + @property + def meta(self) -> dict: + """ Return name of the model """ + return self._meta + + @property + def tag(self) -> typing.Optional[str]: + """ Tag for this model/run number """ + return self._tag + #################################################################################################################### # MISC GETTERS def torch_device(self): @@ -185,6 +237,10 @@ def torch_device(self): import torch return torch.device(self.device) + def render_configuration(self) -> dict: + """ Return a nice and picklable run configuration """ + return self.provider.render_configuration() + #################################################################################################################### # PROVIDER API def provide(self, name): @@ -204,7 +260,16 @@ def banner(self, command_name) -> None: print("=" * 80) print(f"Pytorch version: {torch.__version__} cuda version {torch.version.cuda} cudnn version {torch.backends.cudnn.version()}") # noqa - print("Running model {}, run {} -- command {} -- device {}".format(self._model_name, self.run_number, command_name, self.device)) # noqa + + if self.tag: + print("Running model {}, run {} ({}) -- command {} -- device {}".format( + self._model_name, self.run_number, self.tag, command_name, self.device) + ) + else: + print("Running model {}, run {} -- command {} -- device {}".format( + self._model_name, self.run_number, command_name, self.device) + ) + if device.type == 'cuda': device_idx = 0 if device.index is None else device.index print(f"CUDA Device name {torch.cuda.get_device_name(device_idx)}") @@ -237,7 +302,6 @@ def load_trained_model(self): training_info = TrainingInfo( start_epoch_idx=last_epoch_idx, - run_name=self.run_name, ) model_state, hidden_state = storage.load(training_info) diff --git a/vel/command/phase_train_command.py b/vel/command/phase_train_command.py index 18566ebf..2318d7bf 100644 --- a/vel/command/phase_train_command.py +++ b/vel/command/phase_train_command.py @@ -129,7 +129,6 @@ def resume_training(self, learner, callbacks, metrics) -> (api.TrainingInfo, dic training_info = api.TrainingInfo( start_epoch_idx=start_epoch, - run_name=self.model_config.run_name, metrics=metrics, callbacks=callbacks ) diff --git a/vel/command/rnn/generate_text.py b/vel/command/rnn/generate_text.py index 7dda3b53..99b56647 100644 --- a/vel/command/rnn/generate_text.py +++ b/vel/command/rnn/generate_text.py @@ -28,8 +28,7 @@ def run(self): start_epoch = self.storage.last_epoch_idx() training_info = TrainingInfo( - start_epoch_idx=start_epoch, - run_name=self.model_config.run_name, + start_epoch_idx=start_epoch ) model_state, hidden_state = self.storage.load(training_info) diff --git a/vel/command/train_command.py b/vel/command/train_command.py index 9d1a1965..5d3ca3ce 100644 --- a/vel/command/train_command.py +++ b/vel/command/train_command.py @@ -87,7 +87,6 @@ def resume_training(self, learner, callbacks, metrics) -> api.TrainingInfo: training_info = api.TrainingInfo( start_epoch_idx=start_epoch, - run_name=self.model_config.run_name, metrics=metrics, callbacks=callbacks ) diff --git 
a/vel/data/source/nlp/text_url.py b/vel/data/source/nlp/text_url.py index 5f3d61eb..fea44f95 100644 --- a/vel/data/source/nlp/text_url.py +++ b/vel/data/source/nlp/text_url.py @@ -80,7 +80,7 @@ def download(self) -> dict: def create(model_config, url, local_dir, train_val_split=0.8): """ Vel factory function """ if not os.path.isabs(local_dir): - local_dir = model_config.project_data_dir(local_dir) + local_dir = model_config.data_dir(local_dir) return TextUrlSource( url, diff --git a/vel/internal/provider.py b/vel/internal/provider.py index d694ad24..e1060f2d 100644 --- a/vel/internal/provider.py +++ b/vel/internal/provider.py @@ -1,5 +1,6 @@ import importlib import inspect +import typing from vel.internal.parser import Variable from vel.internal.generic_factory import GenericFactory @@ -7,7 +8,7 @@ class Provider: """ Dependency injection resolver for the configuration file """ - def __init__(self, environment, instances=None, parameters=None): + def __init__(self, environment: dict, instances: typing.Optional[dict] = None, parameters: typing.Optional[dict] = None): self.environment = environment self.parameters = parameters if parameters is not None else {} diff --git a/vel/launcher.py b/vel/launcher.py index 7dfa94eb..18a4f687 100644 --- a/vel/launcher.py +++ b/vel/launcher.py @@ -17,6 +17,7 @@ def main(): parser.add_argument('-r', '--run_number', type=int, default=0, help="A run number") parser.add_argument('-d', '--device', default='cuda', help="A device to run the model on") parser.add_argument('-s', '--seed', type=int, default=None, help="Random seed for the project") + parser.add_argument('-t', '--tag', type=str, default=None, help="String tag for a given run") parser.add_argument('--werr', action='store_true', default=False, help="Convert warnings to errors") parser.add_argument( '-p', '--param', type=str, metavar='NAME=VALUE', action='append', default=[], @@ -38,7 +39,7 @@ def main(): model_config = ModelConfig.from_file( args.config, args.run_number, continue_training=getattr(args, 'continue'), device=args.device, seed=args.seed, - params={k: v for (k, v) in (Parser.parse_equality(eq) for eq in args.param)} + parameters={k: v for (k, v) in (Parser.parse_equality(eq) for eq in args.param)} ) if model_config.project_dir not in sys.path: diff --git a/vel/notebook/loader.py b/vel/notebook/loader.py index d28c048d..ca0db31b 100644 --- a/vel/notebook/loader.py +++ b/vel/notebook/loader.py @@ -1,19 +1,21 @@ from vel.api import ModelConfig -def load_config(config_path, run_number=0, device='cuda:0'): +def load_config(config_path, run_number=0, device='cuda:0', continue_training=True): """ Load a ModelConfig from filename """ return ModelConfig.from_file( ModelConfig.from_project_directory(config_path), run_number=run_number, - device=device + device=device, + continue_training=continue_training ) -def script(model_name: str = 'script', run_number=0, device='cuda:0'): +def script(model_name: str = 'script', run_number=0, device='cuda:0', continue_training=True): """ Create an ad-hoc script model config """ return ModelConfig.script( model_name=model_name, run_number=run_number, - device=device + device=device, + continue_training=continue_training ) diff --git a/vel/rl/command/enjoy.py b/vel/rl/command/enjoy.py index a0f056bc..14da7fa2 100644 --- a/vel/rl/command/enjoy.py +++ b/vel/rl/command/enjoy.py @@ -29,8 +29,7 @@ def run(self): model = self.model_factory.instantiate(action_space=env.action_space).to(device) training_info = TrainingInfo( - 
start_epoch_idx=self.storage.last_epoch_idx(), - run_name=self.model_config.run_name + start_epoch_idx=self.storage.last_epoch_idx() ) self.storage.load(training_info, model) diff --git a/vel/rl/command/evaluate_env_command.py b/vel/rl/command/evaluate_env_command.py index 33f7f4dc..3bf0eec7 100644 --- a/vel/rl/command/evaluate_env_command.py +++ b/vel/rl/command/evaluate_env_command.py @@ -38,7 +38,7 @@ def run(self): action_noise = None training_info = TrainingInfo( - start_epoch_idx=self.storage.last_epoch_idx(), run_name=self.model_config.run_name + start_epoch_idx=self.storage.last_epoch_idx() ) model_state, hidden_state = self.storage.load(training_info) diff --git a/vel/rl/command/record_movie_command.py b/vel/rl/command/record_movie_command.py index 6b6f3c4c..a7a14d78 100644 --- a/vel/rl/command/record_movie_command.py +++ b/vel/rl/command/record_movie_command.py @@ -31,8 +31,7 @@ def run(self): model = self.model_factory.instantiate(action_space=env.action_space).to(device) training_info = TrainingInfo( - start_epoch_idx=self.storage.last_epoch_idx(), - run_name=self.model_config.run_name + start_epoch_idx=self.storage.last_epoch_idx() ) model_state, hidden_state = self.storage.load(training_info) diff --git a/vel/storage/streaming/stdout.py b/vel/storage/streaming/stdout.py index d83e8f9d..7ef02893 100644 --- a/vel/storage/streaming/stdout.py +++ b/vel/storage/streaming/stdout.py @@ -1,13 +1,17 @@ -from vel.api import EpochInfo, Callback +from vel.api import EpochInfo, Callback, ModelConfig class StdoutStreaming(Callback): """ Stream results to stdout """ + def __init__(self, model_config: ModelConfig): + self.model_config = model_config + def on_epoch_end(self, epoch_info: EpochInfo): - if epoch_info.training_info.run_name: - print(f"=>>>>>>>>>> EPOCH {epoch_info.global_epoch_idx} [{epoch_info.training_info.run_name}]") + if self.model_config.tag: + tag = self.model_config.tag + print(f"=>>>>>>>>>> EPOCH {epoch_info.global_epoch_idx} [{self.model_config.run_name} - {tag}]") else: - print(f"=>>>>>>>>>> EPOCH {epoch_info.global_epoch_idx}") + print(f"=>>>>>>>>>> EPOCH {epoch_info.global_epoch_idx} [{self.model_config.run_name}]") if any(x.dataset is None for x in epoch_info.result.keys()): self._print_metrics_line(epoch_info.result, dataset=None) @@ -37,6 +41,6 @@ def _print_metrics_line(metrics, dataset=None): print('{0: <10}'.format(dataset.capitalize()), " ".join(metrics_list)) -def create(): +def create(model_config): """ Vel factory function """ - return StdoutStreaming() + return StdoutStreaming(model_config) From 118454f4786986bd8808d3db7252130cff8326c1 Mon Sep 17 00:00:00 2001 From: Federico Galatolo Date: Thu, 26 Sep 2019 00:00:32 +0200 Subject: [PATCH 094/162] Configurable Evaluator cache (#52) * Added configurable cache to the Evaluator * Added tests for configurable cache --- vel/rl/api/evaluator.py | 34 ++++++++++++++++++------- vel/rl/test/test_evaluator_cache.py | 39 +++++++++++++++++++++++++++++ 2 files changed, 64 insertions(+), 9 deletions(-) create mode 100644 vel/rl/test/test_evaluator_cache.py diff --git a/vel/rl/api/evaluator.py b/vel/rl/api/evaluator.py index c8a98307..e0c15d1c 100644 --- a/vel/rl/api/evaluator.py +++ b/vel/rl/api/evaluator.py @@ -2,14 +2,19 @@ class EvaluatorMeta(type): """ Metaclass for Evaluator - gathers all provider methods in a class attribute """ def __new__(mcs, name, bases, attributes): providers = {} + use_cache = {} for name, attr in attributes.items(): if callable(attr): proper_name = getattr(attr, '_vel_evaluator_provides', 
None) - if proper_name is not None: providers[proper_name] = attr + + cache = getattr(attr, '_vel_use_cache', None) + if cache is not None: + use_cache[proper_name] = cache + attributes['_use_cache'] = use_cache attributes['_providers'] = providers return super().__new__(mcs, name, bases, attributes) @@ -88,10 +93,12 @@ class Evaluator(metaclass=EvaluatorMeta): """ @staticmethod - def provides(name): + def provides(name, cache=True): """ Function decorator - value provided by the evaluator """ def decorator(func): func._vel_evaluator_provides = name + func._vel_use_cache = cache + return func return decorator @@ -112,7 +119,7 @@ def is_provided(self, name): else: return False - def get(self, name): + def get(self, name, cache=True): """ Return a value from this evaluator. @@ -120,18 +127,27 @@ def get(self, name): with and without no_grad() context. It is advised in such cases to not use no_grad and stick to .detach() + + If you want to disable the cache you can pass 'cache=False' to the decorator to disable it + for the attribute or to the get() function to disable it just for that call """ - if name in self._storage: - return self._storage[name] + if name in self._use_cache and not self._use_cache[name]: + cache = False + + if name in self._storage and cache: + value = self._storage[name] elif name in self._providers: - value = self._storage[name] = self._providers[name](self) - return value + value = self._providers[name](self) elif name.startswith('rollout:'): rollout_name = name[8:] - value = self._storage[name] = self.rollout.batch_tensor(rollout_name) - return value + value = self.rollout.batch_tensor(rollout_name) else: raise RuntimeError(f"Key {name} is not provided by this evaluator") + + if cache: + self._storage[name] = value + + return value def provide(self, name, value): """ Provide given value under specified name """ diff --git a/vel/rl/test/test_evaluator_cache.py b/vel/rl/test/test_evaluator_cache.py new file mode 100644 index 00000000..1f0b3724 --- /dev/null +++ b/vel/rl/test/test_evaluator_cache.py @@ -0,0 +1,39 @@ +from vel.rl.api import Evaluator, Rollout + +calls = { + "a": 0, + "b": 0, + "c": 0, +} + +class TestEvaluator(Evaluator): + @Evaluator.provides('test:a') + def test_a(self): + calls["a"] += 1 + + @Evaluator.provides('test:b', cache=False) + def test_b(self): + calls["b"] += 1 + + @Evaluator.provides('test:c') + def test_c(self): + calls["c"] += 1 + + +def test_evaluator(): + e = TestEvaluator(Rollout()) + e.get("test:a") + e.get("test:a") + e.get("test:a") + + e.get("test:b") + e.get("test:b") + e.get("test:b") + + e.get("test:c") + e.get("test:c") + e.get("test:c", cache=False) + + assert calls["a"] == 1 # test:a is cached so just one call + assert calls["b"] == 3 # test:b is never cached so three calls + assert calls["c"] == 2 # test:c is cached but one call is not so two calls \ No newline at end of file From 09f14f9886e0728d93778190dcafcd35f024a6ee Mon Sep 17 00:00:00 2001 From: Million Integrals Date: Thu, 26 Sep 2019 14:19:24 -0700 Subject: [PATCH 095/162] Added a global list command for the models. 
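Looping back to the evaluator-cache change above (#52): a minimal sketch of how the `cache` switch on `Evaluator.provides` and `Evaluator.get` is intended to be used. The evaluator class, provider names and tensor values are made up for illustration; only the decorator, the `get()` semantics and the bare `Rollout()` construction (mirroring the accompanying test) come from the patch.

```python
import torch

from vel.rl.api import Evaluator, Rollout


class IllustrativeEvaluator(Evaluator):
    """ Hypothetical evaluator - provider names are illustrative only """

    @Evaluator.provides('model:logits')
    def model_logits(self):
        # Stands in for an expensive, deterministic computation:
        # evaluated once, then served from the cache
        return torch.zeros(4, 6)

    @Evaluator.provides('model:noise', cache=False)
    def model_noise(self):
        # Stochastic value: never cached, recomputed on every get()
        return torch.randn(4)


evaluator = IllustrativeEvaluator(Rollout())

a = evaluator.get('model:logits')               # computed and stored
b = evaluator.get('model:logits')               # served from the cache
c = evaluator.get('model:logits', cache=False)  # recomputed, result not stored
n1 = evaluator.get('model:noise')
n2 = evaluator.get('model:noise')               # differs from n1, caching is off
```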
--- .velproject.yaml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/.velproject.yaml b/.velproject.yaml index 2b6bbabd..8127a339 100644 --- a/.velproject.yaml +++ b/.velproject.yaml @@ -24,3 +24,9 @@ visdom_settings: server: 'http://localhost' port: 8097 + +# List of commands that are shared among all models in this project +global_commands: + list: + name: vel.command.list_command + From 521912c7a80b5f39b173741cf0801c1172ebfa7d Mon Sep 17 00:00:00 2001 From: Million Integrals Date: Thu, 26 Sep 2019 14:19:43 -0700 Subject: [PATCH 096/162] Canonical MIST-VAE notebook. --- .../autoencoders/mnist/mnist-vae.ipynb | 340 ++++++++++++++---- 1 file changed, 276 insertions(+), 64 deletions(-) diff --git a/examples-notebooks/autoencoders/mnist/mnist-vae.ipynb b/examples-notebooks/autoencoders/mnist/mnist-vae.ipynb index 4a00f5da..652fd859 100644 --- a/examples-notebooks/autoencoders/mnist/mnist-vae.ipynb +++ b/examples-notebooks/autoencoders/mnist/mnist-vae.ipynb @@ -8,6 +8,7 @@ "source": [ "import os\n", "import torch\n", + "import tqdm\n", "import numpy as np\n", "import matplotlib.pyplot as plt" ] @@ -31,24 +32,16 @@ "metadata": {}, "outputs": [], "source": [ - "config = nb.load_config('examples-configs/autoencoders/mnist/mnist_cnn_vae.yaml', run_number=2, device='cpu')" + "config = nb.load_config('examples-configs/latent/mnist/mnist_fc_vae.yaml', run_number=1, device='cuda:0')" ] }, { "cell_type": "code", "execution_count": 4, "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "WARNING:root:Setting up a new session...\n" - ] - } - ], + "outputs": [], "source": [ - "model = config.load_trained_model()" + "model = config.load_trained_model().to(config.device)" ] }, { @@ -60,30 +53,27 @@ "name": "stdout", "output_type": "stream", "text": [ - "MnistCnnVAE(\n", + "FcVae(\n", " (encoder): Sequential(\n", - " (0): Conv2d(1, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n", - " (1): ReLU(inplace)\n", - " (2): Conv2d(8, 16, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1))\n", - " (3): ReLU(inplace)\n", - " (4): Conv2d(16, 16, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1))\n", - " (5): Flatten()\n", - " (6): Linear(in_features=784, out_features=32, bias=True)\n", + " (0): Flatten()\n", + " (1): Linear(in_features=784, out_features=200, bias=True)\n", + " (2): Tanh()\n", + " (3): Linear(in_features=200, out_features=200, bias=True)\n", + " (4): Tanh()\n", + " (5): Linear(in_features=200, out_features=100, bias=True)\n", " )\n", " (decoder): Sequential(\n", - " (0): Linear(in_features=16, out_features=784, bias=True)\n", - " (1): ReLU(inplace)\n", - " (2): Reshape()\n", - " (3): ConvTranspose2d(16, 16, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), output_padding=(1, 1))\n", - " (4): ReLU(inplace)\n", - " (5): ConvTranspose2d(16, 8, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), output_padding=(1, 1))\n", - " (6): ReLU(inplace)\n", - " (7): ConvTranspose2d(8, 1, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n", - " (8): Sigmoid()\n", + " (0): Linear(in_features=50, out_features=200, bias=True)\n", + " (1): Tanh()\n", + " (2): Linear(in_features=200, out_features=200, bias=True)\n", + " (3): Tanh()\n", + " (4): Linear(in_features=200, out_features=784, bias=True)\n", + " (5): Reshape(sizes=(1, 28, 28), batch_dims=1)\n", + " (6): Sigmoid()\n", " )\n", ")\n", "----------------------------------------------------------------------------------------------------\n", - "Number of model parameters: 45,569\n", + "Number of model 
parameters: 425,284\n", "----------------------------------------------------------------------------------------------------\n" ] } @@ -98,8 +88,10 @@ "metadata": {}, "outputs": [], "source": [ - "data_source = config.provide('source')\n", - "train_dataset = data_source.train_dataset" + "data_loader = config.provide('loader')\n", + "data_source = data_loader.transformed_source\n", + "train_dataset = data_source.train\n", + "validation_dataset = data_source.validation" ] }, { @@ -109,7 +101,7 @@ "outputs": [], "source": [ "def get_sample(idx):\n", - " return train_dataset[idx][0]" + " return train_dataset[idx]['x'].to(config.device)" ] }, { @@ -119,7 +111,7 @@ "outputs": [], "source": [ "def show_image(axis, sample):\n", - " axis.imshow(train_dataset.denormalize(sample)[:, :, 0], cmap='gray')" + " axis.imshow(train_dataset.denormalize_item(sample, 'x'), cmap='gray')" ] }, { @@ -129,7 +121,7 @@ "outputs": [ { "data": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAAA2oAAACzCAYAAAD48u9xAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4zLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvnQurowAAHTJJREFUeJzt3XuQVdX55vHnRYOXOKiIQUqjmARNaQrbiEocSkhA4xgTUROVUhFjiRVvJKUUxhCHjMEQRafEaOIlgBdGtIJENOOoI15iFAokJj9FETXRH9jBK3LRyChr/uCkgr6r6d3nutbu76eKovth77PXPv3QfVbv3qsthCAAAAAAQDp6tHoAAAAAAIBPYqIGAAAAAIlhogYAAAAAiWGiBgAAAACJYaIGAAAAAIlhogYAAAAAiWGiBgAAAACJYaIGAAAAAImpaaJmZkeZ2TIze8nMLq7XoIBGobPIEb1FbugsckNnkSILIVS3o9lWkl6UdISkFZIWSRoVQli6hX2qOxhQEUKwavels2iFWjordb23dBZ18FYIYddqd6azaIGmdrayD71FTYq8Pqjlitohkl4KIbwSQtggabakY2t4PKDR6CxyRG/RbK/WuD+dRbPRWZRSLRO13SX952bvr6hkQKroLHJEb5EbOovc0FkkaetGH8DMxkoa2+jjAPVCZ5EbOovc0FnkiN6i2WqZqK2U9PnN3t+jkn1CCOFGSTdK/DwvWo7OIked9pbOIjF0Frnh9QGSVMuPPi6SNMDM9jaznpJOljSvPsMCGoLOIkf0Frmhs8gNnUWSqr6iFkL4yMzOk/SApK0kTQ8hPFe3kQF1RmeRI3qL3NBZ5IbOIlVVL89f1cG4TIwa1brUeVfRWdSKziJDT4cQBjXrYHQWddDUzkr0FrVr9PL8AAAAAIAGYKIGAAAAAIlhogYAAAAAiWGiBgAAAACJYaIGAAAAAIlhogYAAAAAiWGiBgAAAACJYaIGAAAAAIlhogYAAAAAiWGiBgAAAACJYaIGAAAAAIlhogYAAAAAiWGiBgAAAACJ2brVAwBQPgcddJDLzjvvPJeNHj3aZbfeeqvLrr322uhxlixZUsXoAAAA0scVNQAAAABIDBM1AAAAAEgMEzUAAAAASExN96iZ2d8lrZX0saSPQgiD6jEooJHoLXJDZ5EbOovc0FmkyEII1e+8qdSDQghvFdy++oNlbKuttnLZjjvuWNNjxhZm2H777V227777uuzcc8+NPubUqVNdNmrUKJf985//dNmUKVNc9rOf/Sx6nFqEEKzWx+hKb7trZ4tqa2uL5vPnz3dZr169qj7Oe++9F8132WWXqh+zWegsNjd8+HCXzZo1y2VDhw512bJlyxoypoina32RSmfTNnHiRJd19DW7Rw//w1fDhg1z2WOPPVbzuGrQ1M5Wtqe3qEmR1wf86CMAAAAAJKbWiVqQ9KCZPW1mY+sxIKAJ6C1yQ2eRGzqL3NBZJKfW36M2JISw0sw+J+khM3shhPD45htUyk7hkZIt9pbOIkF0Frmhs8gNr2mRnJquqIUQVlb+fkPSXEmHRLa5MYQwiJsykYrOektnkRo6i9zQWeSG17RIUdVX1Mzss5J6hBDWVt4+UtL/qNvIWmTPPfeM5j179nTZYYcd5rIhQ4a4bKeddnLZCSecUMXoum7FihUumzZtWnTb4447zmVr16512V/+8heXtfgm4sLK2ttmOOQQ9zVLc+bMiW4bWywntnBRrF8bNmxwWUeLhgwePNhlS5YsKfSYuUihs4cffrjLYh+TuXPnNmM4WTv44INdtmjRohaMpHFS6Cz+bcyYMS6bMGGCyzZu3Fj4MWtZiC5FdBapquVHH/tKmmtm/3qc/xVC+D91GRXQOPQWuaGzyA2dRW7oLJJU9UQthPCKpAPqOBag4egtckNnkRs6i9zQWaSK5fkBAAAAIDFM1AAAAAAgMbUuz5+1trY2l82fPz+6bWxxhNTEbgSeOHGiy9atWxfdf9asWS5rb2932bvvvuuyZcuWFRkiErT99tu77Ktf/arLbr/9dpf169evpmMvX77cZVdccYXLZs+eHd3/T3/6k8tinf/FL35RxejwL8OGDXPZgAEDXMZiIv/Wo0f8+6B77723y/baay+XVe6VAWoW69e2227bgpGgDA499FCXnXrqqS4bOnRodP/999+/0HEuuugil73++usuiy3iJ8VfsyxcuLDQsVPCFTUAAAAASAwTNQAAAABIDBM1AAAAAEgMEzUAAAAASEy3Xkzktddec9nbb78d3bYZi4l0dJPj6tWrXfb1r3/dZRs2bHDZbbfdVvvAUGo33HCDy0aNGtWUY8cWLdlhhx1c9thjj0X3jy1yMXDgwJrHhU8aPXq0y5566qkWjCQfHS20c9ZZZ7ksdtP7Cy+8UPcxofxGjBjhsvPPP7/Qvh117phjjnHZqlWrujYwZOmkk05y2TXXXOOyPn36uKyjBZEeffRRl+26664uu/LKKwuMsOPjxB7z5JNPLvSYKeGKGgAAAAAkhokaAAAAACSGiRoAAAAAJIaJGgAAAAAkhokaAAAAACSmW6/6+M4777hs/Pjx0W1
jqx79+c9/dtm0adMKHfuZZ55x2RFHHBHddv369S7bf//9XTZu3LhCx0b3ddBBB7nsW9/6lss6WkXp0zpajfHee+912dSpU132+uuvuyz2/+rdd9+NHucb3/iGy4qOHcX16MH39Lrq5ptvLrzt8uXLGzgSlNWQIUNcNmPGDJcVXbW6o1X2Xn311a4NDMnbemv/8n/QoEEuu+mmm1y2/fbbu+zxxx932WWXXRY99hNPPOGybbbZxmV33XWXy4488sjoY8YsXry48LYp46svAAAAACSGiRoAAAAAJIaJGgAAAAAkptOJmplNN7M3zOzZzbLeZvaQmS2v/L1zY4cJdA29RW7oLHJDZ5EbOovcWAhhyxuYHS5pnaRbQwhfqWRXSHonhDDFzC6WtHMIYUKnBzPb8sES1qtXL5etXbvWZTfccIPLzjzzTJedeuqpLrvjjjuqHF33EUIotFJEvXqbc2fb2tpcNn/+fJfFuh1z//33u2zUqFHRbYcOHeqygQMHuiy24MKbb75ZaDyS9PHHH7vs/fffLzSeJUuWFD5OLXLrbOzj9NRTT7ns7rvvdtlpp51Wy6FL5cknn4zmgwcPdtlhhx3msgULFtR9TF3wdAjBryzwKal0truKLfTw/e9/v9C+jz76qMuGDx9e65BaqamdreyXbW/HjBnjsqILID300EMuO+mkk1y2Zs2awuOJvSaeOXNmoX1XrlwZzWOLo3Tl9UUzFHl90OkVtRDC45I+vTzisZJuqbx9i6SRXR4d0ED0Frmhs8gNnUVu6CxyU+09an1DCO2Vt/8hqW+dxgM0Er1FbugsckNnkRs6i2TV/HvUQghhS5d/zWyspLG1Hgeopy31ls4iRXQWuaGzyA2vaZGaaq+orTKzfpJU+fuNjjYMIdwYQhhU5GeHgQYr1Fs6i4TQWeSGziI3vKZFsqq9ojZP0umSplT+vqduI0pU0Zsi33vvvULbnXXWWS678847o9tu3Lix0GOiU6Xs7T777BPNx48f77Idd9zRZW+99ZbL2tvbXXbLLbe4bN26ddFj/+EPfyiUNcJ2223nsgsvvNBlp5xySjOGU6umd/boo492Wew5xb/17et/UmrvvfcuvH9HN8NnqpSfZ1upT58+0Ty2cEjs9cLq1atd9vOf/7z2gZVHaTt72WWXRfNLLrnEZbHFBa+//nqXTZw40WVdWTgk5ic/+UnV+15wwQXRPLWFQ6pVZHn+OyQ9JWlfM1thZmdqU5mPMLPlkkZU3geSQW+RGzqL3NBZ5IbOIjedXlELIcTX35ayXscV5UZvkRs6i9zQWeSGziI31d6jBgAAAABoECZqAAAAAJCYmpfnxydNmjTJZQcddJDLhg4d6rIRI0ZEH/PBBx+seVwoh2222cZlU6dOjW4bWxRi7dq1Lhs9erTLFi9e7LKcF5TYc889Wz2EbOy7776FtnvuuecaPJJ8xP4PxhYYkaQXX3zRZbH/l+ie+vfv77I5c+bU9JjXXnutyx555JGaHhPpufTSS10WWzREkjZs2OCyBx54wGUTJkxw2QcffFBoPNtuu200P/LII10W+xptZi6LLYJzzz2lWfsliitqAAAAAJAYJmoAAAAAkBgmagAAAACQGCZqAAAAAJAYFhOps/Xr17vsrLPOctmSJUtcdtNNN0UfM3bTb2yxh+uuu85lsd80j3wdeOCBLostGtKRY4891mWPPfZYTWNC97Ro0aJWD6GuevXq5bKjjjrKZaeeeqrLYjfHd+Syyy5z2erVqwvvj3KLdW7gwIGF93/44Yddds0119Q0JqRnp512ctk555zjso5eA8YWDhk5cmTV4/nSl77kslmzZkW3jS2wF/O73/3OZVdccUXXBlYCXFEDAAAAgMQwUQMAAACAxDBRAwAAAIDEMFEDAAAAgMSwmEgTvPzyyy4bM2aMy2bMmBHd/7TTTiuUffazn3XZrbfe6rL29vbocZC+q6++2mVmFt02tkhI2RYO6dHDf69p48aNLRhJ99O7d++6P+YBBxzgsli/R4wY4bI99tjDZT179nTZKaecEj12rEsffPCByxYuXOiyDz/80GVbbx3/8vr0009Hc3Q/scUbpkyZUnj/J554wmWnn366y957772uDQzJi31u69OnT+H9L7jgApd97nOfc9kZZ5zhsu985zsu+8pXvuKyHXbYIXrs2AInsez22293WWzBvrLjihoAAAAAJIaJGgAAAAAkhokaAAAAACSGiRoAAAAAJKbTiZqZTTezN8zs2c2ySWa20syeqfw5urHDBIqjs8gRvUVu6CxyQ2eRmyKrPs6U9CtJn14+8H+GEKbWfUTdxNy5c122fPny6Laxlf6GDx/usssvv9xle+21l8smT54cPc7KlSujeYZmqgSdPeaYY1zW1tbmsthqSZI0b968uo8pNbEVHmPPxzPPPNOM4dRqphLobWylw9hz+pvf/MZll1xySU3HHjhwoMtiqz5+9NFHLnv//fddtnTpUpdNnz49euzFixe7LLZK6qpVq1y2YsUKl2233XbR47zwwgvRPFMzlUBnc9C/f3+XzZkzp6bHfOWVV1wW6yc+YaZK0NkNGza47M0333TZrrvuGt3/b3/7m8s6ei1RxOuvv+6yNWvWRLft16+fy9566y2X3XvvvVWPp0w6vaIWQnhc0jtNGAtQF3QWOaK3yA2dRW7oLHJTyz1q55nZXyuXkXfuaCMzG2tmi83Mf7sSaC46ixx12ls6i8TQWeSG1wdIUrUTtV9L+qKkNkntkq7qaMMQwo0hhEEhhEFVHguoBzqLHBXqLZ1FQugscsPrAySrqolaCGFVCOHjEMJGSTdJOqS+wwLqi84iR/QWuaGzyA2dRcqKLCbimFm/EEJ75d3jJD27pe1RzLPPxp/GE0880WXf/va3XTZjxgyXnX322S4bMGBA9DhHHHFEZ0PMVo6djS1G0LNnT5e98cYb0f3vvPPOuo+pGbbZZhuXTZo0qfD+8+fPd9mPf/zjWobUMq3o7TnnnOOyV1991WWHHXZY3Y/92muvuez3v/+9y55//nmXLViwoO7jiRk7dqzLYjfsxxZ66A5y/FzbDBMmTHBZbDGkrpgyZUpN+2OTHDu7evVql40cOdJl9913X3T/3r17u+zll1922T333OOymTNnuuydd/xtf7Nnz44eO7aYSEfbosBEzczukDRMUh8zWyHpv0saZmZtkoKkv0vyswGgRegsckRvkRs6i9zQWeSm04laCGFUJP5tA8YC1AWdRY7oLXJDZ5EbOovc1LLqIwAAAACgAZioAQAAAEBiqlpMBM0Vu2n0tttuc9nNN9/ssq239h/iww8/PHqcYcOGuezRRx/tfIBoqQ8//DCat7e3R/OUxBYOmThxosvGjx8f3X/FihUuu+oqv7LyunXrqhgd/uWXv/xlq4eQjOHDhxfabs6cOQ0eCVLV1tbmsiOPPLLqx4st6CBJy5Ytq/oxUT4LFy50WWyho0aIva4cOnRodNvYIjrddfGlIriiBgAAAACJYaIGAAAAAIlhogYAAAAAiWGiBgAAAACJYTGRhAwcOD
Caf/e733XZwQcf7LLYwiExS5cujeaPP/54of2Rlnnz5rV6CIXEbrCPLRJy0kknuayjm+lPOOGE2gcGNMDcuXNbPQS0yIMPPuiynXfeudC+CxYscNmYMWNqHRLQUNttt53LYouGSFIIwWWzZ8+u+5jKgitqAAAAAJAYJmoAAAAAkBgmagAAAACQGCZqAAAAAJAYFhNpgn333ddl5513nsuOP/746P677bZb1cf++OOPXdbe3h7dtqMbP9EaZlYoGzlyZHT/cePG1X1MRf3oRz9y2U9/+lOX7bjjji6bNWuWy0aPHl2fgQFAg+2yyy4uK/r19frrr3fZunXrah4T0EgPPPBAq4dQWlxRAwAAAIDEMFEDAAAAgMQwUQMAAACAxHQ6UTOzz5vZI2a21MyeM7Nxlby3mT1kZssrfxf7bY5Ag9FZ5IbOIkf0Frmhs8hNkStqH0m6MISwn6TBks41s/0kXSzp4RDCAEkPV94HUkBnkRs6ixzRW+SGziIrna76GEJol9ReeXutmT0vaXdJx0oaVtnsFkmPSprQkFEmKrYa46hRo1wWW+Gxf//+dR/P4sWLXTZ58mSXzZs3r+7HTklZOhtCKJR1tCrotGnTXDZ9+nSXvf322y4bPHiwy0477TSXHXDAAdFj77HHHi577bXXXBZbKSq26lnZlaWz3VVsNdZ99tknuu2CBQsaPZymobfSjBkzXNajR/V3lTz55JO1DAedoLON8c1vfrPVQyitLn02MbP+kg6UtFBS30rhJekfkvrWdWRAHdBZ5IbOIkf0Frmhs8hB4d+jZmY7SJoj6YchhDWbfwcxhBDMzH+rf9N+YyWNrXWgQFfRWeSGziJH1fSWzqKV+FyLXBS6omZmn9GmQs8KIdxdiVeZWb/Kv/eT9EZs3xDCjSGEQSGEQfUYMFAEnUVu6CxyVG1v6Sxahc+1yEmRVR9N0m8lPR9CuHqzf5on6fTK26dLuqf+wwO6js4iN3QWOaK3yA2dRW6K/Ojjf5V0mqT/MLNnKtklkqZIusvMzpT0qqQTGzPE5urbN/5jyfvtt5/LfvWrX7nsy1/+ct3HtHDhQpddeeWVLrvnHv95ZePGjXUfTwa6VWe32mqraH7OOee47IQTTnDZmjVrXDZgwICaxhS7If6RRx5x2aWXXlrTcUqkW3W2bGKL/NSyoERGuk1v29raovmIESNcFvu6u2HDBpddd911Llu1alUVo0MXdJvONtMXvvCFVg+htIqs+viEJL+k1SbD6zscoHZ0Frmhs8gRvUVu6Cxy0y2+5QcAAAAAOWGiBgAAAACJYaIGAAAAAIkp/HvUcte7d2+X3XDDDS7r6Ibhet8oGVts4aqrropu+8ADD7jsgw8+qOt4kJ6nnnrKZYsWLXLZwQcfXPgxd9ttN5d1tIDOp7399tsumz17dnTbcePGFR4TUEZf+9rXovnMmTObOxDUxU477RTNY59TY1auXOmyiy66qKYxAan44x//6LKOFlTqpovcVY0ragAAAACQGCZqAAAAAJAYJmoAAAAAkBgmagAAAACQmOwXEzn00ENdNn78eJcdcsghLtt9993rPp7333/fZdOmTXPZ5Zdf7rL169fXfTzI14oVK1x2/PHHu+zss8+O7j9x4sSqj33NNde47Ne//rXLXnrppaqPAZSFWUe/PxcAyu/ZZ5912fLly6Pbxhbn++IXv+iyN998s/aBlQBX1AAAAAAgMUzUAAAAACAxTNQAAAAAIDFM1AAAAAAgMdkvJnLccccVyopaunRpNL/vvvtc9tFHH7nsqquuctnq1aurHg+wufb2dpdNmjQpum1HOYDq3X///S773ve+14KRoJleeOGFaP7kk0+6bMiQIY0eDpC82KJ5knTzzTe7bPLkyS47//zzXdbRa/Qy44oaAAAAACSGiRoAAAAAJIaJGgAAAAAkptOJmpl93sweMbOlZvacmY2r5JPMbKWZPVP5c3Tjhwt0js4iN3QWuaGzyBG9RW4shLDlDcz6SeoXQlhiZv9F0tOSRko6UdK6EMLUwgcz2/LBgE6EEKyzbegsUkJnkaGnQwiDtrQBnUViOu2sRG+bqVevXtH8rrvuctmIESNcdvfdd7vsjDPOcNn69eurGF0airw+6HTVxxBCu6T2yttrzex5SbvXPjygMegsckNnkRs6ixzRW+SmS/eomVl/SQdKWliJzjOzv5rZdDPbuc5jA2pGZ5EbOovc0FnkiN4iB4Unama2g6Q5kn4YQlgj6deSviipTZu+O+F/gdim/caa2WIzW1yH8QKF0Vnkhs4iN3QWOaK3yEWhiZqZfUabCj0rhHC3JIUQVoUQPg4hbJR0k6RDYvuGEG4MIQwq8rPDQL3QWeSGziI3dBY5orfISZFVH03SbyU9H0K4erO832abHSfp2foPD+g6Oovc0Fnkhs4iR/QWuSmy6uMQSX+U9B+SNlbiSySN0qZLxEHS3yWdXblJc0uPxQo5qEnBFfToLJJBZ5GhIqs+0lmkpOiqj/S2xWKrQU6ePNllP/jBD1w2cOBAly1durQ+A2uBeq36+ISk2AP972oGBTQanUVu6CxyQ2eRI3qL3HRp1UcAAAAAQOMxUQMAAACAxDBRAwAAAIDEdLqYSF0Pxo2XqFGRGy/ric6iVnQWGSq0MEO90FnUQVM7K9Fb1K7I6wOuqAEAAABAYpioAQAAAEBimKgBAAAAQGKYqAEAAABAYjr9hdd19pakVytv96m8XwZlOhcp3fPZqwXHpLN5SPV86Gz9lOlcpLTPp9m9LWtnpXKdT8rn0srPtSk/L9Uo0/mkfC6FOtvUVR8/cWCzxc1eoadRynQuUvnOp17K9LyU6Vyk8p1PvZTpeSnTuUjlO596KdvzUqbzKdO51FPZnpcynU8ZzoUffQQAAACAxDBRAwAAAIDEtHKidmMLj11vZToXqXznUy9lel7KdC5S+c6nXsr0vJTpXKTynU+9lO15KdP5lOlc6qlsz0uZzif7c2nZPWoAAAAAgDh+9BEAAAAAEtP0iZqZHWVmy8zsJTO7uNnHr5WZTTezN8zs2c2y3mb2kJktr/y9cyvHWJSZfd7MHjGzpWb2nJmNq+RZnk+j0Nl00Nli6Gw66GxxOfe2TJ2V6G1ROXdWKldvy9rZpk7UzGwrSddJ+m+S9pM0ysz2a+YY6mCmpKM+lV0s6eEQwgBJD1fez8FHki4MIewnabCkcysfj1zPp+7obHLobCfobHLobAEl6O1MlaezEr3tVAk6K5Wrt6XsbLOvqB0i6aUQwishhA2SZks6tsljqEkI4XFJ73wqPlbSLZW3b5E0sqmDqlIIoT2EsKTy9lpJz0vaXZmeT4PQ2YTQ2ULobELobGFZ97ZMnZXobUFZd1YqV2/L2tlmT9R2l/Sfm72/opLlrm8Iob3y9j8k9W3lYKphZv0lHShpoUpwPnVEZxNFZztEZxNFZ7eojL0txceY3naojJ2VSvAxLlNnWUykzsKmZTSzWkrTzHaQNEfSD0MIazb/txzPB12T48eYznZvOX6M6Wz3luvHmN52bzl+jMvW2WZP1FZK+
vxm7+9RyXK3ysz6SVLl7zdaPJ7CzOwz2lToWSGEuytxtufTAHQ2MXS2U3Q2MXS2kDL2NuuPMb3tVBk7K2X8MS5jZ5s9UVskaYCZ7W1mPSWdLGlek8fQCPMknV55+3RJ97RwLIWZmUn6raTnQwhXb/ZPWZ5Pg9DZhNDZQuhsQuhsYWXsbbYfY3pbSBk7K2X6MS5tZ0MITf0j6WhJL0p6WdJPmn38Ooz/Dkntkv6fNv088pmSdtGmlWSWS/q/knq3epwFz2WINl0C/qukZyp/js71fBr4PNHZRP7Q2cLPE51N5A+d7dJzlW1vy9TZyvnQ22LPU7adrYy/NL0ta2etcnIAAAAAgESwmAgAAAAAJIaJGgAAAAAkhokaAAAAACSGiRoAAAAAJIaJGgAAAAAkhokaAAAAACSGiRoAAAAAJIaJGgAAAAAk5v8DVMTDbjI6QLUAAAAASUVORK5CYII=\n", + "image/png": "iVBORw0KGgoAAAANSUhEUgAAA2cAAACxCAYAAABAxMXKAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4xLjEsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy8QZhcZAAAS3UlEQVR4nO3dT6htV30H8O+vRifVQVLb8IixkZJJcKAQxEEGdtCSZpI4ER0FWngOFBScBDuo0Km1o1JIMbwMrCJoa5BSmwYhjsRERPOnmlQMJjwTQgbGkY2uDt4RX5J739n37n3OWWufzwcO79zzzp+19vmefffvrr3WqdZaAAAAOKw/OHQDAAAAUJwBAAB0QXEGAADQAcUZAABABxRnAAAAHVCcAQAAdGBWcVZVd1bVj6vq2aq6b6lGwa7ILCOSW0Yjs4xGZulFnfd7zqrqLUl+kuQvkjyf5HtJPtZae+oaj/GlaszSWqvzPlZmOYQ5mU3OnluZZQEvt9b++LwPllkOYK+Z3TxGbpnltOODOSNnH0jybGvtp621Xyf5SpK7Zzwf7JrMMiK5Zd+em/l4mWXfZJbVmFOc3ZTk51f9/PzmNuiVzDIiuWU0MstoZJZuXLfrF6iqi0ku7vp1YCkyy2hkltHILCOSW/ZhTnH2QpKbr/r5XZvbXqe1dn+S+xPn53JwMsuItuZWZumMzDIaxwd0Y85pjd9LcmtVvaeq3pbko0keWqZZsBMyy4jkltHILKORWbpx7pGz1tprVfXJJN9K8pYkD7TWnlysZbAwmWVEcstoZJbRyCw9OfdS+ud6MUPAzDR3WfKzklnmklkG9Hhr7fZ9vZjMsoC9ZjaRW+bbxVL6AAAALERxBgAA0AHFGQAAQAcUZwAAAB1QnAEAAHRAcQYAANABxRkAAEAHFGcAAAAdUJwBAAB0QHEGAADQAcUZAABABxRnAAAAHVCcAQAAdOC6QzcAOA6ttUn3q6rJjz3pvgAAozJyBgAA0AHFGQAAQAcUZwAAAB2YNeesqn6W5NUkv0nyWmvt9iUaBbskt4xGZhmNzDIamaUXSywI8uettZcXeB5mOmnRhLMsmDB1wYapOl+sQW53aE6Wls7hisjsCsxZGGdAMtupufvZleTzJDLLwTmtEQAAoANzi7OW5L+q6vGqurhEg2AP5JbRyCyjkVlGI7N0Ye5pjXe01l6oqj9J8nBV/U9r7dGr77AJuJDTk2vmVmbpkMwyGpllNI5p6UItNb+jqj6X5Fettc9f4z4mk+zQMcw5a60t+qTbciuz57OveWMjzHuQWa42yJyzx5dcDEFm+7PCOWd7zezmPnLLLKcdH5z7tMaq+sOqesfvrif5yyRPnPf5etFaO/Fylvse6nJIVfWmS4/WmttDmZvFUXJzSDK7LseQeZllNDJLT+ac1nhjkn/b/GK5Lsm/ttb+c5FWwe7ILaORWUYjs4xGZunGYqc1TnqxAYaAT9seJ/1189CjVVPs67TGff31d+lTxLYZIbOHtItTY8468tY7mWWbuaek78Cip4htI7P757TG+eSWuRY/rREAAIDlKM4AAAA6MHcp/aMxwimMU809dbPD0xnYg118Bqbm61qL8kx5PMuz7c9nTb9LGIPMsWu7yNjU3ydnOaYdhZEzAACADijOAAAAOqA4AwAA6IDiDAAAoAMWBBnA0t+xdpZJkiNPqGSaqVka5bv+LFSxvB7fZ2CaY/g+SfZnX99vesy/d4ycAQAAdEBxBgAA0AHFGQAAQAcUZwAAAB1QnAEAAHTAao1vcNqqRPtYNcYqivTskCsnneVz6bMBHKt9reTMcZizmvNZjLIa9L4YOQMAAOiA4gwAAKADijMAAIAObC3OquqBqnqpqp646rYbqurhqnpm8+/1u20mnI3cMhqZZTQyy2hklhFMGTm7lOTON9x2X5JHWmu3Jnlk8/OqVdW5L1Ofj0VdypHntrU26TJHj5+DffR7Ry7lyDO7JgPlbo5LkdmDmZOvIz4GuRSZPdER7K+GsbU4a609muSVN9x8d5IHN9cfTHLPwu2CWeSW0cgso5FZRiOzjOC8c85ubK1d3lz/RZIbF2oP7JLcMhqZZTQyy2hklq7M/p6z1lqrqlPHPqvqYpKLc18HlnSt3MosPZJZRiOzjMYxLT0478jZi1V1IUk2/7502h1ba/e31m5vrd1+zteCpUzKrczSEZllNDLLaBzT0pXzFmcPJbl3c/3eJN9YpjnjM6Gya0eV2zkTvucs8rGL9hyxbjPr/VzOyrZlt5k9VivL1y6sNrNTF8g69GIyS7dn9IzXtg1QVV9O8qEk70zyYpK/S/LvSb6a5N1JnkvykdbaGydYnvRcq69UpgZq9OAcSmtt0oZbKrdry+za8jnnjx/76uMaMnvSdh4lI4dy1oOLzjw+ZWSg58weg7Xtz2faa2Y3z9V9bncxQLCLPI3wu3wXTjs+2FqcLWmEIM9lZ7lbUw90l7K2zK4tnyPs0NeQWcXZ2R1DcbaUte1n92Vt+/OZ9prZZIzcKs76dtrxwXlPawQAAGBBijMAAIAOzF5Kn9ebOrzqNCGWMve0hZEze9LrW4RnHtvv7Gwzdkm+mGIXxwKydxhGzgAAADqgOAMAAOiA4gwAAKADijMAAIAOWBBkD6ZOqDztfodedAGSPnPoe37YpVG+IwjgLKbuh+bsr/a1WNkaGTkDAADogOIMAACgA4ozAACADijOAAAAOmBBkD2YOqnxtMmTcyZVHvOESt7sWPNw0mfoWLfFUg65Tae+9i4W9DjJIV+bcVlwhhHZt+2ekTMAAIAOKM4AAAA6oDgDAADogOIMAACgA1uLs6p6oKpeqqonrrrtc1X1QlX9YHO5a7fNhOlklhHJLaORWUYjs4xgysjZpSR3nnD7P7bW3re5/MeyzTpOVXXiZY7W2psuR+BSVprZs7yfS2eJnbuUDnK7j33Q3MvU15nb76mXI93PJp1kFs7gUmR28f0vy9panLXWHk3yyh7aAouQWUYkt4xGZhmNzDKCOXPOPllVP9wMEV9/2p2q6mJVPVZVj81
      [... remainder of base64-encoded PNG figure data omitted ...]\n",
      "text/plain": [ "..." ]
@@ -145,7 +137,7 @@
    "fig, axes = plt.subplots(1, 5)\n",
    "\n",
    "for index in range(5):\n",
-    "    show_image(axes[index], get_sample(index))"
+    "    show_image(axes[index], get_sample(index).cpu())"
   ]
  },
  {
@@ -154,19 +146,24 @@
   "metadata": {},
   "outputs": [
    {
-     "data": {
-      "text/plain": [
-       "tensor([[ 0.7010,  0.7096,  0.2029, -0.8527, -0.1471,  0.1670, -0.0375,  1.2047,\n",
-       "         -1.9497, -0.1735,  2.7477,  0.9634, -1.8239, -1.0749,  0.8230,  0.0965]])"
-      ]
-     },
-     "execution_count": 10,
-     "metadata": {},
-     "output_type": "execute_result"
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "tensor([[ 0.5881, -1.6104,  0.4107,  1.0962,  1.7997,  1.0594,  0.0516,  1.5038,\n",
+      "          2.1698,  0.0210, -0.3525,  0.4182,  1.5991,  0.0826,  1.5369,  0.0597,\n",
+      "          2.2284,  0.1273, -1.6312, -0.1385,  1.0510,  0.0295, -0.2376, -0.0673,\n",
+      "          0.5040,  0.7474, -0.1831,  0.7763,  0.1418,  1.6395, -2.4175,  0.7683,\n",
+      "          0.1352, -2.3757, -0.4010, -2.0714,  0.0345, -0.5530,  1.6734, -1.6953,\n",
+      "          2.1401,  1.4186,  0.7702,  0.0874, -0.8720, -1.3320, -1.0852, -0.1516,\n",
+      "         -1.0706,  1.0755]], device='cuda:0')\n",
+      "torch.Size([1, 50])\n"
+     ]
    }
   ],
   "source": [
-    " model.encode(get_sample(0)[None])"
+    "x = model.encode(get_sample(0)[None])\n",
+    "print(x)\n",
+    "print(x.shape)"
   ]
  },
  {
@@ -176,7 +173,7 @@
   "outputs": [
    {
     "data": {
-     "image/png": "[... base64-encoded PNG data omitted (figure rendered with matplotlib 3.0.3) ...]\n",
+     "image/png": "[... base64-encoded PNG data omitted (figure rendered with matplotlib 3.1.1) ...]\n",
      "text/plain": [ "..." ]
@@ -193,9 +190,9 @@
    "\n",
    "for index in range(5):\n",
    "    sample = get_sample(index)\n",
-    "    decoded = model(sample[None])['decoded'][0].detach()\n",
-    "    show_image(axes[0, index], sample)\n",
-    "    show_image(axes[1, index], decoded)"
+    "    decoded = model(sample[None])[0].detach()\n",
+    "    show_image(axes[0, index], sample.cpu())\n",
+    "    show_image(axes[1, index], decoded.cpu())"
   ]
  },
  {
@@ -205,9 +202,9 @@
   "outputs": [
    {
     "data": {
-     "image/png": "[... base64-encoded PNG data omitted (figure rendered with matplotlib 3.0.3) ...]\n",
+     "image/png": "[... base64-encoded PNG data omitted (figure rendered with matplotlib 3.1.1) ...]\n",
Thi3UZHeVbHcgCeBLDEe/+A+dBkAHLLNgLAS5k/PKKKY81S3LBmKW5YsxRHrNtocWVN5nPO9QHwNoAFAGQW7U9R3EM3CUBLAEUAhnjvt5fxvSIz5CpDVjNmzNCcXVO5Z8+eGq9fvz68A0ua673vkYsXjrt8rVmZWG4nmNuJaz16JMvl6aefBgC0bNlSc7I2OACcdtppAIBNmzZl8hBZs2mKU83a+rv88ssBJCc6AkD9+smVLWVipK1TG9tzq7QLTJw4UXN2H51MLtxgsGbTFKearSzbqiOT0QGgUaNGAIDFixdrzra9ZmmxBNZsJUS1bmUfkJtvvllzp5xyisbSjmUX9ujUqZPGLVq0ABA8P7/zzjsay8IfQHBxkbB4712qfHlWx5oJIOUXA+hXSp4oZ1izFDesWYob1izFEes2Wso1J4SIiIiIiChTKrREbz6RVYFk9RYguGeDHfIiigJZycKu/W1XF5IVsYBkS8z27cnR5BdeeEHjvXv3Zu04Kb/ZFVWk7c+2UNWsWVNjaTGwLQK29uzqhFFYqYUoFdtWtWHDBo2lnTBLrYJUhUiL1N133625Zs2aaSytWdJKDQRXdJWVsOzeSbfddluJ7x81HAkhIiIiIqJQVfmRkOHDh2uuQYMGGttdf4mixI7Y2R3Ru3TporE8ubPrkNs9F/jUmTLJ1qSdmGtjonxgRz2yNPGcqjC7t8fGjRs1/vjjjwEA9erV09y6des0lt3PJ0+erLmtW7dm7TgzhSMhREREREQUKt6EEBERERFRqKpsO5b48ssvNbYTzoiiyrZS2RarV155ReOTTjoJAPDuu+9qbvr06RpzIiURUcWxBYvCYluzZBGE559/XnN2z6XPPvsMQPCaNg61ypEQIiIiIiIKFW9CiIiIiIgoVC7M4ZpMbnFfBcz13vfI9UFUdazZCmHNRgBrtkJYsxHAmq0Q1mxEsG7Lz3ufcpd6joQQEREREVGoeBNCRERERESh4k0IERERERGFijchREREREQUqrD3CdkKYG/iv/mmETL7c7XK4Pei9LFmy481Gw1bARQh87/fKGDN5ifWbPmxZqMjX68PQqvZUFfHAgDn3Jx8XNkhX38uyt/fbb7+XFQsH3+/+fgzUVI+/n7z8WeipHz8/Yb5M7Edi4iIiIiIQsWbECIiIiIiClUubkLG5eA1w5CvPxfl7+82X38uKpaPv998/JkoKR9/v/n4M1FSPv5+Q/uZQp8TQkREREREVRvbsYiIiIiIKFS8CSEiIiIiolCFehPinOvvnFvqnFvhnBsT5mtninOuhXNumnNusXNukXNudCLfwDk3xTm3PPHf+rk+Vqo81izFTT7ULMC6rUpYsxQ3rNkMvX5Yc0Kcc9UALANwDoB1AN4HMMx7vziUA8gQ51xTAE299x845+oCmAvgIgBXAdjuvb8vUZD1vfe35/BQqZJYsxQ3+VKzAOu2qmDNUtywZjMnzJGQkwCs8N6v9N5/BeAfAAaH+PoZ4b3f4L3/IBHvBrAEQCGKf5anEp/2FIp/iRRvrFmKm7yoWYB1W4WwZiluWLMZEuZNSCGAtebf6xK52HLOtQbQHcBsAAXe+w2JD20EUJCjw6LMYc1S3ORdzQKs2zzHmqW4Yc1mCCemp8k5VwfAcwBu9t7vsh/zxT1uXPuYIoU1S3HEuqW4Yc1S3OSqZit1E1LBiTmfAmhh/t08kYsd51x1FP+yJnrvn0+kNyV666THbnOujo9Kx5plzcZRBeo2b2oWYN3GGWuWNRs3rNnwazbtm5DExJxHAQwA0BnAMOdc54N8yfsAOjjn2jjnagAYCmByuq+fK845B+BJAEu89w+YD00GMCIRjwDwUtjHRgfHmmXNxlEF6zYvahZg3cYZa5Y1Gzes2dzUbNqrYznnTgbwC+/9eYl/3wEA3vt7D/I1HIIsv63e+8a5Poh8wprNOtZsFlS0blmzFcKazQLWbFaxZrMgjZo9H8Cr4R1hvHnvXap8ZdqxyjUxxzk30jk3xzk3pxKvVRUV5foA8hBrNrtYs9lRZt2yZtPGms0O1mz2sGazo0LXBwB+FdaB5bNDs/0C3vtxAMYBfNpB8cCapbhhzVLcsGYpjli3mVWZkZC8mphDVQJrluKIdUtxw5qluGHN5kBlbkLyZmIOVRmsWYoj1i3FDWuW4oY1mwNpt2N57/c7524E8AaAagDGe+8XZezIiDKMNUtxxLqluGHNUtywZnMj7dWx0nox9s9VxFzvfY9cH0RVx5qtENZsBLBmK4Q1GwGs2QphzUYE67b8srE6FhERERERUYVlfXWsfFCrVi0AQIsWyTlLe/fu1Xj9+vUAgDBHlSg/HHJI8jlAQUEBAKBPnz6aa9asmcarVq0CALz99tua++yzzzRm/REREVFccCSEiIiIiIhCxZsQIiIiIiIKFduxStGgQQONX375ZQBA69atNffvf/9b41GjRgEAvv7663AOjmLNtmB17NhR4wceeAAAcNppp2nusMMO01jarWwL1uTJyRUEx44dqzFbBImIiOLFueT87Tp16mh88cUXAwC6du2qOduaPXXqVADA7t27NVfW+799rVSx/frS4sriSAgREREREYWKIyHGoYcm/3fceuutGnfv3h1A8E6xRo0aGn/77bchHB3li+rVq2s8aNAgjU855RQAQO3atTVna06ePthRuuHDh2t8/PHHa3zppZcCAIqKijJ12EREZMi5/IgjjtCcvR7Ys2cPAHZJUPnVrVtX47///e8an3zyyQCC9XXkkUdq/M477wAILppU2rWpdGPUrFlTc02aNNG4Xbt2AIANGzZozl5L2NGWyuJICBERERERhYo3IUREREREFKoq345l21169EhuQnrVVVdpLEOuX3zxheZee+01jdmORRVhh0A7d+6ssdTi/v37NWdjqTPbCmhbu0444QSNX3jhBQBAv379NLdjx45KHzsRUVVTrVo1jZs2baqxTBY+/fTTNffVV19p/OKLLwIAXnnlFc19/vnnWTtOij/bbm33ppM2bWnxA4CZM2dqLO/vpV2Pppp4bq8fevXqpbHUtW27koVzAGDRokUAMjNBnSMhREREREQUKt6EEBERERFRqKp8O5ZdXeDOO+/U2K52IStbLF26VHPTpk3TmHsxUEXYlVKmT5+usaxIYfcGmTt3rsbffPMNAKBv376as3vX2K+TtcTvv/9+zV1zzTWVPXSiwLC+tKnYFkFbh9K60qdPH81dfvnlGh977LEaS2vAxo0bNXfPPfdoPHHiRADBFkWibLGrZdqVB3/xi19o3Lt3bwBArVq1NCfnaQAoLCwEAMyZM0dzq1at0pjXDnQge+3ZsGHDEh+3bdV2nxC5riitpsra5+PEE0/U+OyzzwYQbC386KOPNJZ2rEzgSAgREREREYWqyo6EyDrJAwcO1Jw81QCCT/ZkreQxY8ZobsuWLdk+RMpTpS1wIDuh24lny5cv11hq0n7NTTfdpPFZZ52lsTxVlglmAHDjjTdqvG/fvvR/AKoyZKTDjhgPGDBA4wsuuAAA0KVLF83ZibvyVM9OgCyL
fRI4btw4jYcMGQIA+N73vqe5bdu2lfv7EpWHjPQdd9xxmvvnP/+psZ0sLNcRdvRj165dGsvEdLvfAkc/6GDsSK8djZO6lOsEIDgqUpEFkqQGbS1269ZN48MPPxxAcIQv1f5lnJhORERERESxw5sQIiIiIiIKVZVtx5Ih/9tuu01z9evX19gOM02ZMgUAMGPGDM1xbxBKl60tO5wqa37bdeTtJHZhJ+7u3LlT4+7du2t81FFHAUgOqwLBiWd2fXEiy048l8US/vGPf2jOTiaXdi1b0/brpV2lPFK1CNh2BFmQwR6LbTFct24dgGC7oyXHaidbEh1I9nF6+umnNde2bVuNbX1L68yKFSs0N2rUKI1nzZoFgNcLVH516tRJGYslS5ZobN//K0Jq2O5JYvcsS3Vety2H9m+gssp8h3DOjXfObXbOLTS5Bs65Kc655Yn/1j/Y9ypI4qAAACAASURBVCAKE2uW4oh1S3HDmqW4Yc1GS3keU00A0P+A3BgAU733HQBMTfybKComgDVL8TMBrFuKlwlgzVK8TABrNjLKbMfy3s9wzrU+ID0YwJmJ+CkAbwG4PYPHlRUyxAQA9957LwDg6KOPTvlxuwLBgw8+CIDD+HERp5q1Q5xSc3boPtXa3nb1jIUL9WEONm3apLG0Y9maPu200zRmO1b0RKVubQtVz549AQBt2rTRnK0pqV/bQvjll1+WiG3b4YIFCzS2ey8VFBQAAC666CLNSR3b4+rUqZPmzjjjDI3/+9//lvhZGjVqpPGaNWsAAOvXry/xeZSeqNRsZdn2kkcffRRAsD3F/k3Y87PU74UXXqi51atXZ+swKQOiWrNSg0OHDtWcXVVQzrV///vfS+QqSurZ1q1tzZJjsed1u1JnJtsL050TUuC9lzXnNgIoKO0TnXMjAYxM83WIMoU1S3FUrrplzVKEsGYpbnh9kCOVnpjuvffOuVIXC/bejwMwDgAO9nlhsOsgy669qe40AeCRRx7R2E4EoviLUs2WNYnXjnqUtSb33r17S3yu/ZpsrfNN4ThY3WayZu15cNq0aQCCe9O0b99e49mzZwMAJk+erDm7I/TmzZsBBCeL25qz+zGdd955AIBLLrkk5efKqMqHH36oublz52ose4bYEWu7n9Pu3btB4QqrZiurefPmGl9xxRUAgudjW4e2vvv16wcgWecUf7m6PqhXrx6A4EiwrUGZhP7ee+/ZY0nrtWTxBXuutSPcMtIho8dAcAQ7k9JdoneTc64pACT+y79AijrWLMUR65bihjVLccOazZF0b0ImAxiRiEcAeCkzh0OUNaxZiiPWLcUNa5bihjWbI2W2Yznn/o7iCTuNnHPrAIwFcB+ASc65awAUARiSzYOsDNvucvfdd2ss6y/bCTZ2H5B77rlH43Qn/1BuRLVmpRZtC6DsVwMAtWrVAhAcYt2+fbvG0o5iP273AbG1LjVra3fx4sWV+wEoq6JYtxs2FLdJ270PbFufTFy0+9nYdig5v9qalToHgMsuu0zjX/3qVwCCEyTtxEjZH+SXv/yl5rZu3VritbgnQ3iiWLPp+MlPfqKxnJ9L289p+PDhGrMNK36iVLP2PVvasOxiGrYGZTGZXbt2Vfp1ZcGPVq1apfy4nM+lHffA17XXMAd+DVCxNrHyrI41rJQP9Sv3qxCFiDVLccS6pbhhzVLcsGajJd12LCIiIiIiorRUenWsqLPtLnafBFl1YM+ePZobPXq0xvv27Qvh6Cjf2dUt6tcv3oR1yJDkSO/AgQM1lmFY285i9z6YNGkSgOAeNscdd1zK15LViOyKWcuWLdOYq2JReUid2KF4e86UmrNtBdahhxa/xRQWFmru6aef1vjEE0/UWIb47bC+XaXwrrvuAhDch4QoE15//XWNU62cOWXKFI2ztUoQVT22nVra/OyKgfb9+8knnwSQfrupvT6Qa2F7fWyvCaQN1q4oaD9XzsGpVuSs8HGl9VVERERERERpyvuRkDvuuENjO6FS2HWQi4qKQjkmqjpkF2gAuP/++wEAgwYN0txhhx120K+3T4plBMXuk2DX9m7cuLHG8tSjtJ2suU8IpSvVkzj79K5JkyYaDxgwAEBwMnmqnXmB5GjL448/rjkZ/QA4AkLZ884772gs59eOHTtqLtUCIUTpsOe8wYMHa9y5c2cAwffkFStWaDx//vwSHy+LHf045ZRTNJbzcd26dVMel5zj7XldRrWB5Gh1JhYB4UgIERERERGFijchREREREQUqrxtx5JhpOuuuy7lx2X/BNvaQpQJtkXqpptu0ljasFK1BQKp96OxQ6Bt2rQBADRt2lRzdjjUTnKTr6tXr57mOnTooLFMUreTgFNhuxYdjAzhy7rzAPCzn/1M42HDilfDtG2Hts5Xr16tsewT8tJLyX3C2PpCYbDn7BYtWgAITsS1bbVElWHPhTfccIPGsnedXZhm3rx5Jb7OtrPa9+eaNWsCAHr16qU5u9iSzcvnWvv379d448aNAIJ75+3cubPE52bi+oAjIUREREREFCrehBARERERUajyth2rb9++AIJDX3a46eOPPwYAPPXUU5qz64LblQLYkkIVYevo1FNPLZG3LVBbtmzRePHixQCCe9S0bdtWY1l1qFatWpqzLVi2pUDYYdcrrriixNfZdsTNmzdrLOt/23YY+/dDVZc9N0pr4WWXXaY5G8v5155D7UpD9957r8YvvvgigOCKbkTZYlcO6tGjh8ZynrbnO7uKkG2R5TmRKsq+pzdv3lxjmUJg27H69OmjcadOnQAA7dq105xtzZK6tHVtpdrLybbGzp49W2Np41q+fLnm7HGxHYuIiIiIiGKLNyFERERERBSqvGrHssNNsiqLHS5du3atxqNGjQIQHG6qX7++xnaYyq4KQFQWu2LVZ599prG0YX3xxReae/bZZzV+4oknAAA7duzQnN2A8Hvf+x6A4AZHhYWFKY9BhmRti5bdrEhWM7IrEf3f//2fxrJqkf1Z7N8EWxSrLtuOcswxxwAALrzwQs3ZdgCpeduCKCuzAcDMmTM1lr8L1haFwa5S2LBhQ43nzJkDILhRbCY2ZaOqTc6L55xzjuZsm5+8V9spBLb16uijjwaQuq3KKu38afNyXfzGG29obsSIERrLdUt5vldlcSSEiIiIiIhClVcjIXZd77POOgtA8KnzY489pvHChQsBBCf5nn/++RrbUZPXXnsNQHBiDlFp7OjbW2+9pbFMMrNPEexTjT179gAAdu3apTlbc7Jmt90nxE5ca9SokcYyyS3VJGIg+TTbjm7YmpcnIXZiOp9QV122juxT45NPPhlAcMTNLqwgCyDY0ZFt27ZpbEf9+LSZwiATz2WhDyC4QMiqVasAAL1799acPbfaJ9X2+oLoYFJ1J9hzpbw/23Ntqknm9n3Yvj9v2rQJALB7927NyZ43QPKaAABWrFgBALjxxhs1Z8/FYb7XcySEiIiIiIhCxZsQIiIiIiIKVV61Y11yySUay4Qfu/fB+++/r/GRRx4JIDih8tJLL9V4+vTpGr/++uuZP1jKW7atZPLkyRp
fffXVAIJDpBdccIHG0hIwbdo0zdmhWfk6u7a43Qck1d42tt3KDrfK34KtbduSwNZDsmzbaseOHTWWWrQTLO3nSguA3fvjP//5j8Z20Q+2+1G22PNkmzZtAAAtW7bUnN3bSRZbsHswSKsLwHMjVY6dDN66dWuNU7VrS4s2kKw72+JtF7aRNvAf/OAHmrv22ms1tq1bcg62dZ2r82+ZIyHOuRbOuWnOucXOuUXOudGJfAPn3BTn3PLEf+uX9b2IwsCapbhhzVLcsGYpjli30VKedqz9AG713ncG0BvADc65zgDGAJjqve8AYGri30RRwJqluGHNUtywZimOWLcRUmY7lvd+A4ANiXi3c24JgEIAgwGcmfi0pwC8BeD2rBzlQdhh1uuvv15jWZVF2q6A4NCUrKQlQ69AcJjVtmPZYSyKvijVrOy3AQDjx48HAPz4xz/WXLNmzTSW/PDhwzVnV7qQmrWrY9kVL+xKGlLLdp+SDz74QOMXX3wRALBmzZoSX0Phi1LNWlJTnTt31pxdT15WELI1aVcPkiH+9evXa27KlCka25XkKF6iWrNC9kICkq2wAFBQUAAg2Opiz8Oy4ps9n9pzp13RiOInV3Ur57rFixdrbuzYsRpLDUp9AsBHH32ksayaafdcsmQFt/bt22vOtsbauv34448Dx5RLFZoT4pxrDaA7gNkAChK/TADYCKCglK8ZCWBk+odIlD7WLMUNa5bihjVLccS6zb1y34Q45+oAeA7Azd77XQdMgvXOuZSzWrz34wCMS3yPjM98qVOnjsatWrXSWPZBsGvaDxo0SGO5AywqKtLcz3/+c43t5GBOmIynKNSsnRgu+9Rs3bpVc3feeafGMlGybdu29lg0lidz9gmd/f72yZ6McNgdqd98802NFyxYAIDr3EdNFGrWkgm7Q4cO1dy5556rsTx9sxN7U+1988c//lFzK1euzNThUQRErWZldPiRRx7RXL9+/ezrAgguimD3bpDuCjt6d9ddd5X4eoq3XNWtXbhm+/btGsvCMXakpKy9k+wxN27cGABw3HHHpfxcu/+YdGjIdTKQu1GRci3R65yrjuJf1kTv/fOJ9CbnXNPEx5sC2Fza1xOFjTVLccOapbhhzVIcsW6jozyrYzkATwJY4r1/wHxoMgBpDh4B4KXMHx5RxbFmKW5YsxQ3rFmKI9ZttJSnHetUAFcCWOCc+zCR+ymA+wBMcs5dA6AIwJDsHOLB2aFRO7QlE9Ltx+2EnldeeQVAsAXLDr9yyDXWIlmz0vr0t7/9TXN2AYQ//OEPAIDTTz9dc7Vr19ZYhl7tBHK7t8d///tfjWX98KVLl6b8XJmkxjqPjEjWrExs7Nu3r+bsxEk7nC/27t2r8e9//3sAwLhx4zRXVosBxUYka1bOk506ddKc3cdGPi6L1wDBc+qGDcXTAq677jrNrV27NjsHS7kQybqV9+KKvCfbdiyZkG6nKNjvZVu35VrYLiJir4/lHB3G9UF5VseaCcCV8uF+peSJcoY1S3HDmqW4Yc1SHLFuo6Vcc0KIiIiIiIgypUJL9EaRrCgAAAMHDtT4yiuvBBBs0Zo0aZLGMuTK1gAKmx3itPt0XHLJJQCC69sfffTRGsvqF9u2bdPc8uXLNbarvUi7la1v+7p2GJeoNLLSkB22t6Sm7CprDz30kMa/+c1vAHCvJQqPtFbZ9/tbbrlFY9uaJdatW6fxNddcAwB47733NMe2VYoi+z4u0wnsNa/dR8zuOSbXB7ZFy666mU5rWLo4EkJERERERKFyYd7hZ2OfkDw213vfI9cHUdWxZiuENRsBmaxZmbx7++3JjYNllBlIPlH79a9/rbnnnntOYzvhN6JYsxGQjfOsnXj+u9/9TmNZ+GPWrFmak0VBgOQ+DRHukmDNRkSUrg9kr5sWLVporrCwUGPbNbRs2TIA4e4N4r1P2X7BkRAiIiIiIgoVb0KIiIiIiChUbMeKLg65RgBrtkJYsxGQyZqtXr06AKBXr16a69atm8bvvvsuAGDRokWakxatmGDNRkC2z7N2Aq/EEW63KgtrNiJ4fVB+bMciIiIiIqJI4E0IERERERGFKvb7hBARUXZI64qsGAQE97bZunUrgOA+IURRY9vOuecHUXRwJISIiIiIiELFkRAiIkpJ9vmwu/DamIiIKF0cCSEiIiIiolDxJoSIiIiIiEIVdjvWVgB7E//NN42Q2Z+rVQa/F6WPNVt+rNlo2AqgCJn//UYBazY/sWbLjzUbHfl6fRBazYa6WSEAOOfm5ONGO/n6c1H+/m7z9eeiYvn4+83Hn4mS8vH3m48/EyXl4+83zJ+J7VhERERERBQq3oQQEREREVGocnETMi4HrxmGfP25KH9/t/n6c1GxfPz95uPPREn5+PvNx5+JkvLx9xvazxT6nBAiIiIiIqra2I5FRERERESh4k0IERERERGFKtSbEOdcf+fcUufcCufcmDBfO1Occy2cc9Occ4udc4ucc6MT+QbOuSnOueWJ/9bP9bFS5bFmKW7yoWYB1m1VwpqluGHNZuj1w5oT4pyrBmAZgHMArAPwPoBh3vvFoRxAhjjnmgJo6r3/wDlXF8BcABcBuArAdu/9fYmCrO+9vz2Hh0qVxJqluMmXmgVYt1UFa5bihjWbOWGOhJwEYIX3fqX3/isA/wAwOMTXzwjv/Qbv/QeJeDeAJQAKUfyzPJX4tKdQ/EukeGPNUtzkRc0CrNsqhDVLccOazZAwb0IKAaw1/16XyMWWc641gO4AZgMo8N5vSHxoI4CCHB0WZQ5rluIm72oWYN3mOdYsxQ1rNkM4MT1Nzrk6AJ4DcLP3fpf9mC/ucePaxxQprFmKI9YtxQ1rluImVzVbqZuQCk7M+RRAC/Pv5olc7DjnqqP4lzXRe/98Ir0p0VsnPXabc3V8VDrWLGs2jipQt3lTswDrNs5Ys6zZuGHNhl+zad+EJCbmPApgAIDOAIY55zof5EveB9DBOdfGOVcDwFAAk9N9/VxxzjkATwJY4r1/wHxoMoARiXgEgJfCPjY6ONYsazaOKli3eVGzAOs2zlizrNm4Yc3mpmbTXh3LOXcygF94789L/PsOAPDe33uQr+EQZPlt9d43zvVB5BPWbNaxZrOgonXLmq0Q1mwWsGazijWbBWnU7PkAXg3vCOPNe+9S5SvTjlWuiTnOuZHOuTnOuTmVeK2qqCjXB5CHWLPZxZrNjjLrljWbNtZsdrBms4c1mx0Vuj4A8KuwDiyfHZrtF/DejwMwDuDTDooH1izFDWuW4oY1S3HEus2syoyE5NXEHKoSWLMUR6xbihvWLMUNazYHKnMTkjcTc6jKYM1SHLFuKW5YsxQ3rNkcSLsdy3u/3zl3I4A3AFQDMN57vyhjR0aUYaxZiiPWLcUNa5bihjWbG2mvjpXWi7F/riLmeu975PogqjrWbIWwZiOANVshrNkIYM1WCGs2Ili35ZeN1bGIiIiIiIgqLOurYxERERGF6ZBDDkkZf/PNNwCAMLtAiDKleG/BYkceeaTGtWvXBgA0aNBAc9WqVdN42bJlGn/xxRcAov
E3wJEQIiIiIiIKFW9CiIiIiIgoVFW2HUuGqexwlR2yPfTQ5P8aGb7dv3+/5uwwlsSpcgfGRERElDn2vfuwww4DAHTp0kVzjRs31viDDz4AAOzYsUNz8h5v4/K8h/O9nTLN1nLdunU1Hj16NADg+9//vuaOOuoojeX69PPPP9fc4YcfrvGePXs0vuGGGwAAL7zwQqYOO20cCSEiIiIiolBVqZGQgoICjX/yk58AAM4991zNycQeANi5c6fGn332GYDgxB77BETuQFeuXKm5l19+WePVq1cHPo+ovOwktOrVqwMA6tWrp7kWLZIbvH799dcar127FkCydgE+tSOi/GG7GNq1a6fxkCFDAABnnXWW5pYuXarxvn37ACRHRIDkRF0AqFGjRonvb68NLLlOkO8J8DxLlWPr58svv9RY3tPfffddzdn39xkzZgAITkwfOXKkxs2bN9e4V69eAIDJk5N7MdrRwDBxJISIiIiIiELFmxAiIiIiIgpV3rdjSQsLANx2220a//CHPwQA1KpVS3O2ncVOCBInnXSSxnaim3zu3r17Nde6dWuN77rrLgDAli1bKnz8VHVIG0DHjh01d/XVV2vct29fAEBhYaHm7MSzb7/9VuN169YBAP7f//t/mnv++edTfi5RJtkWQrvAhyhtgQ+iimjUqJHGjz/+uMY9ehRvJm5bVebMmaNxUVERgOD7tT0fSn3a2rXXEe3bt9dY9mn48MMPNbdp06aK/ihEyp4TbZvgX//6VwDA3/72N82lOpfa8+/06dM1vvnmmzWeNWsWgNT754SNIyFERERERBQq3oQQEREREVGo8r4dS9YMB4Bu3bppLK0vdhjWDt++/vrrGq9YsQJAsE3mggsu0LhOnToAgqtpSA7I3TAXRZMdAm3ZsqXGf/7znwEAffr00ZxtAyiLHcaVlgHbpmA/Lq1ZbIehTGvVqpXGf/jDHwAE17Nfs2aNxra14O233wYA7Nq1S3OsTzqQtJucc845muvZs6fG0mL9ySefaO6ZZ57RuLyrVdr2bNsWY/MXX3wxgGQLGAA8+uijGttrCqoa5NoSAL766quMfV+5jizretKeMzds2KDxe++9p7GcY6NwbcqRECIiIiIiClXej4TYO9F58+Zp3LVrVwDA7t27NTdu3DiN//3vf2ssT6PtSIgdYZEnM/ZpybPPPquxfbJHVZfUid3b47XXXtO4U6dOgc8DUq8ZbuvJPmk74ogjNJa9RGTiJAD85je/0VjWFOdiCZQJdhLvE088obHs1WBr1u7XZM+psm/DhAkTNGfXxOc+SwQkz4/2Ka49D0p3w3PPPae5jz/+WON06sieh20tv/POOwCAH//4x5o7+eSTNX7jjTcCx0T5L5OjH+mwe9rInjkAsHnzZo3nz58PIBp1yZEQIiIiIiIKFW9CiIiIiIgoVHnfjmW3vX/44Yc13rdvHwBg5cqVmnvrrbc0tpPM+/XrBwA499xzNZdqfxE7mV3aXYBoTP6h3JOaGj16tObatWunsbQZ2ImPdn17WSd88eLFmrPDqWeeeabGI0eOBAA0bdpUc3Zy8IknngggWLNE6bJ71xx//PEay/n1vvvu05ydMHzKKado3L9/fwDJBRqA4N/Km2++mcEjpriSc57dA+GVV17RWFqsx48frzl7HVBZtjVLWlzs9+/Vq5fGch1g9yQhygZpbR07dqzm7J41EydO1DhKUwTKHAlxzo13zm12zi00uQbOuSnOueWJ/9bP7mESlR9rluKIdUtxw5qluGHNRkt52rEmAOh/QG4MgKne+w4Apib+TRQVE8CapfiZANYtxcsEsGYpXiaANRsZZbZjee9nOOdaH5AeDODMRPwUgLcA3J7B48qK9evXa/z73/8eQLBVyrZY2TaBO++8E0CwncWSlgO7QgaHX3MnqjUr7Vi2NuzqbDKk/8gjj2jOrhS0Y8cOAMF2ALsqkV0h5tRTTwUANGzYUHN2f5IzzjgDADBlyhTNsW0wt6Jat+Vx2mmnafzpp59qfO+99wIAXn31Vc3Z+rWrs8lKWq1bt9bc0KFDNZ46dSoA1mmU5LJmN27cqPFdd92l8eGHHw4geD7M1n4zUotbt27VXLNmzTSWlRCXLl2a9WOh8onzefZATZo00fjll18GELyOvfTSSzWOUguWle6ckALvveyCshFAQWmf6JwbCWBkmq9DlCmsWYqjctUta5YihDVLccPrgxyp9MR07713zpV6a++9HwdgHAAc7PPCYJ+gyV2h3ZHaTqi0EynlaYZ9gmGf9g0aNAgA91yIi1zVrEw4f+yxxzRndzGVnaTtxF275niqNb3tniJ79uzReOHC4nZX2Q8HCK4ffuyxxwIAGjVqpDk7iY2i52B1m6vzrIzude7cWXN2jyWJP//885RfL6N7QHJPGztiZ0cNba1TPGSzZu350HY5yJNg+35f2t5LmWLr1H5/Oc/KuR0o/W+BoiFO17S9e/fWuH794mksL774oubmzp0b+jFVVLpL9G5yzjUFgMR/N5fx+US5xpqlOGLdUtywZiluWLM5ku5NyGQAIxLxCAAvZeZwiLKGNUtxxLqluGHNUtywZnOkzHYs59zfUTxhp5Fzbh2AsQDuAzDJOXcNgCIAQ0r/Drllh2FtLEP+th3ld7/7ncZ2/wZhW7AuueQSjVesWJGZg6WMiGrNSvuAnVBpW1dkGN/Wqd2vxrZTiZo1a2pcr149jaU9QdaxB4CCgmSbq9R39+7dNScTf4HgXiUUjqjW7cHUqVMHQHCfkAcffFBjaVOxLSq2vlu1aqVxmzZtAARbEJ999lmNU7UjUm5FpWZtfckeYLatL1tkYRB7vrStWc2bNwcQvM6wrWOpFlngxPXsikrNpsuePy+//HKN5Vw8a9YszcVhEY/yrI41rJQP9cvwsRBlBGuW4oh1S3HDmqW4Yc1GS/YfFRARERERERmVXh0rTKlaq+zqVjY+7LDDAATX7JZ9GABg//79AIAbbrhBc126dEn5umvXrgUAXHDBBZpbvHixxmwToIqw9ZKqTcW2YNk1v1u2bAkA6Nu3r+ZsO1aqdcDt+vUNGjTQWFq77H44H3zwQcqvS3WsbBkgAGjfvj0AYN68eZpLdW605+66detqPGZMck8wqc/XXntNc7Nnzy7xvYgORs5Ntl5StWLbnG1bKevcJtcWQPKaQc7NQHA/MWkttNch27Zt0/iLL74o8f3j0EJDuWPf8y+88EKNpS7tKq/2mti2uUYJR0KIiIiIiChUkR8JsU+FZR1kILlD77XXXqs5O5lcJunar7c7qMrkMLuPgt192u6ZcOWVVwIAFi1apDk+CaZ0pXoqBySfWsgEMwDo1KmTxldddRWA5M7SQHBvEDvJfcGCBQCAGTNmaK5GjRoay2vI3gxAcK8HqXX71MVOco/qUxXKPvt0TZ4Av/POOyk/V86ptqZHjRqlsR1dllr+0Y9+pLlUT4qJKsqOKMveHR07dtScPSfLPk323GivE0466SSNjzjiCADBc6tdIESeTsuIIQCsXLlSY/lbks4MIDmxHkg9qkNVm73Ot
edi2X9m9erVmrM1bBdPkK6JKNQVR0KIiIiIiChUvAkhIiIiIqJQRb4dy05i/PnPf67xd7/7XQBA48aNNWfbqWR41U7yst9L2gjscJaduG4n6a5atarCx22Hd+W47LFEYRiMss+2A8rQvG0rtOvHS8uK3c/j9NNP17hPnz4AgkOs27dv13jhwoUl4i1btmhOWgcA4Oijjw68JgCcccYZGjdt2rTE8b/44osasx2r6jrmmGM0vvjiiwEk2/+A4DlV9gE555xzNNerVy+NbVvro48+CiC98y3Rgez1wNChQzW++eabAQSvHWy7lsT23GfZ6wRp8batqra1qmHDhgCCfzPz58/XeOfOnQCC5/HSFi4hAoL70tl21iVLlgAANmzYoDlbw926ddNY6sqea+1iNGFen3IkhIiIiIiIQsWbECIiIiIiClVk27Gkncm2o9g1kaW1xK6ekmqFCZuzbSy2DUXYlYp69uyp8dixYwEAEyZM0Ny6des0lvXtZegVCA7vfvrppwCCw2R29S2uC55fbDvK2WefrbGs5GZXt7A1t3HjRgDBVSzatGmj8eGHHw4gOYQPAB999JHGtiZt3Qs7NNuiRQsAQNu2bTV33HHHaSw1PXnyZM2xTqsu29pi22IHDhwIAOjfv7/m7D4IUtN2xR9bh3ZFt3vvvRcAW1UpM2zbq7RgAclWVNsybWtSznO27cqek/fu3auxrOhm26bsuVfeyrdB1QAAIABJREFUC6S9FQiutCXnctuileqahm1ZJGxdvvHGGwf9XFu3H374ocay19jw4cM1Z/dnmj59OoDkilvZxJEQIiIiIiIKFW9CiIiIiIgoVJFtx5JhTBnuB4Kr+8jwvm1HmTJlisayakXz5s01d+mll2osq7akWlELCA7lXnHFFQCAIUOGaM6uDiTDt7ZdxQ6ZFRUVAQgOd40bN67EsVJ+sJv+PfTQQxpLLdo627Fjh8YyDG9rJ1VN2XYA2/p13nnnaSztgNJ6ACRbsIDkqlz2+9vvK6+1e/duzdmhXapabNugPfdJTdhWwGeeeUbjefPmAQjWnm3d+u1vf6uxbKBFlAm25mQ1TCB5zrR1vGbNGo1lg9f//Oc/mqtdu7bGrVu31riwsBBA8O9D2maBZGuhtCUCwc0Me/ToEfg+QPA6xq6ERFQZts1v6tSpAILv77ZlUFbdks8DgtcqmWwP5EgIERERERGFKrIjIfJkoWPHjpqzExZlYve//vUvzdm9PWQSup1sa5/kyuRJ+/TNPgmWPR2A5L4N9mmHfZqdaiTETjyX17ATNu3TbsoPUh92RMKOxNWoUQNAsA7thF2ZpGgXTbALHNSsWRNAcJTOPu2zoyJyLHbCpX1dmXAmEysBYPHixRq///77AIDly5drjhPTqy5bOz/72c80vvvuuwEER0JSjdQNHjxYc/ap8Mcff6wxJ99SJtmatbFM9rajDL///e81tiMgwo5uyLkRSHYx2MnotmNDOi1sx8VRRx2lcZMmTQAA69ev15z9+5DrDP5tUCbJKODMmTM1Z0dCpOtH9oECgKefflrjTF6/ciSEiIiIiIhCxZsQIiIiIiIKVWTbsaT1yrZA2dYSYYeQTj31VI1lHeRjjz1Wc3YSzj//+U8AwPjx4zVnJ6fZ15XWLtsaY1vDpN3K5uzXS8uNHRJmO1bVYIfppR3LDq3bYXrZZ8a2W9mhfWltsa2CpbUISq3ZPUVs68u7774LAHjnnXc0Z+t/7dq1AIJ/M2wJqLrs737lypXl/jqp1WHDhmnO7pcUxjr0VDV98sknGts9Eo4//ngAwXOf7OUFJBftqFu3ruZs65at31T7MdnzsMT2PG3bZmUSvH0fsH8TPOfmn9IWMZBasteGYe6ZZKcLyMJN9vp5wYIFGs+ePTtjr1vmSIhzroVzbppzbrFzbpFzbnQi38A5N8U5tzzx3/plfS+iMLBmKW5YsxQ3rFmKI9ZttJSnHWs/gFu9950B9AZwg3OuM4AxAKZ67zsAmJr4N1EUsGYpblizFDesWYoj1m2ElNmO5b3fAGBDIt7tnFsCoBDAYABnJj7tKQBvAbg9UwcmQ1MLFy7UXKdOnTSWFau+973vac4ObUls9+B49tlnNb7//vsBANu3b9dcaUOfXL8+XnJVszJ0OmnSJM2df/75GksbgB2ul1Y/INnuZ4drLWnrS9UCAARXVZH9SV555RXN2XW+5XvYFa9SrX7FdoBw5Kpms8HWd8+ePQEAXbt21dxf//pXjcNsN6DMinrN2ram0aNHayz71Nj2U7sioaxYtXTpUs1t2bJF49LOv8KeMyW2dZ7q6+05n38T2ZWrupXzYvv27TVn90mSa9o333xTc3ZvuVWrVgEIXo9msnXLth/K9XMYK2JWaE6Ic641gO4AZgMoSPwyAWAjgIJSvmYkgJHpHyJR+lizFDesWYob1izFEes298p9E+KcqwPgOQA3e+932add3nvvnEv5yNR7Pw7AuMT3KPdjVXmCYPcB6dOnj8YyccZOFrdkBOTFF1/U3MMPP6yxjIDwSW/+CrtmRVFRkcaDBg3S+MQTTwQAnHnmmZqzo3uyfvzWrVs1Z0c35MmcnSS5bNkyje3kSi58EE+5qtlMshNvR44sfq+29Wh34aX4i2rN2vf2JUuWaCx72nTo0EFzjRs31lgmntvJ7Ha36WzgdUj4wq5bWXygXbt2mrOjD4WFhQCS50wAOOecczSWa13bvTNlyhSNn3nmGQDA5s2bNWcXQ7I1JiN/PXr00Jzdy0kWFJk1a5bm7D58mVSuJXqdc9VR/Mua6L1/PpHe5Jxrmvh4UwCbS/t6orCxZiluWLMUN6xZiiPWbXSUZ3UsB+BJAEu89w+YD00GMCIRjwDwUuYPj6jiWLMUN6xZihvWLMUR6zZaXFnDgM65PgDeBrAAgMx8+SmKe+gmAWgJoAjAEO/99pTfJPm9KjzmaPcB+fGPf6zxRRddBCA4ccaucyyTe5544gnN2daWMCbcVNJc732Psj+NDpTrmi3H8Wls976pWbNmic+1kxglthPQIjaMz5pNU9RrtiLsxMtXX30VAPD2229r7rrrrtM4Audh1mya4lqzcs6VicAA0KxZM41lfxG7qE22z7N2Ynqqie0HYM1WQph1m2oRGvs+L4vVAEDLli0BJNuygWBra/fu3QEEW7htm+vcuXMBpN7vAwC6dOmisbyGPf/aCfGyeMjMmTM1JwvjpMt771Lly7M61kwAKb8YQL/KHBRRNrBmKW5YsxQ3rFmKI9ZttJRrTggREREREVGmVGiJ3lywKwE89thjGsvQkR2O2rNnj8arV68GAOzcuVNzERj6JwoMsadqtyKKG9tOMmDAgBIf/8tf/qIxz8OUS1J/dhVC23olK2GF0eoqfzf2tWyLLt8T4s3+Xm2NCbu6lW3dSkVqpbQWbmndkrYuINnCBQSvhWWlrvfff19zdiVaaekK5W8g669ARERERERklDkxPaMvlsHJZ6nuGm0uD3Yd5eSzCMj1JN+YYc1GQC5qtn79+hr/+9//1lie3g0cOFBzdh37CGDNRkAu
atZeL8Ts2oE1GxG8Pii/0iamcySEiIiIiIhCxZsQIiIiIiIKVeQnppcmVRtZxPZMICKqEvr376+xXZt+2rRpAIKTIomioBz7cRBRlnEkhIiIiIiIQsWbECIiIiIiClVs27GIiCgaPvnkE41t69WHH34IgPsdEBFRSRwJISIiIiKiUHEkhIiIKuW9997TuFOnTjk8EiIiiguOhBARERERUah4E0JERERERKEKux1rK4C9if/mm0bI7M/VquxPoRCwZsuPNRsNWwEUIfO/3yhgzeYn1mz5sWajI1+vD0KrWRf2Jj3OuTne+x6hvmgI8vXnovz93ebrz0XF8vH3m48/EyXl4+83H38mSsrH32+YPxPbsYiIiIiIKFS8CSEiIiIiolDl4iZkXA5eMwz5+nNR/v5u8/XnomL5+PvNx5+JkvLx95uPPxMl5ePvN7SfKfQ5IUREREREVLWxHYuIiIiIiELFmxAiIiIiIgpVqDchzrn+zrmlzrkVzrkxYb52pjjnWjjnpjnnFjvnFjnnRifyDZxzU5xzyxP/rZ/rY6XKY81S3ORDzQKs26qENUtxw5rN0OuHNSfEOVcNwDIA5wBYB+B9AMO894tDOYAMcc41BdDUe/+Bc64ugLkALgJwFYDt3vv7EgVZ33t/ew4PlSqJNUtxky81C7BuqwrWLMUNazZzwhwJOQnACu/9Su/9VwD+AWBwiK+fEd77Dd77DxLxbgBLABSi+Gd5KvFpT6H4l0jxxpqluMmLmgVYt1UIa5bihjWbIWHehBQCWGv+vS6Riy3nXGsA3QHMBlDgvd+Q+NBGAAU5OizKHNYsxU3e1SzAus1zrFmKG9ZshnBiepqcc3UAPAfgZu/9LvsxX9zjxrWPKVJYsxRHrFuKG9YsxU2uarZSNyEVnJjzKYAW5t/NE7nYcc5VR/Eva6L3/vlEelOit0567Dbn6viodKxZ1mwcVaBu86ZmAdZtnLFmWbNxw5oNv2bTvglJTMx5FMAAAJ0BDHPOdT7Il7wPoINzro1zrgaAoQAmp/v6ueKccwCeBLDEe/+A+dBkACMS8QgAL4V9bHRwrFnWbBxVsG7zomYB1m2csWZZs3HDms1Nzaa9OpZz7mQAv/Den5f49x0A4L2/9yBfwyHI8tvqvW+c64PIJ6zZrGPNZkFF65Y1WyGs2SxgzWYVazYL0qjZ8wG8Gt4Rxpv33qXKV6Ydq1wTc5xzI51zc5xzcyrxWlVRUa4PIA+xZrOLNZsdZdYtazZtrNnsYM1mD2s2Oyp0fQDgV2EdWD47NNsv4L0fB2AcwKcdFA+sWYob1izFDWuW4oh1m1mVGQnJq4k5VCWwZimOWLcUN6xZihvWbA5U5iYkbybmUJXBmqU4Yt1S3LBmKW5YszmQdjuW936/c+5GAG8AqAZgvPd+UcaOLEKKFw8A0p3ET9FQlWqW8gfrluKGNUtxw5rNjbRXx0rrxWLaP5ejm5C53vseYb4glRTXms0R1mwEsGYrhDUbAazZCmHNRgTrtvyysToWERERERFRhWV9daw4qVOnjsbNmjXTuHv37gCAzZuTG0bOmjVL4y+//DKEoyMiIiIiyg8cCSEiIiIiolDxJoSIiIiIiEIV+XYsmRQOZHZi+CGHFN9/tW3bVnP9+/fX+Oyzz9a4Q4cOAIBPP00uGf3b3/5W42nTpgEA9u/fn7HjIyIiotzr1q2bxk8//bTGM2bMAADcdNNNmvv222/DOzDKG1V1FVaOhBARERERUagiPxKSybvC2rVrazxq1CgAwMCBA1N+/IsvvtB49+7dAID69etrrnfv3hrPnDkTAEdCqHLkSUhpo39V7QkJkWX/LgT/JihbbL3dfvvtGrdu3VrjXbt2lfhcolSqVasGADj00ORlt72mPPLIIwEA33zzjeb27t1bIrY5+7lxxZEQIiIiIiIKFW9CiIiIiIgoVJFvx6qsww47TONHHnlE4+9+97sAgM8//1xzr732Wsq4QYMGAIDTTz9dc/Xq1dO4Zs2aAIB9+/Zl6rCpiqhevbrGffr0AQB06dJFc2vXrtV4wYIFAICtW7dq7uuvv9bYtqZI3n6cKAz2nNu8eXMAQKdOnTR3zDHHaHzUUUdpLC2wmzZt0pxtN5C/C1v/48eP17ioqAgAW7QoM2rVqqWxbb+uUaOGxlJznIxOZWnYsCEA4He/+53m7DWl1Ju0+AHJqQAAMGfOHADApEmTNDd//nyNd+7cCQD46quvNBeHuuRICBERERERhYo3IUREREREFKq8bceSlQh+9KMfaW7YsGEaSxvMxo0bNWeHyVauXKlx3bp1ASRbCwCgUaNGGsuqWp999llGjp3ym9QmAHz/+9/X+Ic//CGAYDvLf/7zH42ltWr16tWakz1sAOCEE07QWIZ0//jHP2rOrqpBlAmyKlD37t0199hjj2ncvn17AMG2QxvbdsE9e/YE/gsATZo00bhOnTolPr5582aN//SnP5X4nkTpsqtgNWvWTGO7EtYLL7wAgC2AlJo918l1on3Ptu2oUle2DdC2/Uu+R48eJb6nZdu55s6dq7G9bpDr3jfffFNzGzZs0DjMeuZICBERERERhSpvR0LatWsHILi+t0wgB5ITHh999FHNLV26VGM7oUdie6doJw/LSAlRediJubfccovG8qRXnq4BwAMPPKCxPPW1T+Lsk5ILLrhAY3lCMnHiRM1xJIQyrWvXrgCAV199VXMFBQUayyTJLVu2aG7btm0a20noUt92lNmOhMjTueXLl2tu8uTJGnOfJsqkO+64Q2M7Gd3W2aJFi0I9JooXWysygrtu3TrNFRYWaizXkfZr7KiG1KD9GjsScsghxWMKdhSjW7duGttrWrn+lYUVgOCeeZ988kkZP1nmcCSEiIiIiIhCxZsQIiIiIiIKVV61Y9kJvTJJsX79+pqzbSyy/8ITTzyhObsmvSXDY3boyra2tGrVCgCwbNkyzXGiGh1IhksHDBigOTuc+oc//AEA8NBDD2ku1STbVHUMAEcccYTG0trFOqRMs+fZP//5zwCCLVh2H4/7778fQHACpG3Hsq0H8j0uu+wyzdnFQKSl66abbtKcbW1grVMmyGTiSy+9NOXH7XWCbdEmOpA9J+3YsQMAcM8992hu1KhRGh9//PEAku/dQHARG2nHkusIIHgtUBb7uTI1oW3btpr7n//5H41vuOEGAOHsM1LmSIhzbrxzbrNzbqHJNXDOTXHOLU/8t/7BvgdRmFizFEesW4ob1izFDWs2WsrTjjUBQP8DcmMATPXedwAwNfFvoqiYANYsxc8EsG4pXiaANUvxMgGs2cgosx3Lez/DOdf6gPRgAGcm4qcAvAXgduSAHa6yQ1t9+vQBEByCssOod955JwDg888/L/M1ZEhq4cL/3969h0tVX/cff3+rqFWpohIERPEWUhJj8FG8IRAv0Rj7qIkxNU2kjQZNYxRrjUbzaIz2SR7b2l+aagxRI30kIglWqCZBa7yhICDiDRAUQZCLIiiImsRk//44s9ZZA3M8t5k9s+d8Xv+w+J5zZvY8s84+s/d3fdfXL5x5773
3PLZOWTNmzPCxjjyu1Eaj5uy227b8un3sYx/zsVji19F9DuIU7/vvv+9xnMa1/NTeNcXRqHm7pVgiZdP5cb+O0047zePZs2cDbU/rx/O3lRO2tU/OzTffDKjstZEUJWc749RTTwXKu2lGc+bM8fjtt9/O5ZikeuqVs/b5M56/5s6d67F1BYwlUvHzq3UajB2z4mda+974Nz/Gcf8RK32N59+4z5iVfsXPF7XS1TUh/bIss2LINUC/tr4xpTQWGNvF5xGpFuWsFFGH8lY5Kw1EOStFo88HddLthelZlmUppTZvR2VZNh4YD/Bh39dZdtU3cuRIH7vmmms8trvO0YIFCzy+++67O/2c8aoz3g20fR9ee+01H5syZYrH2sG3sdQrZ028IxH7zHflrkPsAx4f1+5A//73v+/KIUoD+rC8rXXORnGPJFtYPn78eB+Ld4rbW9gY7zYff/zxQPks80033eSxnV81+1EcjZKznREXDlcyefJkj/NYuCv5qvXng1glE2dCvva1rwHlu6jHzwSLFi0CYObMmT4W91myhh+xWUf8HDx06FCPL7roIgAGDx7sY7GSIs+ZkK626F2bUuoPUPr39Xa+X6TelLNSRMpbKRrlrBSNcrZOunoRMg0YU4rHAFOrczgiNaOclSJS3krRKGelaJSzddJuOVZK6U5aFuzskVJaCVwN/BCYnFI6B1gOnFnLg6xk9913B+BHP/qRj8XpJPPOO+94fPbZZ3vclTKVOE02YMAAj21K66qrrqr4+FOntuSzpm7z0ag5a6Urs2bN8rHevXt7bP3p494JlcTFal/4whe2+nmAO+64A1DOFUmj5m3p2Dw+8MADPbYF6RMnTvSxtvZbMrHPfdzzw/bPsaYhUF7iqjKsxtPIOdsZsSwwllqbmHtxMbr9Xig3i6PeORv/Ji9btsxjO9ftvffePhY/v/73f/83AL/97W99LC5Sr/S3Pp5r44L4ww47bKvnshIsaP1cEh+/VjrSHeusNr50XJWPRaQqlLNSRMpbKRrlrBSNcraxdLUcS0REREREpEu63R0rT7Hc5JxzzgFgyJAhPlZpTxDbewHKu2N1hnUdOvLII30sltHsuOOOQHmZwnXXXeex7R+ybt06H9P0bc9j06X33nuvj511VutNGeuy9swzz/hYpdKWWHZ4zDHHeBx7flvJQNxzIf5+2O9SHNu8efNWxxrzVDnbc8Vp/X322cdj6+4Wy6YqiXlmezgBnHfeeR4/+OCDADz55JM+ppyTPNi+CVBelmJih8tYQiPSHZs2bfL4scceA6B///4+Zl0uAR5++GGgvBywvXLr+PnhjTfe8PgXv/gFAKeccoqP2RIHgKOPPhoo7wRXK5oJERERERGRXDX8TEi8gxYX0VxyySVA5bsW0LpgctKkSR1+rni3b7fddvP48MMPB2D48OE+Fmdl7OfinejYf9n6M9uVLugOX08Wd0l/4oknPLY7xLHpQZy9s2YHxx57rI/Fu9Ixf+0OR9ydfd999/V4+vTpACxZssTH4t0+6w+uPBUoP9/ZzuYAK1as2Op74znb8mennXbysQsvvLDi49rscXuNGUSqbdiwYR7H86iJM33PPvtsLsckzS/+zb3rrruA8pm2+Pff9gHparOZ+Lfc9ieJ1Tmxkufiiy8G4J577vEx27G92jQTIiIiIiIiudJFiIiIiIiI5Krhy7Gi2FM+LqIxcbrJpo4GDhzoY6tWrfLYFvfYonKA0047zePPfOYzHtvi3vXr1/vY6tWrPY4LiUycMrPnUmmLQPlisZkzZ3pspVPf/OY3fSzmrOVfXExmTRMA3n33XY+fe+65sp8BeOqppzxetGgRUL4YPe5to/1FJIrlVLHEzxYwvvjiiz5mpX7QmkexgUI8X/7qV7/yuL3F7SK18g//8A8eWzlhPAf+7Gc/8zguJtbfdOmOmD9r1qwB4P777/exmIPVLFN97733gPK9Q+J53ZYQWLMcKG+YU02aCRERERERkVzpIkRERERERHJVqHKs2H2qktiVpU+fPgCce+65PjZy5EiPredy7GJ10kknebzHHnt4bB1g4hb2ca8G66YRp85Wrlzp8dKlSwFN3crWrAsVwM9//nOgvERr++2399g6tp1xxhk+Fku7fvKTn2z1WPHrKrGSroqlerE7lnVcu+mmm3zMurhA6z4i8edjCezUqVM9Vn5KnmJnzbh3jYnlrbfffrvHlfZuEukuO//FvKvUabAa7HHjZ+rYFc7O0bGM1kq847FWg2ZCREREREQkV7oIERERERGRXBWqHGvKlCken3322QD07t274vdaGctRRx3lY3Gzw223bXnpcerLyqagvJOQbXz42c9+tuJjmXfeecfj7373uxXHRdpipVnz58+v+HUrg4kbDFnnNoD/+q//8jhugiTSXdZNBcq7t1g5ayxt2WWXXTy2LoaxrCBuehXPv1YaEMsOVMIqtbLnnnt6vOuuu2719VdffdXjN954I5djEmnr/Gfn0K6WaMXSK9sQOW52HB/LSg5j6WF83mrSTIiIiIiIiOSq4WdC4tXZjBkzPD7zzDOB8gWRH/nIR7b6+XiHzhb2QutdkFmzZvlYvMMX79B98YtfBGD48OE+Fu9A213n2267zcemTZvmsRZcSjXY3eiYe3FWxGbsRKot3hG7+uqrPf71r38NwBFHHOFj8Ty81157AeXnTuuHD5XPjZr9kDx89atf9bhS05s777zT42ru0SDSFTYTEfcGixUPFscZC6v4gfIZ6mHDhgHlDZjiz9n5vlevXhW/Xk2aCRERERERkVzpIkRERERERHLV8OVYUZwStdKp2Md41KhRHh955JEAfOITn/CxuCDSeh4//fTTPrZ69WqPBw0a5PHxxx8PlPfHjyUDy5YtA+D666/3MS0MllqJiyhjaUvci0GkVjZs2ODxAw88AMDjjz/uY5/61Kc8vvTSS4HycpeYs3buBJ0zJR+2H8L5559f8ev2OePWW2/N7ZhEOqp///4ex+YKb731FlDeRCHuQxb3Z7IF6bG0q5L29uarhnZnQlJKg1JKD6WUFqSUXkgpXVQa3y2l9EBKaUnp3z41P1qRDlDOStEoZ6VolLNSRMrbxtKRcqwPgEuyLBsKHAF8M6U0FLgceDDLsgOBB0v/F2kEylkpGuWsFI1yVopIedtA2i3HyrJsNbC6FG9KKS0EBgKnAqNL3zYBeBi4rCZHWfm4AFi1apWPTZo0aas4dhKKZSzWKSt+PZYRnHTSSR5baVbs5PL22297fOGFFwKwdu3arrwUqbJGzdnusqnROMXaXqeh7upqT3LpnCLlbMyDSjk3cOBAjw844ACgvNQqdg5cv359LQ5RclCknI2sI1C/fv0qfv3NN98E1G2wWRU1b03seHXyySd7bJ1g77vvPh9bsWKFx3E5gXXHsv30oPy8bqXdcYlCrbq8dmpNSEppMDAMeBLoV3ozAdYAFX+jU0pjgbFdP0SRrlPOStEoZ6VolLNSRMrb+uvwRUhKaWdgCjAuy7KNW9whzVJKFW+TZlk2Hhhfeoya3kqtdK
c27vQbY1tcefDBB/vYiBEjPD7kkEM8toVq8c7IhAkTPH700UcB7QfSaIqQs51hCyo3b97sYxs3bqzX4UgNFC1n7a6c7ZwOrfsqAfTt2xeAl19+2cf+53/+x2OdM4uvaDlrzWziotuYh1OmTAHK98aR5lO0vLXPt3Em5NOf/rTHtkjd9mYCmDdvnsdxhtp+B2IlUGSfK2bOnLnV81dbh1r0ppR60fJmTcyy7O7S8NqUUv/S1/sDmruUhqGclaJRzkrRKGeliJS3jaMj3bEScCuwMMuyG8KXpgFjSvEYYGr1D0+k85SzUjTKWSka5awUkfK2sXSkHOto4KvAcyml+aWxK4AfApNTSucAy4Eza3OItWF7hsQF5tG6des8fvXVVwG4+eabfezhhx/2OC4UlobQlDlr5QExZ22RJZRP01ZrzwUtRs9NYXK2V69eHlszj29/+9s+NnLkSI8tZydPnuxjWvDbNAqTs9Ho0aOB8nNb3IPszjvvzPuQJF+FzFvL17jYfOnSpR7vt99+QGt+Q/mygp133tnjXXbZBWgt8QZ49913Pb7xxhsBWLly5VbPX20d6Y41A0htfPm46h6OSPcpZ6VolLNSNMpZKSLlbWPp0JoQERERERGRaulUi95mYt0wFi9e7GPXXHONx7atPbT2So77gKhzhuTNpkPjFKrtd7NlbCUvKqeSaoslgNdeey0Ao0aN8rFYFjh79mwAJk6c6GOx9EUkb7Y3gpVkQ/l+S8uXL8/9mEQ6KpZj/9M//ZPH48aNA+D000/3MetOCLDjjjt6bJ3hYglWPEdb99c8PudqJkRERERERHLVY2dCTOwPHhejx1ikEdhdibhYzHak3jJ+4403AM2ESHXEHvr777+/x7bwMe68GxeeX3nllVuNidTT2LEt+8ydf/75PhbPkzpnSlHE6pzrr78eKN9H7Nxzz/U4zmrYjujPPfecj91+++0er1+/vurH2hbNhIiIiIiISK50ESIiIiIiIrn0YaouAAAePklEQVTq8eVYIkVh06lz5871sSFDhngcy7HmzJkDlC++FKm2jRs3ArDddtv52G233ebxrFmzgPKyV5FGoJyUZrJhwwYAfvKTn/hYLIONC9NnzJgBwIsvvuhjcZF6niWJmgkREREREZFc6SJERERERERylfKcdkkpqe1Exz2VZdmh9T6Inq4RczbuE7Lrrrt6bNOxULcOL8rZBpBnzlpXrD/+8Y8+VrAyF+VsA2jE82wDU842COVtx2VZVnGXes2EiIiIiIhIrrQwXaRg4p3mPPt5i2zJ+s2LiIh0lmZCREREREQkV7oIERERERGRXOVdjrUO2Fz6t9nsQXVf1z5VfCzpOuVsxylnG8M6YDnVf38bgXK2OSlnO0452zia9fNBbjmba3csgJTS3Gbs7NCsr0ua971t1tclLZrx/W3G1yStmvH9bcbXJK2a8f3N8zWpHEtERERERHKlixAREREREclVPS5CxtfhOfPQrK9Lmve9bdbXJS2a8f1txtckrZrx/W3G1yStmvH9ze015b4mREREREREejaVY4mIiIiISK50ESIiIiIiIrnK9SIkpXRSSunFlNJLKaXL83zuakkpDUopPZRSWpBSeiGldFFpfLeU0gMppSWlf/vU+1il+5SzUjTNkLOgvO1JlLNSNMrZKj1/XmtCUkrbAIuBE4CVwBzgrCzLFuRyAFWSUuoP9M+ybF5KqTfwFHAa8PfA+izLflhKyD5Zll1Wx0OVblLOStE0S86C8ranUM5K0ShnqyfPmZDhwEtZli3NsuwPwCTg1ByfvyqyLFudZdm8UrwJWAgMpOW1TCh92wRa3kQpNuWsFE1T5Cwob3sQ5awUjXK2SvK8CBkIrAj/X1kaK6yU0mBgGPAk0C/LstWlL60B+tXpsKR6lLNSNE2Xs6C8bXLKWSka5WyVaGF6F6WUdgamAOOyLNsYv5a11Lip97E0FOWsFJHyVopGOStFU6+c7dZFSCcX5rwGDAr/36s0VjgppV60vFkTsyy7uzS8tlRbZzV2r9fr+KRtylnlbBF1Im+bJmdBeVtkylnlbNEoZ/PP2S5fhJQW5twIfBYYCpyVUhr6IT8yBzgwpbRvSmk74G+BaV19/npJKSXgVmBhlmU3hC9NA8aU4jHA1LyPTT6cclY5W0SdzNumyFlQ3haZclY5WzTK2frkbJe7Y6WUjgS+l2XZiaX/fwcgy7IffMjPaAqy49ZlWda33gfRTJSzNaecrYHO5q1ytlOUszWgnK0p5WwNdCFnTwbuy+8Iiy3LslRpvDvlWB1amJNSGptSmptSmtuN5+qJltf7AJqQcra2lLO10W7eKme7TDlbG8rZ2lHO1kanPh8A38/rwJrZtrV+gizLxgPjQXc7pBiUs1I0ylkpGuWsFJHytrq6MxPSVAtzpEdQzkoRKW+laJSzUjTK2TrozkVI0yzMkR5DOStFpLyVolHOStEoZ+ugy+VYWZZ9kFK6AJgObAPclmXZC1U7sgbS0jwAdtppJx/bc889PV66dCkAf/7zn/M9MOmUnpSz0jyUt1I0ylkpGuVsfXS5O1aXnqyg9XN1ugh5KsuyQ6v5gNJ5Rc3ZOlHONgDlbKcoZxuAcrZTlLMNQnnbcbXojiUiIiIiItJpNe+OVVQ777yzx9/61rcAOPfcc33s8ccf9/j8888H4N13383p6EREREREikszISIiIiIikitdhIiIiIiISK5UjhXssMMOHl933XUen3POOQD8/ve/97H77rvP4/feey+HoxMRaXzWyANgm222KfsXIDZDiXGlxh7tfV1kS5Z/f/EXle+xWh7l2ZRHpCviubQ9Rc1nzYSIiIiIiEiuevxMSLxDd/rpp3t89tlne2yL1FetWuVjjzzyiMdFvQKVYrG7Ittvv72P7bPPPh5/9KMf9Xi77bYD4Pnnn/cxayUN8Mc//rFmxyk9T5xFjufRE044AShvb/5Xf/VXFeNdd90VaM1dgPfff9/jadNa9g27/vrrfeztt9/u9rFL8cVZD2uf/8lPftLH9t57b4+XLFkCwOLFi33snXfe8fgPf/gDAH/605987IMPPvBYM3JSC9tu2/JxfMiQIT727W9/2+PDDjsMKP/7v+OOO3psnw/i59GYt6+//rrH8+bNA+COO+7wsVmzZnkcq35qTTMhIiIiIiKSK12EiIiIiIhIrnpsOZZNXR1++OE+NnbsWI9tShZgwYIFAPzbv/2bj73xxhu1PkQRdtttN4+/9KUvAXDMMcf42IABAyr+3FtvvQWUl6tMnjzZ44ceegjQ3jbSPb169QJgzJgxPva9733P47jfkonlLLGMxh7LyhKgvLSgf//+ADz77LM+9qtf/crjWD4jPcvuu+/u8bXXXgvA0Ucf7WOx/HTZsmUAvPzyyz4Wy1Y+/vGPA635CLB69WqPn3jiCY/nzJkDwJo1ayo+l5V7xxLD+Fh5lr1IY7PPpEcddZSPfeYzn/HYcjyeM2PemraWB/Tt29fjgw46CIAvfvGLPjZ9+nSP7Xwey2FrRTMhIiIiIiKSK12EiIiIiIhIr
npsOZZNuf7rv/6rj/Xr18/jRx991GObporTVeqQIbUSu7r87Gc/89hKW37zm9/42E033eRxnOa3qdtRo0b52BFHHOHx2rVrAXj66ad9TOUs0hGxd/1+++0HwMUXX+xjsTTGSgPiXkqxlDWWsViZQeyYNXjwYI+tTCt+XefhniuWpRxyyCEejx49GmjtkgWwadMmj60cK5anWKkfwL777guU59mnPvUpj0eMGFHxcU0sJ7RjiKVdGzZs8HjkyJEAvPrqqz6mnO6Z7O9vLPezsmmA/fffHyjPn/nz53u8fPlyoLyLq3UcBDjppJM8PuWUUwDYZZddfCyWftl5d9GiRV14JZ2jmRAREREREclVj5oJiXc7br31VgAOOOAAH4u7oN9yyy0eL1y4ECi/AtXeIFJtw4cPB+CXv/ylj8W7fdYzfMqUKT4WGyhEr7zyCgArVqzwsbgIzWZFrOkCaJG6dEzcE8RmQAYNGuRjcabE9l9oazH5jBkztvpe64cPcOmll3psd/VeeuklH9N5uOeKe3ydeOKJHlszjziza+dDaP07b3eOoXxWxGZAYlOQuB9DrJiwO8nxPG0zKdD6u/KXf/mXPhb3zKnU9Ob73/++x5UWHktzshmwOPswbtw4j21Wbf369T4WF47bbHPMmbinSNwnxCokYl6/+eabHscZ6lrTTIiIiIiIiORKFyEiIiIiIpKrpi/H6tOnj8dTp0712Bb//u53v/OxOCUaF4pZmYCmRqUa4tT9oYce6vGkSZOA8p7yZ511lsePPPII0LESFJvajVOscTrWFkTGMoRYniASxZy1RY0AX/jCF4DyhbexrM8aH/z4xz/2McvjLb/XniMulty8ebPHtv+C9mgSKD9PxrLqSue+uEfSPffcA5TvoRQXg1sZV8z5uNg8llZZHPf7iPl73HHHAfCd73zHx2Lpov38eeed52M333yzx3GRsfQMsYxw3bp1Hlu+xnK+2DzB8jX+fMy1888/32MrbY2/A9/4xjc8tn3G8tDuTEhK6baU0usppefD2G4ppQdSSktK//b5sMcQyZNyVopIeStFo5yVolHONpaOlGPdDpy0xdjlwINZlh0IPFj6v0ijuB3lrBTP7ShvpVhuRzkrxXI7ytmG0W45VpZlj6aUBm8xfCowuhRPAB4GLqvicXWbdQWI+yjErivWreXCCy/0sZUrV3psU/+gvt1F06g5ayUrX/nKV3zsu9/9rsc2nfrlL3/Zx2LpSlc6AcWpWdtnBGDYsGEAHHvssT42YcIEj5Xz+WvUvIXy/Tr+/d//3WPrIBRLVWMZzOzZs8v+hdbyVijPT8v/WNoSy7ysdCX2vpf6qmfOxlLroUOHbvX1WMoUO19aOV9n9kWKXQjjnjcmnptjueDSpUuB8r2dYln4wQcfDJR3KYr5rXKs6mvk8+yW4t9hO8cOGDDAx2LHSzuvxo5YsXTW9saD1nPslVde6WP/93//V63D7pSurgnpl2WZ7Yy2BujX1jemlMYCY7v4PCLVopyVIupQ3ipnpYEoZ6Vo9PmgTrq9MD3Lsiyl1OYt2izLxgPjAT7s+6oh9qe33R/jLpGxv/znP/95oHX3VCi/6lT/+eaVZ87GBY1XXHEFAP/4j//oY3Hh2SWXXAJ0f/YjiosrbTE6tN4Vif3177jjDo81E9J4Pixva3WetYWP8e7tXnvtFZ8XKN9D6eGHH/b4rrvuAsqbIrTV4MMeKy6WjP3qbcfggw46yMdmzpzpcWfubEs+apmzcYFubGBgM76x6UH8enfPbZ05J9tzxRmNeMfZZkLi34k4qxP3cZJ8NNJn2shyKeZKnN2w/Wnifni9e/f2OC42tz3H7PwM9fvM29UWvWtTSv0BSv++3s73i9SbclaKSHkrRaOclaJRztZJVy9CpgFjSvEYYOqHfK9II1DOShEpb6VolLNSNMrZOmm3HCuldCctC3b2SCmtBK4GfghMTimdAywHzqzlQXZUXNz1z//8zwBs2rTJx+Ii9BUrVgBtl2DF0i6VZhVLvXM25s7o0aM9tj7dcWFu7M39+OOPA9UthYrTsccff7zHVsoQF/4qz+ur3nlrYpnLtGnTgPISkZgntvD22muv9bF7773XYysB6EhOVyrHsvM0tDYWib9Td95551bPJfmpZ86uX7/e45hfds6L57ZYwmLn5zzPd/G54u+XicevPK6tRjnPdpbl0OLFi33MPjMAjBo1CihvbBBzyT4TA/zyl78EGqPsuiPdsc5q40vHVflYRKpCOStFpLyVolHOStEoZxtLV8uxREREREREuqTb3bEayemnn+7xgQceCJRPV8VOEzYlu9122/lYnL7dZpttPLbOGm11dRGJbO8EKN8HxHLq6quv9rGYn93t7hPLwCyvYwniRz7yEY9tajeWzqi7UM8Ve8vHafsjjzwSKO+yFkukrE/9woULfSzuqdCVkpe4R1Pcc8H2Cfn0pz/tYyNGjPDY9oJQWWHP8P7773sc/zZbrsaylF122cXj1atbOrHGPKt1zuywww4eDx8+3GM758bSsueffx6RtsR9ah577DGPLYfj54B4rp47d67HjVCGZTQTIiIiIiIiuSr8TEhcjP61r33NY1v8u2jRIh+L/ZNtt9UDDjjAxwYNGuRx3759PX7mmWeA8v7eccGP7iALtC5+jHdq454KtifIrFmzfKzSHbh4JyOq9L1xxs72dIDWPRXGjBlT8XttV2tboNbW40tzs1wbMmSIj8XzqM0Ox13ML730Uo/trm01Z4njHepXXnllq2ONzRbOOOMMj3/7299W/VikccW7uXEmz8QZ6bifgs2uxQYIMb/tcatxPrTfH9u3DGDw4MEe26zha6+9ttXzi1QS8zJ+Dl27di1QPgMYP9NecMEFHtueZXFWpV40EyIiIiIiIrnSRYiIiIiIiOSqsOVYNjV/+OGH+9iAAQM8XrNmDdA6RQWtJVjQWq5yzDHH+JgtfNzye628Jvant6n/GMcpXel5KpVRxalTWzB58skn+5jtwxC/Nz5OpZKBWFYVGyvEn7NywpjHccHwuHHjgPI9S6TnsZz52Mc+5mMxZ6w0KjYwuOeeezyuRelT/J2JC9Ot7DXm/yc+8QmPrfRF5Vg9T8xZK4uN+3EMGzbM4w0bNgCwcuVKH7PF6tBaDhUXk8eS63fffddj+/2IORt/7ogjjgDgBz/4gY/Fslk7v1tTBWgt2xVpz8aNGz22z6GxDHGPPfbw2PYpg9ZGDV//+td9rF7LCjQTIiIiIiIiudJFiIiIiIiI5Kqw5VjWDSN2/7FpWIDf/OY3QHkf5UrTTbErRZz6j49l3SxOPPFEH4u99G0q+I477vAxlQT0PDY1P3v2bB+bP3++x1Y6eMkll/jYl770JY9tH5v//d//9bEnn3zSYyvHinkcy6liOZZNw8bSlTjNH59Dei4rI4kdVWJZ6aZNmwC46qqrKn69lscE5XvbWHlNzPPY3UXd3XqWWPYUYxP/Bu++++4e9+vXDyjPrXietI6GsdQ1lmC9/PLLHts5O+5ZYo8P8K1vfQuAgQMH+ljMU+ve
OWXKFB9Tdyz5MHHPpujnP/85AE888YSPXXPNNR7HDnF/93d/B8Att9ziY/Hn8qSZEBERERERyZUuQkREREREJFeFLcfae++9ATj66KN9zLpeAMycOROAZcuW+VgsY1m1ahVQXloQ4zg9axvCfPnLX/axv/7rv/b42GOPBco3flM5Vs8Vu66MHTvWY8uTmLMrVqzw+PHHHwdg4cKFPrZ582aP2ys3se5AAMOHDwfKSwqeffbZio8rYt0Eofw8+uqrrwKt58s8xO5BVs4CrfkdS7AmTpzosc65PYt1+IHyv9d2noz5EM99dn6N58tDDjnEYyvHmjdvno/Z7wGUdxm0Ety2ygKthDBuwBlLaG+44QYAlixZUvE1ihgrQ41dXGNpqpUJvvjiiz4WN0aeMWOGx/vuuy8AP/7xj33ssMMO8zjPkkDNhIiIiIiISK4KOxNy8MEHA+ULyOMdPIvjHYjIrvTiXYt4NyX2HT/99NOB8l76caHaXXfdBZTfDZGeK95FiDlpiw/vvvvuij/X3YW1vXv39vhzn/scUD7799Of/tTjevUEl8ZiORdn7+IiWzt/xvNsrey4444ATJgwwcfiHWq76xfvSk+ePNljzYT0LPH9jnGlmZDYlMP2Dot3keN5evr06UD57GCc/Yjs9yL+fsTvtX0c4uPff//9HlvFRlufU0SMfSbt37+/jy1fvtxjy7v4OSLm8OWXX+7xL37xCwA++tGP+phVF0F5BVGtaSZERERERERypYsQERERERHJVaHKseL0qW1NH8d23nlnjwcMGACUT8PGchWbhrIFOlC+2NzKvaC1n3jcsyH2X7Ze4ervLR1Rq/0MjjvuOI/79u0LwOrVq33sd7/7Xc2PQYop7pcUS/UOOOAAAPbbbz8fe/PNNz3uSh5VOo8D3HbbbQCcfPLJPhZ74tu53MpjAdavX9/p55fmEPPQSqygdc8bWxQO5aXUc+fOBcr3BYvnSSuNaiu3Y/5aHJsp7Lnnnh7bYzz99NM+Zvs5QPnnE5Et2R51AKeeeipQee8wqJyvcSz+nDVbis0dTjnlFI9vvPHGNh+z2tqdCUkpDUopPZRSWpBSeiGldFFpfLeU0gMppSWlf/u091gieVDOStEoZ6VolLNSRMrbxtKRcqwPgEuyLBsKHAF8M6U0FLgceDDLsgOBB0v/F2kEylkpGuWsFI1yVopIedtA2i3HyrJsNbC6FG9KKS0EBgKnAqNL3zYBeBi4rCZH2XosHi9evBgo3wchTrleccUVQHl/8EqlV3HKNnbHsp7LABdccAFQXs4Sp8GksTRSztZazNmvfOUrHlsZy2OPPeZjmzZtyu/ApFPqnbMxN+I5deDAgUB5Z5VvfOMbHltJS1vT9pafsVzl7LPP9vjSSy/12EoIYwlWLJmxMi0790t91TtnY4fKSZMmeXzllVcC5aUmf/M3f+OxdYL79a9/XfGxLP9ieXXMSeviBvDJT34SaC2VARg0aJDH9hgvvPCCj8WORirhzl+987Y922+/vceXXdb69FYSa+WE0Ln8iV0PLY6/I3HfnDx1ak1ISmkwMAx4EuhXejMB1gD92viZscDYSl8TqTXlrBSNclaKRjkrRaS8rb8OX4SklHYGpgDjsizbGBdnZVmWpZQq3grLsmw8ML70GFVb5fLMM88A8Mgjj/jYiBEjPLaF53GRWFzEaDuU2uNA634fAM8995zHmvUopkbL2VqIu6cOHTrUY1tcGXeU1mL0xlevnI17HD366KMeW+/4E044wcduueUWj+2cGRe2x513bbFjbPoR777F12d35+ICygsvvNBj2+laGku9cjaez26++WaPTzvtNKB1lgLKGyucd955AAwZMsTHYs7FReom5mxsWjN69GigvClO3Cdkzpw5AEybNs3HbFHwlq9B8tWonw9irsXPtDY+fPhwH4sNDyrtUxcrJY466qitHivOjsyfP9/jPPOyQy16U0q9aHmzJmZZZjutrU0p9S99vT/wem0OUaTzlLNSNMpZKRrlrBSR8rZxdKQ7VgJuBRZmWXZD+NI0YEwpHgNMrf7hiXSeclaKRjkrRaOclSJS3jaW1N60S0ppBPAY8Bxgq2CuoKWGbjKwN7AcODPLsg9t2l6LqatYjvLxj3/cY1tQGRc2Llq0yGNbiPnBBx/4WINNjT6VZdmh9T6IImr0nK2mWBoQSxOtHCuWxixbtqzWh6Oc7aJGylnbGwRg+vTpQGtZFpQv0rVzZjx3xhIAK3GIX7fcBHjllVc8vvjiiwF46KGHfKy9PvhVoJztokbK2VhKs//++wPwox/9yMcOOuggj20BbmzGEEtZKuVZXLj+zjvvbPW8cb+PefPmeXz//fcD5c0UYk53cWG6crYbGilvK+nfv7/HsTTW9r5bsWKFj/30pz/1+KmnnrJj8rG490dsXLP77rsD5eVco0aN8jiWaVVLlmWp0nhHumPNACr+MHBcG+MidaOclaJRzkrRKGeliJS3jaVDa0JERERERESqpd1yrKo+WYOXtjQYTbk2gEbM2Tjd+vWvf93j//iP//B48+bNABxyyCE+Zv3xa0g52wC6m7Ox3Opzn/scANddd52Pxa5Csae9iSUmGzZsAFq7BAH853/+p8exhNBKYnIui1XONoBal2rHstXPf/7zQGvJNpTnvJUT/ulPf/Kx2DFrzZo1HlteL1iwwMfefvttj+13IZYgViG/lbMNohZ5G/fruOqqqzy+6KKLgPJ9arY4FqA8l6N4Xl61ahUAZ5xxho/FDnG10FY5lmZCREREREQkV53arFBEGku8axL7069duxbQLunSefGO2b333gvAgw8+6GMjR4702HaKjneKbTEutO4UHRfzNlgDEGlScbH57NmzPbZZi759+/pY7969Pd5hhx2A8jx98803PX799dbOrbZgPc6axJ+r1JhB5MPEWbNrr73W41mzZgEwbtw4Hzv00NYJsZ122gkob7YUF5jbuRzgyiuvBGD58uXVOuwu00yIiIiIiIjkShchIiIiIiKSKy1Mb1xafNYAGj1n4yI06yMO8NZbbwHlZTA5UM42gEbP2QajnG0AeeasnTNjg4/IyhEbuIRKOdsgdK7tOC1MFxERERGRhqCLEBERERERyZW6Y4kUWOxklMM+ICIihRbPmSJSX5oJERERERGRXOkiREREREREcqWLEBERERERyZUuQkREREREJFd5L0xfB2wu/dts9qC6r2ufKj6WdJ1ytuOUs41hHbCc6r+/jUA525yUsx2nnG0czfr5ILeczXWzQoCU0txm3GinWV+XNO9726yvS1o04/vbjK9JWjXj+9uMr0laNeP7m+drUjmWiIiIiIjkShchIiIiIiKSq3pchIyvw3PmoVlflzTve9usr0taNOP724yvSVo14/vbjK9JWjXj+5vba8p9TYiIiIiIiPRsKscSEREREZFc6SJERERERERyletFSErppJTSiymll1JKl+f53NWSUhqUUnoopbQgpfRCSumi0vhuKaUHUkpLSv/2qfexSvcpZ6VomiFnQXnbkyhnpWiUs1V6/rz
WhKSUtgEWAycAK4E5wFlZli3I5QCqJKXUH+ifZdm8lFJv4CngNODvgfVZlv2wlJB9siy7rI6HKt2knJWiaZacBeVtT6GclaJRzlZPnjMhw4GXsixbmmXZH4BJwKk5Pn9VZFm2OsuyeaV4E7AQGEjLa5lQ+rYJtLyJUmzKWSmapshZUN72IMpZKRrlbJXkeREyEFgR/r+yNFZYKaXBwDDgSaBflmWrS19aA/Sr02FJ9ShnpWiaLmdBedvklLNSNMrZKtHC9C5KKe0MTAHGZVm2MX4ta6lxU+9jaSjKWSki5a0UjXJWiqZeOZvnRchrwKDw/71KY4WTUupFy5s1Mcuyu0vDa0u1dVZj93q9jk+qRjkrRdM0OQvK2x5COStFo5ytkjwvQuYAB6aU9k0pbQf8LTAtx+evipRSAm4FFmZZdkP40jRgTCkeA0zN+9ik6pSzUjRNkbOgvO1BlLNSNMrZaj1/njump5ROBv4fsA1wW5Zl/5Lbk1dJSmkE8BjwHPDn0vAVtNTQTQb2BpYDZ2ZZtr4uBylVo5yVommGnAXlbU+inJWiUc5W6fnzvAgRERERERHRwnQREREREcmVLkJERERERCRXuggREREREZFc6SJERERERERypYsQERERERHJlS5CREREREQkV7oIERERERGRXP1/12oknek+Fn8AAAAASUVORK5CYII=\n", "text/plain": [ - "
" + "
" ] }, "metadata": { @@ -217,13 +214,41 @@ } ], "source": [ - "samples = torch.randn(5, model.representation_length)\n", + "samples = torch.randn(5, 5, model.representation_length)\n", "\n", - "fig, axes = plt.subplots(1, 5)\n", + "fig, axes = plt.subplots(5, 5)\n", "\n", - "for index in range(5):\n", - " decoded = model.decoder(samples[index][None])[0].detach()\n", - " show_image(axes[index], decoded)" + "for i in range(5):\n", + " for j in range(5):\n", + " decoded = model.decode(samples[i, j][None].to(config.device))[0].detach()\n", + " show_image(axes[i, j], decoded.cpu())" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "100%|██████████| 10/10 [00:01<00:00, 7.01it/s]\n" + ] + } + ], + "source": [ + "bs = 1_024\n", + "\n", + "results = []\n", + "\n", + "for i in tqdm.trange(validation_dataset.num_batches(bs)):\n", + " nll = model.nll(validation_dataset.get_batch(i, bs)['x'].to(config.device))\n", + "\n", + " results.append(nll.cpu().numpy())\n", + "\n", + "\n", + "full_results = np.concatenate(results)" ] }, { @@ -234,8 +259,7 @@ { "data": { "text/plain": [ - "tensor([[ 0.8606, 0.9047, 0.1575, -0.7448, -0.3117, 0.0745, -0.3145, 1.4116,\n", - " -1.5365, -0.6043, 2.6963, 0.4136, -1.0794, -0.8664, 0.7766, -0.4429]])" + "90.79018" ] }, "execution_count": 14, @@ -244,17 +268,205 @@ } ], "source": [ - " model.encode(get_sample(0)[None])" + "np.mean(full_results)" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "100%|██████████| 10/10 [00:01<00:00, 6.69it/s]\n" + ] + } + ], + "source": [ + "bs = 1_024\n", + "\n", + "results = []\n", + "\n", + "for i in tqdm.trange(validation_dataset.num_batches(bs)):\n", + " nll = model.nll(validation_dataset.get_batch(i, bs)['x'].to(config.device), num_posterior_samples=5)\n", + "\n", + " results.append(nll.cpu().numpy())\n", + "\n", + "\n", + "full_results = np.concatenate(results)" + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "88.15819" + ] + }, + "execution_count": 16, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "np.mean(full_results)" + ] + }, + { + "cell_type": "code", + "execution_count": 17, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "100%|██████████| 10/10 [00:01<00:00, 5.54it/s]\n" + ] + } + ], + "source": [ + "bs = 1_024\n", + "\n", + "results = []\n", + "\n", + "for i in tqdm.trange(validation_dataset.num_batches(bs)):\n", + " nll = model.nll(validation_dataset.get_batch(i, bs)['x'].to(config.device), num_posterior_samples=50)\n", + "\n", + " results.append(nll.cpu().numpy())\n", + "\n", + "\n", + "full_results = np.concatenate(results)" + ] + }, + { + "cell_type": "code", + "execution_count": 18, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "87.12366" + ] + }, + "execution_count": 18, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "np.mean(full_results)" ] }, { "cell_type": "code", "execution_count": 19, "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "100%|██████████| 10/10 [00:04<00:00, 2.24it/s]\n" + ] + } + ], + "source": [ + "bs = 1_024\n", + "\n", + "results = []\n", + "\n", + "for i in tqdm.trange(validation_dataset.num_batches(bs)):\n", + " nll = 
model.nll(validation_dataset.get_batch(i, bs)['x'].to(config.device), num_posterior_samples=500)\n", + "\n", + " results.append(nll.cpu().numpy())\n", + "\n", + "\n", + "full_results = np.concatenate(results)" + ] + }, + { + "cell_type": "code", + "execution_count": 20, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "86.84696" + ] + }, + "execution_count": 20, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "np.mean(full_results)" + ] + }, + { + "cell_type": "code", + "execution_count": 21, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "100%|██████████| 10/10 [00:12<00:00, 1.28s/it]\n" + ] + } + ], + "source": [ + "bs = 1_024\n", + "\n", + "results = []\n", + "\n", + "for i in tqdm.trange(validation_dataset.num_batches(bs)):\n", + " nll = model.nll(validation_dataset.get_batch(i, bs)['x'].to(config.device), num_posterior_samples=2000)\n", + "\n", + " results.append(nll.cpu().numpy())\n", + "\n", + "\n", + "full_results = np.concatenate(results)" + ] + }, + { + "cell_type": "code", + "execution_count": 22, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "86.82053" + ] + }, + "execution_count": 22, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "np.mean(full_results)" + ] + }, + { + "cell_type": "code", + "execution_count": 23, + "metadata": {}, "outputs": [ { "data": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAAA2oAAABpCAYAAABLV9A4AAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4zLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvnQurowAAIABJREFUeJztnXmULlV5vZ8KisZoVFAIYUYwSsAIMioqKiCjoCLBiWv8RYKiCCErEsiSRMUgGoyzIUrESBAQFMSBSQaJigwyhEFA5SIEQQRBjcZo6vfH7X1qF7fu7am+/qq797MWi7pvT1X7O3VO1Xn3eU9V1zUhhBBCCCGEEIbD74z7BEIIIYQQQgghtMmLWgghhBBCCCEMjLyohRBCCCGEEMLAyItaCCGEEEIIIQyMvKiFEEIIIYQQwsDIi1oIIYQQQgghDIy8qIUQQgghhBDCwJjVi1pVVbtWVfXdqqpuq6rqiL5OajETTfsnmo6G6No/0bR/omn/RNP+iab9E037J5rOPdVMN7yuqmoV4BZgZ+BO4ArglXVd39jf6S0uomn/RNPREF37J5r2TzTtn2jaP9G0f6Jp/0TT8fCIWfzsNsBtdV1/H6Cqqs8CewMr/MCqqprZW+Hi4D7gJUTTPomm/XNfXddPZpr3fzRdKdG0f6Jp/8xI04nvia4roK7rimjaNxn7+yea9o/61JUyG+vj2sAP7d93TsRaVFV1YFVVV1ZVdeUs/tZiYCnRtG+iaf8snfj/pLpG0ykTTfsnmvbPlDWF6DpNomm/ZOzvn2jaP0sn/5bZZdSmRF3XJwAnQN6s+yKa9k807Z9o2j/RtH+i6WiIrv0TTfsnmvZPNO2X2WTU7gLWtX+vMxELMyea9k80HQ3RtX+iaf9E0/6Jpv0TTfsnmvZPNB0Ds3lRuwLYpKqqDauqWhXYHzi7n9NatETT/ommoyG69k807Z9o2j/RtH+iaf9E0/6JpmNgxtbHuq5/U1XVm4FzgVWAE+u6vqG3M1uERNP+iaajIbr2TzTtn2jaP9G0f6Jp/0TT/omm42HG5fln9MfiVV0ZV9V1vdV0fyiarpRo2j/RtH+iaf9E0/6ZkaYQXVfGRNXHaRNNV0ru//6Jpv0zJU1nteF1CCGEEEIIIYT+GXnVx/lGVS2b3FpllVWW+5pnH3/729/O2TnNd37nd5bNB3Rp+n//93/lOJpOnS5N1T5dUz8Oy6P73Y+lLXRrOpcuhPlIl6Yek5bRceq4fl1Ey9kjjaNlCGFIJKMWQgghhBBCCANj0WbUfIbyd3/3d8vxXnvtBcCSJUtKTDPsp556aomdccYZAPzsZz8rscU+E+eaPuYxjynHu+yyCwCvfe1rS0yz6ieddFKJfe1rXwPgv//7v0ssmjaa/t7v/V45fuELXwjAy1/+8hL7+c9/DsCnP/3pErvmmmsA+J//+Z+Rnud8wjNmrumzn/1sAHbaaacSu+uuZZWHv/CFL5TYnXfeCcBvfvObkZ7nfGJFmm6xxRat/wPccsstAFx++eUl9tOf/hRIBthZkaZPfepTAfjDP/zDErv11lsBWLq02T/1V7/6FZA+9OG4rj72r7XWWgA86lGPKrH/+q//AuChhx4qsTg/lsfHKdfv93//94F2G3zwwQcB+N///d8SSxtdHtf0kY98ZDmWvj7+aHxP/zl13I2kY9dvSON7MmohhBBCCCGEMDDyohZCCCGEEEIIA2PRWR9le3j84x9fYq973evK8etf/3qgbStRWtktDzfddBMAV199dYn9+te/7v+E5wFKG8vmAPCqV72qHEvT9dZbr8Rky/H08h133AHAjTfeWGJuj1hMdGn6ile8ohz/2Z/9GQAbbrhhicky6jaJ973vfQB8//vfL7EhpfTnki5N99lnn3KsfmCjjTYqMVmeZIsC+PCHPww0tihYvHYoaer9qezj0NidXdMf/ehHQGMfh8YCff/995fYYrXxPOIRy4Zlb6e77757OVbfuv7665eYrI+nn356iZ199rJ9aGWJhsVtMVO/+IQnPKHEdt1113L8spe9DIAnPelJJXbFFVcAcOaZZy4Xi50cVl11
VQBWX331EnvRi15UjnfbbTegbYc877zzgGapA8Dtt98OLN6xyW2O0nS11VYrsec+97nlWMse9AwFcM455wBw7bXXlthPfvITYPH2o13W0cc97nEl5nb85z3veQA88MADJXb++ecDTdsE+MUvfgHMfT+ajFoIIYQQQgghDIxFkVHzN+snP/nJAPz5n/95iR100EHlWDND/sasGYk//uM/LjHNFHn2ZzFl1FzTNdZYA2hnJl1TzVD6zI5+frvttisxzRp3ZX8Ww0ywL3JXBscLsBx44IHl2Gd9hWaNdt555xJT5vdf/uVfSmxcs0LjwBcMr7POOkB3thdgzTXXBLqL4uy7774lpmIYp5xySol5AZyFjjI+ABtssAHQzvYecMAB5fgP/uAPlvsZZYq8D9Y9/+Uvf7nEFlPGwrPgT3nKU4B2oaD999+/HMvt4ZqqP/Asm4refOMb3yixxeJQ0D3smZxnPvOZQJM5A9hjjz3KscYx/yzUvp/1rGeV2CGHHALADTfcUGKLIROk8cmzEsr0vPSlLy0xFWWC5nnL+9TNNtsMaGfe3va2twHwwx/+sMQWg0tB97BnzzR+77nnniXm2Z+usX+HHXYA4JJLLimxY489Fmgya7A4smvKSOp+Bnjxi18MtJ+NnvGMZ5Rjfa/ro37Cs+l6jtIzFMzNc1QyaiGEEEIIIYQwMPKiFkIIIYQQQggDY0FbH5Wql6UJ4OijjwbaViZP5cvC5Pum/PKXvwTae67IpucLYi+77DJgYafsZSXzIhZvf/vbgXaq/rGPfWw5lqaeLpatya0p+kzcqrMYNJXVxlPxRxxxBNBO1fteStJSbRMa+42301e/+tVAe88qHS9kTR/96EcDsP3225fYX/3VXwGNTQTamqroglvuZIXwwg6y+F511VUldt1117W+fyGie1o2EoCDDz4YgK222qrEfA9FaertVBq5hUdWabeTqUDGQrPoug1MhS3222+/ElP7kkUMmvYM3e1Uv1P2Xmgsej/4wQ9KTNayhaYptK3j6667LgBvfetbS0xjthe1cvuodPUlDNL96U9/eokdeuihABx55JElpgI5C01Xt4Fuu+22ABx11FElJjvpE5/4xM6f19jvY42+1wtkSNNjjjmmxO677z5g4Wnq97Is43/5l39ZYnq28u9z1Je6prrv3YKuPX4/8IEPlJj2q1womqrf8+fNww47DIDXvOY1Jbb22msD7fbsGqgwi4/fm2yyCQBvfvObS0zaf+YznymxuVhKkoxaCCGEEEIIIQyMBZdR8+IBeiP+6Ec/WmJbb7010J6t8EIAWtT+3e9+t8S0AHvjjTcuMS0y/tM//dMSU9nehVZYwGchtKj6Ix/5SIltuummQLOIE9oa3HbbbQAsXbq0xDT74IvfNQu69957l9i3v/1toD0jvxDw9veCF7wAgH/6p38qMbUv19TLbKughS++1ufkM8aaSdpll11KTFtKLJSMmmbVPDumhcDvfOc7S6yrCIO3UxVe8bL7yg55pkKfjWfmVFRooRQUkqZdmURle6FZhO3ZDG+nKhd99913l5hK+Xs7fepTnwq0izaoL14ohRqkqRcO0Gytsl/QZBw88+aa6v71dqrPwTVV8SvP1N91113Awrn3oWl76uugue+9IIvuZZ81f/DBB8uxxpp77rmnxFTUSWMTwJZbbgk04x7AvffeCywcXfUc5QXUtNWLrh+aMcf7PWXCoHkmkj7Q9KU+9qsAyR/90R+VmIpgLJTsj8ZyL6Lyrne9C2iPL2rP/szj+klTLxKi+15jEzRunK985SvL/ex81tT7RbmHPHum7KSPXfoZ70eVBQe48sorgXZ/oMym3/t6rvjmN79ZYtdffz2QjFoIIYQQQgghLCryohZCCCGEEEIIA2NS62NVVScCewL31nW92URsNeBUYAPgdmC/uq4fWNHvmAtkZ3J7ohb8ufVDaWVPG1944YXlWMVBrrnmmhKTpUqFGaBZXCzbGjTWKln9pskqVVWdz4A0VareLR7aR+JpT3taiUlTtzx4kZXzzjsPaKxl0Nh7lixZUmKyP73whS8sMe3Dcscdd8zoGobWVlU8xYsvfPCDHwSa/ZOgSdW7Deeiiy4qx2effTbQtpPKpuP72el3ut3i4x//ONBO/U+HoWkqG6nv3/OOd7wDaFvBhNvwXNPPf/7zQFsXWUlcU1l2vNDLv//7vwNw//33z+gahqapbGLev6kYi/ZGg8bu4e3Q+9PTTz8daOui+9z3UVNRAm+nX/rSl4B2YafpMDRNteBdRS0A3vjGNwJtO6Ssnr6fpPpQaPb1cV3UR/teixqj/L5QvzxTe/7QNIWmGJgvQ9hrr72AtsVceqnwD8C5555bji+44AKgbTmTrvqcoHnG8L2tZlP0aiiauqVMhX7e9KY3lZjbIIWs996PfvWrXy3H3/nOd4C2LUyaut1XNjN/rlDRqxkWaBrE89RkBW5U6M4LA33ve98D2ntKur5a9uC/e/PNNweaQhrQPA+4xVQFsBaKphpL3vCGN5SYxq4uu7jGeIBvfetb5Vh7Tnp/IYuvF3qRpnrWh6YA1igLiU0lo/YpYNeHxY4ALqzrehPgwol/h9mxFtF0FKSt9k807Z9o2j/RtH+iaf9E0/7J81T/RNMxMWlGra7rS6uq2uBh4b2BHSeOTwIuBt7W43lNCZ8B0kzMJz7xiRLTLK2jDIWyCgCnnXZaOdbMr884KqOmhZgAO+20E9De/XzXXZe9z3rxkmm8ZT+BZVrCGDX12QppesIJJ5SYZ9eEFrX7dZ911lnlWJk2n6nUDLJnLjXz69spPOc5zwGaGQ+Y9szF2Nuqa6pZbi8coqyXzzpqpvJDH/pQifmiYC0uVqEbaLLEKmwBTebOF8RqJthnk+ebpl40SDO97373u0tMmTSf4VbW5/3vf3+JeeZXmroWmoX3LPmf/MmfAO3svWb2VJDg4b9nCgxKU83QausNaGYRvXDAzTffDMCxxx5bYr4VhPpT76v1814IR64H71/091RmGqa9YHtQmuoaDz/88BJT5sJnf9WG3vOe95SYFqz79/rvVjv3bLG0VFEtaLYD8L54vmkK7YJAaqvuzlCRAXdinHHGGUDjCoF29lx9qRfPEu5s0L3umWX9Pc+MTINBaNpViG2bbbYpMbUx3z5Hpd8vvvjiEvNnJ7UtL4qlz06l4v1ve0EofZ+PcdNgcM9Tcgd51ubHP/4x0B7b9RzVVdDOcU31O/371I59HPJ+eAYMQlNH16ssIzSOhH/9138tMT2PSm9oPxtIF9dUmXrva+SImuvidjOt+rhmXdcaEX4ErLmib6yq6kDgwBV9PRQeEU1HwpTaajSdFtG0f6Jp/0TT/snY3z/RtH/yPNU/0XRMzLo8f13XdVVVK5ySq+v6BOAEgJV9X2iIpqNhZbpG05kRTfsnmvZPNO2fjFP9E037J5r2TzSdW2b6onZPVVVr1XV9d1VVawH3TvoTPaI0pdsOZWfwVL3S7r54WDvfu+2rK1XvdhDZGdz6KCuEFhcC7LjjjgCceOKJnb97En4zBE1
lxYGmyIUXvpAuWpwJcNxxxwHtBe/ard1/xjWV/cltUloM7pYyaepWymkuhB9bW5WmKpwCjT3P96KRNcHb1z/+4z8CcP7555dYl6ZuZVAq39u7bGNuJ5XF1Bco/+pXv5rGlY1fU98jRft6+eJ+aeH7ncjy6MUuutqS21V0n7v1VnYLv1e0/5cWz8O0bVCD0vQtb3kL0C4gIK283Rx//PFA2/LZdd1uJ1ObdAulNHc7mfZU0uJ6mLYNalCayprnRYNkDVXRFWisT7feemuJ+V5yXTYdfV0WPGjsZH7vy37lVr5pFsAYxNivggHQFGfx+1Havfe97y0xFWd44IGm/sFktjDFZBmFpq36Zyvrmf+OaVhKx6qp8HtUVnm3HWsM1tgEjT3P72W/bunh9jEV1fFnpy68H54BY32eEt4e1O6++MUvlpjuQ7c+qk9Y0X2p3+ltV+3TNVU/7HuDzZJBaNq1RMT3S9V1+5it2GT3pH9dWnr/KWu5W6bnYk+6md4JZwMyhC8BzlrJ94ap8VOi6ShIW+2faNo/0bR/omn/RNP+iab9k+ep/ommY2Iq5flPYdlC1ydVVXUncDRwLHBaVVX/D1gK7DfKk3w4WnDqC9233nrr5b5PGQovWaxCCz5bMdkbsb73rrvuKjEVF9BMOsBmm23WOj+YVvbnbmDncWmqGUovx6tCHj4rpAzYoYceWmIqCLKiWTXhv0czwL74XTNJPlOswg0+UzwNTZ/EGNuqZln33XffEtPWA66FshF/8zd/U2Iqo9uV7XW6NPVF2ppJ8tlSaaqFsTCtjNpYNdU5exn3XXbZBWhrodK7KtMPcOWVVwLtzGRXwY+uWXFv28I1VfbHY9PIqI1VU91v2267bYm5vkKFV3z2UjPq3n66ZtR9dlwz6r6oXtkfn3nXrL4XOZhGRm2smuo61C4Att9+e6C9EP1zn/sc0C5wo3FmRdcqTV0/leD2rJLuFb/PNfM+w6ICY9UUmrbg5bFVNMVn0D/72c8C7RLnyiysqMiP2qhnz5SlX2eddUpMuncVc5hhRm2smuq6vZ3o2F0yGvs9w60xp+ueh+bz8kyPtjXxmPpNzxzrZ2eo6Vifp4Sfr8Ydz56pL/Cs12RjUpf7YLfddgPa7h09T3lWdJYMTlONsb6VidrNZM+jTle/st9+yy7P+1kVJ/JnrLnIqE2l6uMrV/Cl5UfyMBt+W9d1NO2X++q6/glpq30STfsnmvZPNO2faDoCoulIyPNU/0TTMTErE3AIIYQQQgghhP6ZddXHucJTv1rg/upXv3q5r/u+XPr6D37wgxKbye7hSm36XjdKY/t5KUU6y0Wwc4afu6wzBx10UIkpHeyaagf4m266qcSmuhjdU8T6HDyFLPuU6ydL5iz3/xgLWrjvO9vLLuMFPw477DCgXYhCtqfppNVlG/G9QpT+d0uZrLnzUVPtbXLIIYeUmGyx//mf/1lif//3fw+09/xR+1pRH9C1SFuaegECafr4xz++xHQ8HzWVzem1r31tiamN+L2vPf18n77JFmlLD9dFx26XVHt3i7M+6/moqe437bcJTRuRLRcai572ooRGiy7NoLGJyUIKjWWnywrt7XQ+96fQnLf2MoPG7nXppZeW2GWXXQa0bc7CxxfvF9X2fM9JFX1yW6DGu9VXX73E9PX5qiu0i6NIU+2TCI3ty/vHLnuiW8X0e7zI2/Oe9zygbRNXX6B7HtqfzXzFbdtde51pLPE22bX/mWuq+1lWamiKrvnnoH1V/XfP5/bZha7Nde6y0UtLH6fcuqx26v31dtttB7SXMOi5d4b7Jc6Y+fFGEUIIIYQQQgiLiHkzZeFvx/vvvz/QLtqhGcnDDz+8xG6//XZgZlm0LnxW42lPexrQfpO/7777gHbmbcj4TMuee+4JtBdSqwSpshPQlD2eraaa2fFZ4a7iAcpkzPVO8DPFz/35z38+0F6geu+9yyraausDgBtuuAFoL6SeyQJVaeqzkqutttpy56X2Oc1S52PD2+kznvEMoL2Fg9qpb4uhTFDXNa5oVrEr+yP83tfMu/dJ+uymWep8bPg1rrfeekC78IXKHivjA03hEG+balcrmrVVvCvT41rp6z7L7jOe8wG/RhX1UKELaApQeTEBjVt+rX6vCtdF2Rv/PhUd6iq05D87HzNqfq7q21zXpUuXAk37hCaT5sUVNGZ5W/UsksY+L8et7/XMnDIaXX3CfNJV6DrUt0JzjcrKQDOWONLH+0IvEqIxfcMNNywxae4ZdX02nv3V850X2pgv/av0WHvttUvs6U9/OtAucqFjb3P+HCBcU/XXKl4HTX/jz0ldzg/1M/43+no+HjVqa/7cL9eS959qp66zrtevu+uzee5zn1ti0s0dSirM4s4PxXxc7LvASDJqIYQQQgghhDAw8qIWQgghhBBCCANj3lgf3aKgBX+exjz55JOBZq8k6C9NrpSrUs4Am2++OdBOG1999dXA/LGUue1wr732AtqaaY8fLcyG/q5N1gDt6QWwwQYbLHcOsrB12QGGiKfEX/aylwHtNPhXv/pVAC688MIS08LULsvYZEUaHNmanv3sZ5fYGmusAbT1k9VyvthIfCG/2qnbvlQwxAuHyPbgti8dr+i6palrLhuF75co+5VbK2Rrmy+4ptrbzy0lWjTteybpZ9ymI9vUiqw0sox5v6H9f9x6ov7ArTuyks+X4kxuv9H+W66VLLpuT5Qt2q1hsjG5tc5tYrL6PfTQQyWm8dHbuz4T/3uKddkrh4pf00YbbQS0rc+6b739anx225famz9LONqLyQsH6W/7va7PwvVX+50vbdU11R5x2jcVYJNNNgHaNnpp7zrrXvbv8/tAtjFfDqK+wPVTn+sW0y6Lb1dRiKHg5yldfD9KjcvelvS84Nej9ul9tD9XqACb97n6nb5nmvrPrkJiXgxDf3uImvr9pOI9z3zmM0tMVkU9+0D3Ehpp6Vr486+eCVxn3ef33HPPcjH/e+qnfezqu53Oj14lhBBCCCGEEBYReVELIYQQQgghhIExeOujUp+qsgiNXcTtNKpG6Gnl2eBpU6VSP/zhD5eYUv1eEUaV/IZufezSVFYST6fLzjVZxcUuy1iXjc/TzrIGHXXUUSWmtLMsQgCf/vSngf4+11Gha9Qef9Bco1vtZK9xi4K06qqc17VnDTRauj3ixS9+MdDeX1Bf932azjnnHGD47VTX6xXeZCVxLXRtXqGsay8VxVxTtwDJwuCayr6qvX/8Z9wSccUVVwDzR1O/9/fdd1+gbROTBcnbqfRxS55bnoRbn9R3eNuWfdXvFf2MayqLX1d1yCHZdNTGVDkMmj3pZOeGpn369ciKJLsjNLapFdl0ZI3yPlHt0/cA0897e9fPdO3RNiRNobnPvK2+5jWvAWCrrbYqMfWp3ieoXXrVR7Vvty2papz/PR8DtV+b75mm9u9VEPX3uqynQ6qqp/us6/5/wQteUGKy101WyU7X6F/ztty1F5qqa3qbVv/gSyG+/vWvA02lZGi09Hto3O1W44XsotCMU7vtttty36+KutBUtOwah7
[... base64-encoded image/png notebook output data omitted ...]
" ] @@ -282,15 +494,15 @@ " \n", " combined = model.decoder(encoding1 * alpha + encoding2 * beta)[0]\n", " \n", - " show_image(axes[i], combined)" + " show_image(axes[i], combined.cpu())" ] } ], "metadata": { "kernelspec": { - "display_name": "Python 3", + "display_name": "vel", "language": "python", - "name": "python3" + "name": "vel" }, "language_info": { "codemirror_mode": { @@ -302,7 +514,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.6.8" + "version": "3.7.3" } }, "nbformat": 4, From 3c9a2ffe45d2e407666f12ecb16e5242de80dd08 Mon Sep 17 00:00:00 2001 From: Million Integrals Date: Thu, 26 Sep 2019 14:22:24 -0700 Subject: [PATCH 097/162] Significant refactoring of optimizers. --- .../cats_vs_dogs_resnet34.yaml | 1 - .../classification/mnist/mnist_cnn_01.yaml | 3 +- .../latent/mnist/mnist_cnn_iwae.yaml | 1 - .../latent/mnist/mnist_cnn_vae.yaml | 1 - .../latent/mnist/mnist_fc_iwae.yaml | 1 - .../latent/mnist/mnist_fc_vae.yaml | 1 - .../classification/imdb_sentiment_gru.yaml | 15 ++- .../mnist/mnist-autoencoder.ipynb | 59 +++++---- vel/api/__init__.py | 6 +- vel/api/info.py | 5 - vel/api/model.py | 60 +++++++-- vel/api/model_config.py | 71 +++++++---- vel/api/optimizer.py | 107 +++++++++++++++- vel/api/source.py | 55 ++++++++ vel/command/list_command.py | 33 +++++ vel/command/phase_train_command.py | 51 ++++---- vel/command/train_command.py | 61 +++++---- vel/data/bucket_loader.py | 68 ++++++++++ vel/data/source/nlp/imdb.py | 30 +++-- vel/data/source/nlp/text_url.py | 8 +- vel/internal/provider.py | 9 +- vel/launcher.py | 6 +- vel/metric/__init__.py | 4 +- vel/metric/base/__init__.py | 4 +- vel/metric/base/averaging_metric.py | 13 ++ vel/model/imagenet/resnet34.py | 6 +- vel/model/latent/cnn_iwae.py | 4 +- vel/model/latent/cnn_vae.py | 4 +- vel/model/latent/fc_iwae.py | 9 +- vel/model/latent/fc_vae.py | 8 +- vel/model/latent/iwae.py | 16 +-- vel/model/latent/vae_base.py | 16 +-- .../multilayer_rnn_sequence_classification.py | 9 +- vel/module/input/embedding.py | 8 +- vel/notebook/loader.py | 8 +- vel/optimizer/adadelta.py | 44 +++++-- vel/optimizer/adam.py | 59 ++++----- vel/optimizer/radam.py | 64 ++++------ vel/optimizer/ranger.py | 119 ++++++++++-------- vel/optimizer/rmsprop.py | 42 +++++-- vel/optimizer/rmsprop_tf.py | 42 +++++-- vel/optimizer/sgd.py | 52 +++++--- vel/rl/algo/distributional_dqn.py | 2 +- vel/rl/api/algo_base.py | 20 --- vel/rl/command/rl_train_command.py | 2 +- vel/scheduler/ladder.py | 4 +- vel/storage/streaming/stdout.py | 4 +- vel/train/phase/cycle.py | 17 +-- vel/train/phase/generic.py | 6 +- vel/train/train_phase.py | 6 +- vel/train/trainer.py | 43 ++----- vel/util/dataloader.py | 21 ++++ vel/util/module_util.py | 16 +++ 53 files changed, 858 insertions(+), 466 deletions(-) create mode 100644 vel/command/list_command.py create mode 100644 vel/data/bucket_loader.py create mode 100644 vel/util/dataloader.py diff --git a/examples-configs/classification/imagenet_transfer/cats_vs_dogs_resnet34.yaml b/examples-configs/classification/imagenet_transfer/cats_vs_dogs_resnet34.yaml index 1b2ab425..a53623fb 100644 --- a/examples-configs/classification/imagenet_transfer/cats_vs_dogs_resnet34.yaml +++ b/examples-configs/classification/imagenet_transfer/cats_vs_dogs_resnet34.yaml @@ -60,7 +60,6 @@ optimizer: lr: 0.01 weight_decay: 0.0 momentum: 0.9 - layer_groups: on commands: diff --git a/examples-configs/classification/mnist/mnist_cnn_01.yaml b/examples-configs/classification/mnist/mnist_cnn_01.yaml index aaa96cef..d11b5742 
100644 --- a/examples-configs/classification/mnist/mnist_cnn_01.yaml +++ b/examples-configs/classification/mnist/mnist_cnn_01.yaml @@ -16,7 +16,7 @@ source: loader: name: vel.data.dataset_loader batch_size: 128 - num_workers: 4 +# num_workers: 4 transformations: - name: vel.data.transformation.image_to_tensor @@ -24,6 +24,7 @@ loader: optimizer: name: vel.optimizer.adadelta + max_grad_norm: 1.0 commands: diff --git a/examples-configs/latent/mnist/mnist_cnn_iwae.yaml b/examples-configs/latent/mnist/mnist_cnn_iwae.yaml index df3164fe..90cb5da7 100644 --- a/examples-configs/latent/mnist/mnist_cnn_iwae.yaml +++ b/examples-configs/latent/mnist/mnist_cnn_iwae.yaml @@ -8,7 +8,6 @@ model: img_channels: 1 channels: [64, 128, 256] representation_length: 50 - max_grad_norm: 1.0 analytical_kl_div: true k: 5 # It's hard to sample many samples for this slightly larger network diff --git a/examples-configs/latent/mnist/mnist_cnn_vae.yaml b/examples-configs/latent/mnist/mnist_cnn_vae.yaml index 76cc3f90..118ad430 100644 --- a/examples-configs/latent/mnist/mnist_cnn_vae.yaml +++ b/examples-configs/latent/mnist/mnist_cnn_vae.yaml @@ -8,7 +8,6 @@ model: img_channels: 1 channels: [64, 128, 256] representation_length: 50 - max_grad_norm: 1.0 analytical_kl_div: true diff --git a/examples-configs/latent/mnist/mnist_fc_iwae.yaml b/examples-configs/latent/mnist/mnist_fc_iwae.yaml index e4ca4abb..215906dd 100644 --- a/examples-configs/latent/mnist/mnist_fc_iwae.yaml +++ b/examples-configs/latent/mnist/mnist_fc_iwae.yaml @@ -8,7 +8,6 @@ model: img_channels: 1 layers: [200, 200] representation_length: 50 - max_grad_norm: 1.0 analytical_kl_div: true k: 50 # Because it's such a small network we can try many importance samples diff --git a/examples-configs/latent/mnist/mnist_fc_vae.yaml b/examples-configs/latent/mnist/mnist_fc_vae.yaml index 96653a6e..1fa51447 100644 --- a/examples-configs/latent/mnist/mnist_fc_vae.yaml +++ b/examples-configs/latent/mnist/mnist_fc_vae.yaml @@ -8,7 +8,6 @@ model: img_channels: 1 layers: [200, 200] representation_length: 50 - max_grad_norm: 1.0 analytical_kl_div: true diff --git a/examples-configs/nlp/classification/imdb_sentiment_gru.yaml b/examples-configs/nlp/classification/imdb_sentiment_gru.yaml index 1c486db6..3e85dac8 100644 --- a/examples-configs/nlp/classification/imdb_sentiment_gru.yaml +++ b/examples-configs/nlp/classification/imdb_sentiment_gru.yaml @@ -2,16 +2,21 @@ name: 'imdb_sentiment_gru' source: - name: vel.sources.nlp.imdb + name: vel.data.source.nlp.imdb vectors: "glove.6B.100d" # precomputed 100-dimensional embeddings + + +loader: + name: vel.data.bucket_loader batch_size: 32 + model: - name: vel.models.rnn.multilayer_rnn_sequence_classification + name: vel.model.rnn.multilayer_rnn_sequence_classification input_block: - name: vel.modules.input.embedding + name: vel.module.input.embedding alphabet_size: 25_002 # Size of the alphabet output_dim: 100 # Embedding dimension @@ -31,7 +36,7 @@ model: optimizer: - name: vel.optimizers.adam + name: vel.optimizer.adam lr: [1.0e-4, 1.0e-3, 1.0e-2, 1.0e-2] weight_decay: [0.0, 0.0001, 0.001, 0.001] epsilon: 1.0e-5 @@ -41,6 +46,6 @@ optimizer: commands: train: - name: vel.commands.train_command + name: vel.command.train_command max_grad_norm: 5.0 epochs: 10 diff --git a/examples-notebooks/autoencoders/mnist/mnist-autoencoder.ipynb b/examples-notebooks/autoencoders/mnist/mnist-autoencoder.ipynb index ad7a6b1e..ea642bf8 100644 --- a/examples-notebooks/autoencoders/mnist/mnist-autoencoder.ipynb +++ 
b/examples-notebooks/autoencoders/mnist/mnist-autoencoder.ipynb @@ -31,22 +31,14 @@ "metadata": {}, "outputs": [], "source": [ - "config = nb.load_config('examples-configs/autoencoders/mnist/mnist_cnn_autoencoder.yaml', run_number=4, device='cpu')" + "config = nb.load_config('examples-configs/autoencoder/mnist/mnist_cnn_ae.yaml', run_number=1, device='cpu')" ] }, { "cell_type": "code", "execution_count": 4, "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "WARNING:root:Setting up a new session...\n" - ] - } - ], + "outputs": [], "source": [ "model = config.load_trained_model()" ] @@ -63,22 +55,23 @@ "MnistCnnAutoencoder(\n", " (encoder): Sequential(\n", " (0): Conv2d(1, 8, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n", - " (1): ReLU(inplace)\n", + " (1): ReLU(inplace=True)\n", " (2): Conv2d(8, 16, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1))\n", - " (3): ReLU(inplace)\n", + " (3): ReLU(inplace=True)\n", " (4): Conv2d(16, 16, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1))\n", " (5): Flatten()\n", " (6): Linear(in_features=784, out_features=16, bias=True)\n", " )\n", " (decoder): Sequential(\n", " (0): Linear(in_features=16, out_features=784, bias=True)\n", - " (1): ReLU(inplace)\n", - " (2): Reshape()\n", + " (1): ReLU(inplace=True)\n", + " (2): Reshape(sizes=(16, 7, 7), batch_dims=1)\n", " (3): ConvTranspose2d(16, 16, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), output_padding=(1, 1))\n", - " (4): ReLU(inplace)\n", + " (4): ReLU(inplace=True)\n", " (5): ConvTranspose2d(16, 8, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), output_padding=(1, 1))\n", - " (6): ReLU(inplace)\n", + " (6): ReLU(inplace=True)\n", " (7): ConvTranspose2d(8, 1, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n", + " (8): Sigmoid()\n", " )\n", ")\n", "----------------------------------------------------------------------------------------------------\n", @@ -97,8 +90,10 @@ "metadata": {}, "outputs": [], "source": [ - "data_source = config.provide('source')\n", - "train_dataset = data_source.train_dataset" + "data_loader = config.provide('loader')\n", + "data_source = data_loader.transformed_source\n", + "train_dataset = data_source.train\n", + "validation_dataset = data_source.validation" ] }, { @@ -108,7 +103,7 @@ "outputs": [], "source": [ "def get_sample(idx):\n", - " return train_dataset[idx][0]" + " return train_dataset[idx]['x'].to(config.device)" ] }, { @@ -118,7 +113,7 @@ "outputs": [], "source": [ "def show_image(axis, sample):\n", - " axis.imshow(train_dataset.denormalize(sample)[:, :, 0], cmap='gray')" + " axis.imshow(train_dataset.denormalize_item(sample, 'x'), cmap='gray')" ] }, { @@ -128,7 +123,7 @@ "outputs": [ { "data": { - "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAA2oAAACzCAYAAAD48u9xAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4zLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvnQurowAAHTJJREFUeJzt3XuQVdX55vHnRYOXOKiIQUqjmARNaQrbiEocSkhA4xgTUROVUhFjiRVvJKUUxhCHjMEQRafEaOIlgBdGtIJENOOoI15iFAokJj9FETXRH9jBK3LRyChr/uCkgr6r6d3nutbu76eKovth77PXPv3QfVbv3qsthCAAAAAAQDp6tHoAAAAAAIBPYqIGAAAAAIlhogYAAAAAiWGiBgAAAACJYaIGAAAAAIlhogYAAAAAiWGiBgAAAACJYaIGAAAAAImpaaJmZkeZ2TIze8nMLq7XoIBGobPIEb1FbugsckNnkSILIVS3o9lWkl6UdISkFZIWSRoVQli6hX2qOxhQEUKwavels2iFWjordb23dBZ18FYIYddqd6azaIGmdrayD71FTYq8Pqjlitohkl4KIbwSQtggabakY2t4PKDR6CxyRG/RbK/WuD+dRbPRWZRSLRO13SX952bvr6hkQKroLHJEb5EbOovc0FkkaetGH8DMxkoa2+jjAPVCZ5EbOovc0FnkiN6i2WqZqK2U9PnN3t+jkn1CCOFGSTdK/DwvWo7OIked9pbOIjF0Frnh9QGSVMuPPi6SNMDM9jaznpJOljSvPsMCGoLOIkf0Frmhs8gNnUWSqr6iFkL4yMzOk/SApK0kTQ8hPFe3kQF1RmeRI3qL3NBZ5IbOIlVVL89f1cG4TIwa1brUeVfRWdSKziJDT4cQBjXrYHQWddDUzkr0FrVr9PL8AAAAAIAGYKIGAAAAAIlhogYAAAAAiWGiBgAAAACJYaIGAAAAAIlhogYAAAAAiWGiBgAAAACJYaIGAAAAAIlhogYAAAAAiWGiBgAAAACJYaIGAAAAAIlhogYAAAAAiWGiBgAAAACJ2brVAwBQPgcddJDLzjvvPJeNHj3aZbfeeqvLrr322uhxlixZUsXoAAAA0scVNQAAAABIDBM1AAAAAEgMEzUAAAAASExN96iZ2d8lrZX0saSPQgiD6jEooJHoLXJDZ5EbOovc0FmkyEII1e+8qdSDQghvFdy++oNlbKuttnLZjjvuWNNjxhZm2H777V227777uuzcc8+NPubUqVNdNmrUKJf985//dNmUKVNc9rOf/Sx6nFqEEKzWx+hKb7trZ4tqa2uL5vPnz3dZr169qj7Oe++9F8132WWXqh+zWegsNjd8+HCXzZo1y2VDhw512bJlyxoypoina32RSmfTNnHiRJd19DW7Rw//w1fDhg1z2WOPPVbzuGrQ1M5Wtqe3qEmR1wf86CMAAAAAJKbWiVqQ9KCZPW1mY+sxIKAJ6C1yQ2eRGzqL3NBZJKfW36M2JISw0sw+J+khM3shhPD45htUyk7hkZIt9pbOIkF0Frmhs8gNr2mRnJquqIUQVlb+fkPSXEmHRLa5MYQwiJsykYrOektnkRo6i9zQWeSG17RIUdVX1Mzss5J6hBDWVt4+UtL/qNvIWmTPPfeM5j179nTZYYcd5rIhQ4a4bKeddnLZCSecUMXoum7FihUumzZtWnTb4447zmVr16512V/+8heXtfgm4sLK2ttmOOQQ9zVLc+bMiW4bWywntnBRrF8bNmxwWUeLhgwePNhlS5YsKfSYuUihs4cffrjLYh+TuXPnNmM4WTv44INdtmjRohaMpHFS6Cz+bcyYMS6bMGGCyzZu3Fj4MWtZiC5FdBapquVHH/tKmmtm/3qc/xVC+D91GRXQOPQWuaGzyA2dRW7oLJJU9UQthPCKpAPqOBag4egtckNnkRs6i9zQWaSK5fkBAAAAIDFM1AAAAAAgMbUuz5+1trY2l82fPz+6bWxxhNTEbgSeOHGiy9atWxfdf9asWS5rb2932bvvvuuyZcuWFRkiErT99tu77Ktf/arLbr/9dpf169evpmMvX77cZVdccYXLZs+eHd3/T3/6k8tinf/FL35RxejwL8OGDXPZgAEDXMZiIv/Wo0f8+6B77723y/baay+XVe6VAWoW69e2227bgpGgDA499FCXnXrqqS4bOnRodP/999+/0HEuuugil73++usuiy3iJ8VfsyxcuLDQsVPCFTUAAAAASAwTNQAAAABIDBM1AAAAAEgMEzUAAAAASEy3Xkzktddec9nbb78d3bYZi4l0dJPj6tWrXfb1r3/dZRs2bHDZbbfdVvvAUGo33HCDy0aNGtWUY8cWLdlhhx1c9thjj0X3jy1yMXDgwJrHhU8aPXq0y5566qkWjCQfHS20c9ZZZ7ksdtP7Cy+8UPcxofxGjBjhsvPPP7/Qvh117phjjnHZqlWrujYwZOmkk05y2TXXXOOyPn36uKyjBZEeffRRl+26664uu/LKKwuMsOPjxB7z5JNPLvSYKeGKGgAAAAAkhokaAAAAACSGiRoAAAAAJIaJGgAAAAAkhokaAAAAACSmW6/6+M4777hs/Pjx0W1jqx79+c9/dtm0adMKHfuZZ55x2RFHHBHddv369S7bf//9XTZu3LhCx0b3ddBBB7nsW9/6lss6WkXp0zpajfHee+912dSpU132+uuvuyz2/+rdd9+NHucb3/iGy4qOHcX16MH39Lrq5ptvLrzt8uXLGzgSlNWQIUNcNmPGDJcVXbW6o1X2Xn311a4NDMnbemv/8n/QoEEuu+mmm1y2/fbbu+zxxx932WWXXRY99hNPPOGybbbZxmV33XWXy4488sjoY8YsXry48LYp46svAAAAACSGiRoAAAAAJIaJGgAAAAAkptOJmplNN7M3zOzZzbLeZvaQmS2v/L1zY4cJdA29RW7oLHJDZ5EbOovcWAhhyxuYHS5pnaRbQwhfqWRXSHonhDDFzC6WtHMIYUKnBzPb8sES1qtXL5etXbvWZTfccIPLzjzzTJedeuqpLrvjjjuqHF33EUIotFJEvXqbc2fb2tpcNn/+fJfFuh1z//33u2zUqFHRbYcOHeqygQMHuiy24MKbb75ZaDyS9PHHH7vs/fffLzSeJUuWFD5OLXLrbOzj9NRTT7ns7rvvdtlpp51Wy6FL5cknn4zmgwcPdtlhhx3msgULFtR9TF3wdAjBryzwKal0truKLfTw/e9/v9C+jz76qMuGDx9e65BaqamdreyXbW/HjBnjsqILID300EMuO+mkk1y2Zs2awuOJvSaeOXNmoX1XrlwZzWOLo3Tl9UUzFHl90OkVtRDC45I+vTzisZJuqbx9i6SRXR4d0ED0Frmhs8gNnUVu6CxyU+09an1DCO2Vt/8hqW+dxgM0Er1FbugsckNnkRs6i2TV/HvUQghhS5d/zWyspLG1Hgeopy31ls4iRXQWuaGzyA2vaZGaaq+orTKzfpJU+fuNjjYMIdwYQhhU5GeHgQYr1Fs6i4TQWeSGziI3vKZFsqq9ojZP0umSpl
T+vqduI0pU0Zsi33vvvULbnXXWWS678847o9tu3Lix0GOiU6Xs7T777BPNx48f77Idd9zRZW+99ZbL2tvbXXbLLbe4bN26ddFj/+EPfyiUNcJ2223nsgsvvNBlp5xySjOGU6umd/boo492Wew5xb/17et/UmrvvfcuvH9HN8NnqpSfZ1upT58+0Ty2cEjs9cLq1atd9vOf/7z2gZVHaTt72WWXRfNLLrnEZbHFBa+//nqXTZw40WVdWTgk5ic/+UnV+15wwQXRPLWFQ6pVZHn+OyQ9JWlfM1thZmdqU5mPMLPlkkZU3geSQW+RGzqL3NBZ5IbOIjedXlELIcTX35ayXscV5UZvkRs6i9zQWeSGziI31d6jBgAAAABoECZqAAAAAJCYmpfnxydNmjTJZQcddJDLhg4d6rIRI0ZEH/PBBx+seVwoh2222cZlU6dOjW4bWxRi7dq1Lhs9erTLFi9e7LKcF5TYc889Wz2EbOy7776FtnvuuecaPJJ8xP4PxhYYkaQXX3zRZbH/l+ie+vfv77I5c+bU9JjXXnutyx555JGaHhPpufTSS10WWzREkjZs2OCyBx54wGUTJkxw2QcffFBoPNtuu200P/LII10W+xptZi6LLYJzzz2lWfsliitqAAAAAJAYJmoAAAAAkBgmagAAAACQGCZqAAAAAJAYFhOps/Xr17vsrLPOctmSJUtcdtNNN0UfM3bTb2yxh+uuu85lsd80j3wdeOCBLostGtKRY4891mWPPfZYTWNC97Ro0aJWD6GuevXq5bKjjjrKZaeeeqrLYjfHd+Syyy5z2erVqwvvj3KLdW7gwIGF93/44Yddds0119Q0JqRnp512ctk555zjso5eA8YWDhk5cmTV4/nSl77kslmzZkW3jS2wF/O73/3OZVdccUXXBlYCXFEDAAAAgMQwUQMAAACAxDBRAwAAAIDEMFEDAAAAgMSwmEgTvPzyyy4bM2aMy2bMmBHd/7TTTiuUffazn3XZrbfe6rL29vbocZC+q6++2mVmFt02tkhI2RYO6dHDf69p48aNLRhJ99O7d++6P+YBBxzgsli/R4wY4bI99tjDZT179nTZKaecEj12rEsffPCByxYuXOiyDz/80GVbbx3/8vr0009Hc3Q/scUbpkyZUnj/J554wmWnn366y957772uDQzJi31u69OnT+H9L7jgApd97nOfc9kZZ5zhsu985zsu+8pXvuKyHXbYIXrs2AInsez22293WWzBvrLjihoAAAAAJIaJGgAAAAAkhokaAAAAACSGiRoAAAAAJKbTiZqZTTezN8zs2c2ySWa20syeqfw5urHDBIqjs8gRvUVu6CxyQ2eRmyKrPs6U9CtJn14+8H+GEKbWfUTdxNy5c122fPny6Laxlf6GDx/usssvv9xle+21l8smT54cPc7KlSujeYZmqgSdPeaYY1zW1tbmsthqSZI0b968uo8pNbEVHmPPxzPPPNOM4dRqphLobWylw9hz+pvf/MZll1xySU3HHjhwoMtiqz5+9NFHLnv//fddtnTpUpdNnz49euzFixe7LLZK6qpVq1y2YsUKl2233XbR47zwwgvRPFMzlUBnc9C/f3+XzZkzp6bHfOWVV1wW6yc+YaZK0NkNGza47M0333TZrrvuGt3/b3/7m8s6ei1RxOuvv+6yNWvWRLft16+fy9566y2X3XvvvVWPp0w6vaIWQnhc0jtNGAtQF3QWOaK3yA2dRW7oLHJTyz1q55nZXyuXkXfuaCMzG2tmi83Mf7sSaC46ixx12ls6i8TQWeSG1wdIUrUTtV9L+qKkNkntkq7qaMMQwo0hhEEhhEFVHguoBzqLHBXqLZ1FQugscsPrAySrqolaCGFVCOHjEMJGSTdJOqS+wwLqi84iR/QWuaGzyA2dRcqKLCbimFm/EEJ75d3jJD27pe1RzLPPxp/GE0880WXf/va3XTZjxgyXnX322S4bMGBA9DhHHHFEZ0PMVo6djS1G0LNnT5e98cYb0f3vvPPOuo+pGbbZZhuXTZo0qfD+8+fPd9mPf/zjWobUMq3o7TnnnOOyV1991WWHHXZY3Y/92muvuez3v/+9y55//nmXLViwoO7jiRk7dqzLYjfsxxZ66A5y/FzbDBMmTHBZbDGkrpgyZUpN+2OTHDu7evVql40cOdJl9913X3T/3r17u+zll1922T333OOymTNnuuydd/xtf7Nnz44eO7aYSEfbosBEzczukDRMUh8zWyHpv0saZmZtkoKkv0vyswGgRegsckRvkRs6i9zQWeSm04laCGFUJP5tA8YC1AWdRY7oLXJDZ5EbOovc1LLqIwAAAACgAZioAQAAAEBiqlpMBM0Vu2n0tttuc9nNN9/ssq239h/iww8/PHqcYcOGuezRRx/tfIBoqQ8//DCat7e3R/OUxBYOmThxosvGjx8f3X/FihUuu+oqv7LyunXrqhgd/uWXv/xlq4eQjOHDhxfabs6cOQ0eCVLV1tbmsiOPPLLqx4st6CBJy5Ytq/oxUT4LFy50WWyho0aIva4cOnRodNvYIjrddfGlIriiBgAAAACJYaIGAAAAAIlhogYAAAAAiWGiBgAAAACJYTGRhAwcODCaf/e733XZwQcf7LLYwiExS5cujeaPP/54of2Rlnnz5rV6CIXEbrCPLRJy0kknuayjm+lPOOGE2gcGNMDcuXNbPQS0yIMPPuiynXfeudC+CxYscNmYMWNqHRLQUNttt53LYouGSFIIwWWzZ8+u+5jKgitqAAAAAJAYJmoAAAAAkBgmagAAAACQGCZqAAAAAJAYFhNpgn333ddl5513nsuOP/746P677bZb1cf++OOPXdbe3h7dtqMbP9EaZlYoGzlyZHT/cePG1X1MRf3oRz9y2U9/+lOX7bjjji6bNWuWy0aPHl2fgQFAg+2yyy4uK/r19frrr3fZunXrah4T0EgPPPBAq4dQWlxRAwAAAIDEMFEDAAAAgMQwUQMAAACAxHQ6UTOzz5vZI2a21MyeM7Nxlby3mT1kZssrfxf7bY5Ag9FZ5IbOIkf0Frmhs8hNkStqH0m6MISwn6TBks41s/0kXSzp4RDCAEkPV94HUkBnkRs6ixzRW+SGziIrna76GEJol9ReeXutmT0vaXdJx0oaVtnsFkmPSprQkFEmKrYa46hRo1wWW+Gxf//+dR/P4sWLXTZ58mSXzZs3r+7HTklZOhtCKJR1tCrotGnTXDZ9+nSXvf322y4bPHiwy0477TSXHXDAAdFj77HHHi577bXXXBZbKSq26lnZlaWz3VVsNdZ99tknuu2CBQsaPZymobfSjBkzXNajR/V3lTz55JO1DAedoLON8c1vfrPVQyitLn02MbP+kg6UtFBS30rhJekfkvrWdWRAHdBZ5IbOIkf0Frmhs8hB4d+jZmY7SJoj6YchhDWbfwcxhBDMzH+rf9N+YyWNrXWgQFfRWeSGziJH1fSWzqKV+FyLXBS6omZmn9GmQs8KIdxdiVeZWb/Kv/eT9EZs3xDCjSGEQSGEQfUYMFAEnUVu6CxyVG1v6Sxahc+1yEmRV
R9N0m8lPR9CuHqzf5on6fTK26dLuqf+wwO6js4iN3QWOaK3yA2dRW6K/Ojjf5V0mqT/MLNnKtklkqZIusvMzpT0qqQTGzPE5urbN/5jyfvtt5/LfvWrX7nsy1/+ct3HtHDhQpddeeWVLrvnHv95ZePGjXUfTwa6VWe32mqraH7OOee47IQTTnDZmjVrXDZgwICaxhS7If6RRx5x2aWXXlrTcUqkW3W2bGKL/NSyoERGuk1v29raovmIESNcFvu6u2HDBpddd911Llu1alUVo0MXdJvONtMXvvCFVg+htIqs+viEJL+k1SbD6zscoHZ0Frmhs8gRvUVu6Cxy0y2+5QcAAAAAOWGiBgAAAACJYaIGAAAAAIkp/HvUcte7d2+X3XDDDS7r6Ibhet8oGVts4aqrropu+8ADD7jsgw8+qOt4kJ6nnnrKZYsWLXLZwQcfXPgxd9ttN5d1tIDOp7399tsumz17dnTbcePGFR4TUEZf+9rXovnMmTObOxDUxU477RTNY59TY1auXOmyiy66qKYxAan44x//6LKOFlTqpovcVY0ragAAAACQGCZqAAAAAJAYJmoAAAAAkBgmagAAAACQmOwXEzn00ENdNn78eJcdcsghLtt9993rPp7333/fZdOmTXPZ5Zdf7rL169fXfTzI14oVK1x2/PHHu+zss8+O7j9x4sSqj33NNde47Ne//rXLXnrppaqPAZSFWUe/PxcAyu/ZZ5912fLly6Pbxhbn++IXv+iyN998s/aBlQBX1AAAAAAgMUzUAAAAACAxTNQAAAAAIDFM1AAAAAAgMdkvJnLccccVyopaunRpNL/vvvtc9tFHH7nsqquuctnq1aurHg+wufb2dpdNmjQpum1HOYDq3X///S773ve+14KRoJleeOGFaP7kk0+6bMiQIY0eDpC82KJ5knTzzTe7bPLkyS47//zzXdbRa/Qy44oaAAAAACSGiRoAAAAAJIaJGgAAAAAkptOJmpl93sweMbOlZvacmY2r5JPMbKWZPVP5c3Tjhwt0js4iN3QWuaGzyBG9RW4shLDlDcz6SeoXQlhiZv9F0tOSRko6UdK6EMLUwgcz2/LBgE6EEKyzbegsUkJnkaGnQwiDtrQBnUViOu2sRG+bqVevXtH8rrvuctmIESNcdvfdd7vsjDPOcNn69eurGF0airw+6HTVxxBCu6T2yttrzex5SbvXPjygMegsckNnkRs6ixzRW+SmS/eomVl/SQdKWliJzjOzv5rZdDPbuc5jA2pGZ5EbOovc0FnkiN4iB4Unama2g6Q5kn4YQlgj6deSviipTZu+O+F/gdim/caa2WIzW1yH8QKF0Vnkhs4iN3QWOaK3yEWhiZqZfUabCj0rhHC3JIUQVoUQPg4hbJR0k6RDYvuGEG4MIQwq8rPDQL3QWeSGziI3dBY5orfISZFVH03SbyU9H0K4erO832abHSfp2foPD+g6Oovc0Fnkhs4iR/QWuSmy6uMQSX+U9B+SNlbiSySN0qZLxEHS3yWdXblJc0uPxQo5qEnBFfToLJJBZ5GhIqs+0lmkpOiqj/S2xWKrQU6ePNllP/jBD1w2cOBAly1durQ+A2uBeq36+ISk2AP972oGBTQanUVu6CxyQ2eRI3qL3HRp1UcAAAAAQOMxUQMAAACAxDBRAwAAAIDEdLqYSF0Pxo2XqFGRGy/ric6iVnQWGSq0MEO90FnUQVM7K9Fb1K7I6wOuqAEAAABAYpioAQAAAEBimKgBAAAAQGKYqAEAAABAYjr9hdd19pakVytv96m8XwZlOhcp3fPZqwXHpLN5SPV86Gz9lOlcpLTPp9m9LWtnpXKdT8rn0srPtSk/L9Uo0/mkfC6FOtvUVR8/cWCzxc1eoadRynQuUvnOp17K9LyU6Vyk8p1PvZTpeSnTuUjlO596KdvzUqbzKdO51FPZnpcynU8ZzoUffQQAAACAxDBRAwAAAIDEtHKidmMLj11vZToXqXznUy9lel7KdC5S+c6nXsr0vJTpXKTynU+9lO15KdP5lOlc6qlsz0uZzif7c2nZPWoAAAAAgDh+9BEAAAAAEtP0iZqZHWVmy8zsJTO7uNnHr5WZTTezN8zs2c2y3mb2kJktr/y9cyvHWJSZfd7MHjGzpWb2nJmNq+RZnk+j0Nl00Nli6Gw66GxxOfe2TJ2V6G1ROXdWKldvy9rZpk7UzGwrSddJ+m+S9pM0ysz2a+YY6mCmpKM+lV0s6eEQwgBJD1fez8FHki4MIewnabCkcysfj1zPp+7obHLobCfobHLobAEl6O1MlaezEr3tVAk6K5Wrt6XsbLOvqB0i6aUQwishhA2SZks6tsljqEkI4XFJ73wqPlbSLZW3b5E0sqmDqlIIoT2EsKTy9lpJz0vaXZmeT4PQ2YTQ2ULobELobGFZ97ZMnZXobUFZd1YqV2/L2tlmT9R2l/Sfm72/opLlrm8Iob3y9j8k9W3lYKphZv0lHShpoUpwPnVEZxNFZztEZxNFZ7eojL0txceY3naojJ2VSvAxLlNnWUykzsKmZTSzWkrTzHaQNEfSD0MIazb/txzPB12T48eYznZvOX6M6Wz3luvHmN52bzl+jMvW2WZP1FZK+vxm7+9RyXK3ysz6SVLl7zdaPJ7CzOwz2lToWSGEuytxtufTAHQ2MXS2U3Q2MXS2kDL2NuuPMb3tVBk7K2X8MS5jZ5s9UVskaYCZ7W1mPSWdLGlek8fQCPMknV55+3RJ97RwLIWZmUn6raTnQwhXb/ZPWZ5Pg9DZhNDZQuhsQuhsYWXsbbYfY3pbSBk7K2X6MS5tZ0MITf0j6WhJL0p6WdJPmn38Ooz/Dkntkv6fNv088pmSdtGmlWSWS/q/knq3epwFz2WINl0C/qukZyp/js71fBr4PNHZRP7Q2cLPE51N5A+d7dJzlW1vy9TZyvnQ22LPU7adrYy/NL0ta2etcnIAAAAAgESwmAgAAAAAJIaJGgAAAAAkhokaAAAAACSGiRoAAAAAJIaJGgAAAAAkhokaAAAAACSGiRoAAAAAJIaJGgAAAAAk5v8DVMTDbjI6QLUAAAAASUVORK5CYII=\n", + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAA2cAAACxCAYAAABAxMXKAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4xLjEsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy8QZhcZAAAdJUlEQVR4nO3de5AV1dnv8ecBg2I4gIhByhvGIClN4RgQCYcSEpB4jIkoiUoJiLHACl5ISimMIR5yCIYo+BYYNSoBvHBEK0BA8/qiR24xAsUlJC9yETWBF5iAoNyNHGCdP9g5QZ+1mZ7dvfdeq+f7qaKY+dG9e/WeHzN7TU+vUeecAAAAAACqq1G1BwAAAAAAYHIGAAAAAEFgcgYAAAAAAWByBgAAAAABYHIGAAAAAAFgcgYAAAAAAUg1OVPVq1R1g6q+q6r3ZTUooFzoLGJEbxEbOovY0FmEQkv9PWeq2lhE3hGRK0Vki4gsF5H+zrm1J9iHX6qGVJxzWuq+dBbVkKazIvXvLZ1FBnY6584odWc6iyqoaGcL+9BbpFLs9UGaK2ddRORd59z7zrlDIjJDRK5N8XhAudFZxIjeotI2pdyfzqLS6CxyI83k7CwR+a/j3t9SyIBQ0VnEiN4iNnQWsaGzCMZJ5T6Aqg4VkaHlPg6QFTqL2NBZxIbOIkb0FpWQZnK2VUTOOe79swvZpzjnnhKRp0T4+VxUHZ1FjOrsLZ1FYOgsYsPrAwQjzY81LheR9qp6vqo2EZGbRGRuNsMCyoLOIkb0FrGhs4gNnUUwSr5y5pw7rKp3isg8EWksIlOcc29nNjIgY3QWMaK3iA2dRWzoLEJS8lL6JR2MS8BIKe2y5PVFZ5EWnUWEVjrnOlfqYHQWGahoZ0XoLdIrx1L6AAAAAICMMDkDAAAAgAAwOQMAAACAADA5AwAAAIAAMDkDAAAAgAAwOQMAAACAADA5AwAAAIAAMDkDAAAAgAAwOQMAAACAADA5AwAAAIAAMDkDAAAAgAAwOQMAAACAADA5AwAAAIAAnFTtAQDIn06dOpnszjvvNNmgQYNM9uyzz5rs0Ucf9R5n1apVJYwOAAAgTFw5AwAAAIAAMDkDAAAAgAAwOQMAAACAAKS650xV/yYi+0TkiIgcds51zmJQQDnRW8SGziI2dBaxobMIhTrnSt/5WJE7O+d2Jty+9INFrHHjxiZr0aJFqsf0La5w6qmnmqxDhw4mu+OOO7yPOX78eJP179/fZP/4xz9MNm7cOJP97Gc/8x4nDeecpn2M+vS2oXY2qZqaGm8+f/58kzVv3rzk4+zZs8ebn3766SU/ZqXQWRyvV69eJps+fbrJevToYbINGzaUZUweK9O+MKWzYRs1apTJin3NbtTI/pBVz549TbZo0aLU40qhop0tbE9vkUqx1wf8WCMAAAAABCDt5MyJyGuqulJVh2YxIKAC6C1iQ2cRGzqL2NBZBCHt7znr7pzbqqpfEJHXVXW9c27x8RsUCk7JEZIT9pbOIkB0FrGhs4gNr2kRhFRXzpxzWwt/7xCR2SLSxbPNU865ztxYiVDU1Vs6i9DQWcSGziI2vKZFKEq+cqaqnxeRRs65fYW3+4jI/8psZFVy7rnnevMmTZqYrFu3bibr3r27yVq2bGmyfv36lTC6+tuyZYvJJk2a5N32uuuuM9m+fftM9uc//9lkVb4ROLG89rYSunQxX6dk5syZ3m19C974Fh/y9evQoUMmK7bwR9euXU22atWqRI8ZixA6e8UVV5jM9zGZPXt2JYYTtcsuu8xky5cvr8JIyieEzuJfBg8ebLKRI0ea7OjRo4kfM81iciGiswhJmh9rbCMis1X1n4/zv51z/5HJqIDyobeIDZ1FbOgsYkNnEYySJ2fOufdF5JIMxwKUHb1FbOgsYkNnERs6i5CwlD4AAAAABIDJGQAAAAAEIO1S+lGrqakx2fz5873b+hY4CI3vZt5Ro0aZbP/+/d79p0+fbrLa2lqTffTRRybbsGFDkiEiQKeeeqrJvvrVr5rs+eefN1nbtm1THXvjxo0me+ihh0w2Y8YM7/5//OMfTebr/C9+8YsSRod/6tmzp8nat29vMhYE+ZdGjfzf+zz//PNNdt5555mscO8LkJqvX6ecckoVRoI8uPzyy002YMAAk/Xo0cO7/8UXX5zoOPfee6/Jtm3bZjLfQnwi/tcsy5YtS3TsauPKGQAAAAAEgMkZAAAAAASAyRkAAAAABIDJGQAAAAAEoEEvCLJ582aT7dq1y7ttJRYEKXaj4u7du0329a9/3WSHDh0y2XPPPZd+YMi1J5980mT9+/evyLF9C480a9bMZIsWLfLu71uoomPHjqnHhU8bNGiQyZYsWVKFkcSj2GI5Q4YMMZnvxvX169dnPibkX+/evU121113Jdq3WOeuueYak23fvr1+A0OUbrzxRpNNnDjRZK1btzZZsUWNFi5caLIzzjjDZA8//HCCERY/ju8xb7rppkSPWW1cOQMAAACAADA5AwAAAIAAMDkDAAAAgAAwOQMAAACAADA5AwAAAIAANOjVGj/88EOTjRgxwrutb7WiP/3pTyabNGlSomOvXr3aZFdeeaV32wMHDpjs4osvNtnw4cMTHRsNV6dOnUz2rW99y2TFVj/6rGKrKL788ssmGz9+vMm2bdtmMt//q48++sh7nG984xsmSzp2JNeoEd/Hq6/Jkycn3nbjxo1lHAnyqnv37iabOnWqyZKuNl1sdbxNmzbVb2AI3kkn2Zf/nTt3NtnTTz9tslNPPdVkixcvNtmYMWO8x37zzTdNdvLJJ5vspZdeMlmfPn28j+mzYsWKxNuGhq+4AAAAABAAJmcAAAAAEAAmZwAAAAAQgDonZ6o6RVV3qOqa47JWqvq6qm4s/H1aeYcJ1A+9RWzoLGJDZxEbOosYqHPuxBuoXiEi+0XkWefcVwrZQyLyoXNunKreJyKnOedG1nkw1RMfLGDNmzc32b59+0z25JNPmuy2224z2YABA0z2wgsvlDi6hsM5l2i1h6x6G3Nna2pqTDZ//nyT+brt8+qrr5qsf//+3m179Ohhso4dO5rMt2jCBx98kGg8IiJHjhwx2cGDBxONZ9WqVYmPk0ZsnfV9nJYsWWKyWbNmmWzgwIFpDp0rb731ljfv2rWrybp162aypUuXZj6meljpnLOrA3xGKJ1tqHyLNXz/+99PtO/ChQtN1qtXr7RDqqaKdrawX7S9HTx4sMmSLmL0+uuvm+zGG2802d69exOPx/eaeNq0aYn23bp1qzf3LXBSn9cXlVDs9UGdV86cc4tF5LPLGl4rIs8U3n5GRPqmGh2QMXqL2NBZxIbOIjZ0FjEo9Z6zNs652sLbfxeRNhmNBygneovY0FnEhs4iNnQWQUn9e86cc+5El3ZVdaiIDE17HCBLJ+otnUWI6CxiQ2cRG17TIgSlXjnbrqptRUQKf+8otqFz7innXOckPwsMlFmi3tJZBITOIjZ0FrHhNS2CUuqVs7kicouIjCv8PSezEQUq6Y2Ne/bsSbTdkCFDTPbiiy
96tz169Giix0SdctnbCy+80JuPGDHCZC1atDDZzp07TVZbW2uyZ555xmT79+/3Hvv3v/99oqwcmjZtarJ77rnHZDfffHMlhpNWxTt79dVXm8z3nOJf2rSxPwV1/vnnJ96/2A3tkcrl59lqat26tTf3Lf7he72we/duk/385z9PP7D8yG1nx4wZ483vv/9+k/kWCHz88cdNNmrUKJPVZ/EPn5/85Ccl73v33Xd789AW/6iPJEvpvyAiS0Skg6puUdXb5FiBr1TVjSLSu/A+EAx6i9jQWcSGziI2dBYxqPPKmXPOv1a2SNRrriLf6C1iQ2cRGzqL2NBZxKDUe84AAAAAABlicgYAAAAAAUi9lD4+bfTo0Sbr1KmTyXr06GGy3r17ex/ztddeSz0u5MPJJ59ssvHjx3u39S3ssG/fPpMNGjTIZCtWrDBZzItCnHvuudUeQjQ6dOiQaLu33367zCOJh+//oG+REBGRd955x2S+/5domNq1a2eymTNnpnrMRx991GQLFixI9ZgIzwMPPGAy38IfIiKHDh0y2bx580w2cuRIk3388ceJxnPKKad48z59+pjM9zVaVU3mW8hmzpzcrN/y/3HlDAAAAAACwOQMAAAAAALA5AwAAAAAAsDkDAAAAAACwIIgGTtw4IDJhgwZYrJVq1aZ7Omnn/Y+pu/GXd+CDY899pjJfL/xHfG69NJLTeZb+KOYa6+91mSLFi1KNSY0TMuXL6/2EDLVvHlzk1111VUmGzBggMl8N7gXM2bMGJPt3r078f7IN1/nOnbsmHj/N954w2QTJ05MNSaEp2XLliYbNmyYyYq9BvQt/tG3b9+Sx/OlL33JZNOnT/du61skz+e3v/2tyR566KH6DSxSXDkDAAAAgAAwOQMAAACAADA5AwAAAIAAMDkDAAAAgACwIEgFvPfeeyYbPHiwyaZOnerdf+DAgYmyz3/+8yZ79tlnTVZbW+s9DsL3yCOPmExVvdv6FvrI2+IfjRrZ7y8dPXq0CiNpeFq1apX5Y15yySUm8/W7d+/eJjv77LNN1qRJE5PdfPPN3mP7uvTxxx+bbNmyZSb75JNPTHbSSf4vrytXrvTmaHh8CzCMGzcu8f5vvvmmyW655RaT7dmzp34DQ/B8n9tat26deP+7777bZF/4whdMduutt5rsO9/5jsm+8pWvmKxZs2beY/sWKfFlzz//vMl8i+7lEVfOAAAAACAATM4AAAAAIABMzgAAAAAgAEzOAAAAACAAdU7OVHWKqu5Q1TXHZaNVdauqri78ubq8wwSSo7OIEb1FbOgsYkNnEYMkqzVOE5Ffichnl/37N+fc+MxH1EDMnj3bZBs3bvRu61uhr1evXiZ78MEHTXbeeeeZbOzYsd7jbN261ZtHaJrkoLPXXHONyWpqakzmW+VIRGTu3LmZjyk0vpUZfc/H6tWrKzGctKZJAL31rVDoe05//etfm+z+++9PdeyOHTuazLda4+HDh0128OBBk61du9ZkU6ZM8R57xYoVJvOtbrp9+3aTbdmyxWRNmzb1Hmf9+vXePFLTJIDOxqBdu3YmmzlzZqrHfP/9903m6yc+ZZrkoLOHDh0y2QcffGCyM844w7v/X//6V5MVey2RxLZt20y2d+9e77Zt27Y12c6dO0328ssvlzye2NV55cw5t1hEPqzAWIBM0FnEiN4iNnQWsaGziEGae87uVNW/FC4Rn1ZsI1UdqqorVNV+WxKoLDqLGNXZWzqLwNBZxIbXBwhGqZOzJ0TkAhGpEZFaEZlQbEPn3FPOuc7Ouc4lHgvIAp1FjBL1ls4iIHQWseH1AYJS0uTMObfdOXfEOXdURJ4WkS7ZDgvIFp1FjOgtYkNnERs6i9AkWRDEUNW2zrnawrvXiciaE22PZNas8T+NN9xwg8m+/e1vm2zq1Kkmu/32203Wvn1773GuvPLKuoYYrRg761tQoEmTJibbsWOHd/8XX3wx8zFVwsknn2yy0aNHJ95//vz5Jvvxj3+cZkhVU43eDhs2zGSbNm0yWbdu3TI/9ubNm032u9/9zmTr1q0z2dKlSzMfj8/QoUNN5rvp3rdYQ0MQ4+faShg5cqTJfAsa1ce4ceNS7Y9jYuzs7t27Tda3b1+TvfLKK979W7VqZbL33nvPZHPmzDHZtGnTTPbhh/Y2vhkzZniP7VsQpNi2DVWdkzNVfUFEeopIa1XdIiL/U0R6qmqNiDgR+ZuI2BkAUCV0FjGit4gNnUVs6CxiUOfkzDnX3xP/pgxjATJBZxEjeovY0FnEhs4iBmlWawQAAAAAZITJGQAAAAAEoKQFQVBZvhs/n3vuOZNNnjzZZCedZD/EV1xxhfc4PXv2NNnChQvrHiCq6pNPPvHmtbW13jwkvsU/Ro0aZbIRI0Z499+yZYvJJkywqyDv37+/hNHhn375y19WewjB6NWrV6LtZs6cWeaRIFQ1NTUm69OnT8mP51uUQURkw4YNJT8m8mfZsmUm8y1WVA6+15U9evTwbutbCKehLqBUDFfOAAAAACAATM4AAAAAIABMzgAAAAAgAEzOAAAAACAALAgSkI4dO3rz7373uya77LLLTOZb/MNn7dq13nzx4sWJ9kdY5s6dW+0hJOK7Sd630MeNN95osmI3xPfr1y/9wIAymD17drWHgCp57bXXTHbaaacl2nfp0qUmGzx4cNohAWXVtGlTk/kW/hARcc6ZbMaMGZmPKWZcOQMAAACAADA5AwAAAIAAMDkDAAAAgAAwOQMAAACAALAgSAV06NDBZHfeeafJrr/+eu/+Z555ZsnHPnLkiMlqa2u92xa7eRPVoaqJsr59+3r3Hz58eOZjSupHP/qRyX7605+arEWLFiabPn26yQYNGpTNwACgzE4//XSTJf36+vjjj5ts//79qccElNO8efOqPYRc4coZAAAAAASAyRkAAAAABIDJGQAAAAAEoM7Jmaqeo6oLVHWtqr6tqsMLeStVfV1VNxb+TvYbFoEyo7OIDZ1FjOgtYkNnEYMkV84Oi8g9zrmLRKSriNyhqheJyH0i8oZzrr2IvFF4HwgBnUVs6CxiRG8RGzqL4NW5WqNzrlZEagtv71PVdSJylohcKyI9C5s9IyILRWRkWUYZKN8qiv379zeZb2XGdu3aZT6eFStWmGzs2LEmmzt3bubHDkleOuucS5QVW81z0qRJJpsyZYrJdu3aZbKuXbuabODAgSa75JJLvMc+++yzTbZ582aT+VZ48q1Wlnd56WxD5VtF9cILL/Ruu3Tp0nIPp2LorcjUqVNN1qhR6XeMvPXWW2mGgzrQ2fL45je/We0h5Eq9PoOoajsRuVRElolIm0LJRUT+LiJtMh0ZkAE6i9jQWcSI3iI2dBahSvx7zlS1mYjMFJEfOuf2Hv+dQuecU1X7Lf1j+w0VkaFpBwrUF51FbOgsYlRKb+ksqonPtQhZoitnqvo5OVbi6c65WYV4u6q2Lfx7WxHZ4dvXOfeUc66zc65zFgMGkqCziA2dRYxK7S2dRbXwuRahS7Jao4rIb0RknXPukeP+aa6I3FJ4+xYRmZP98ID6o7OID
Z1FjOgtYkNnEYMkP9b430VkoIj8p6quLmT3i8g4EXlJVW8TkU0ickN5hlhZbdr4f8z4oosuMtmvfvUrk335y1/OfEzLli0z2cMPP2yyOXPs55KjR49mPp4INKjONm7c2JsPGzbMZP369TPZ3r17Tda+fftUY/Ld1L5gwQKTPfDAA6mOkyMNqrN541uoJ82iEBFpML2tqanx5r179zaZ7+vuoUOHTPbYY4+ZbPv27SWMDvXQYDpbSV/84herPYRcSbJa45siYpeiOqZXtsMB0qOziA2dRYzoLWJDZxGDBvGtPQAAAAAIHZMzAAAAAAgAkzMAAAAACEDi33MWu1atWpnsySefNFmxm36zvtnRt2DChAkTvNvOmzfPZB9//HGm40F4lixZYrLly5eb7LLLLkv8mGeeeabJii2C81m7du0y2YwZM7zbDh8+PPGYgDz62te+5s2nTZtW2YEgEy1btvTmvs+pPlu3bjXZvffem2pMQCj+8Ic/mKzYokgNdKG6euHKGQAAAAAEgMkZAAAAAASAyRkAAAAABIDJGQAAAAAEIPoFQS6//HKTjRgxwmRdunQx2VlnnZX5eA4ePGiySZMmmezBBx802YEDBzIfD+K1ZcsWk11//fUmu/322737jxo1quRjT5w40WRPPPGEyd59992SjwHkhWqx32kLAPm3Zs0ak23cuNG7rW+BvQsuuMBkH3zwQfqBRYorZwAAAAAQACZnAAAAABAAJmcAAAAAEAAmZwAAAAAQgOgXBLnuuusSZUmtXbvWm7/yyismO3z4sMkmTJhgst27d5c8HuB4tbW1Jhs9erR322I5gNK9+uqrJvve975XhZGgktavX+/N33rrLZN179693MMBgudb+E5EZPLkySYbO3asye666y6TFXuNnjdcOQMAAACAADA5AwAAAIAAMDkDAAAAgADUOTlT1XNUdYGqrlXVt1V1eCEfrapbVXV14c/V5R8uUDc6i9jQWcSGziJG9BYxUOfciTdQbSsibZ1zq1T1v4nIShHpKyI3iMh+59z4xAdTPfHBgDo457SubegsQkJnEaGVzrnOJ9qAziIwdXZWhN5WUvPmzb35Sy+9ZLLevXubbNasWSa79dZbTXbgwIESRheGYq8P6lyt0TlXKyK1hbf3qeo6ETkr2+EB2aGziA2dRWzoLGJEbxGDet1zpqrtRORSEVlWiO5U1b+o6hRVPS3jsQGp0VnEhs4iNnQWMaK3CFXiyZmqNhORmSLyQ+fcXhF5QkQuEJEaOfZdCPsLvo7tN1RVV6jqigzGCyRGZxEbOovY0FnEiN4iZIkmZ6r6OTlW4unOuVkiIs657c65I865oyLytIh08e3rnHvKOdc5yc8CA1mhs4gNnUVs6CxiRG8RuiSrNaqI/EZE1jnnHjkub3vcZteJyJrshwfUH51FbOgsYkNnESN6ixgkWa2xu4j8QUT+U0SOFuL7RaS/HLv860TkbyJye+FGyxM9FivbIJWEK9/RWQSDziJCSVZrpLMISdLVGultlflWcRw7dqzJfvCDH5isY8eOJlu7dm02A6uCNKs1vikivp3/Pe2ggHKgs4gNnUVs6CxiRG8Rg3qt1ggAAAAAKA8mZwAAAAAQACZnAAAAABCAOhcEyfRg3DyJlJIsrpAlOou06CwilGhxhazQWWSgop0VobdIr9jrA66cAQAAAEAAmJwBAAAAQACYnAEAAABAAJicAQAAAEAA6vwl1BnbKSKbCm+3LryfB3k6F5Fwz+e8KhyTzsYh1POhs9nJ07mIhH0+le5tXjsrkq/zCflcqvm5NuTnpRR5Op+Qz6VoZyu6WuOnDqy6otIr65RLns5FJH/nk5U8PS95OheR/J1PVvL0vOTpXETydz5ZydvzkqfzydO5ZClvz0uezifWc+HHGgEAAAAgAEzOAAAAACAA1ZycPVXFY2ctT+cikr/zyUqenpc8nYtI/s4nK3l6XvJ0LiL5O5+s5O15ydP55OlcspS35yVP5xPluVTtnjMAAAAAwL/wY40AAAAAEICKT85U9SpV3aCq76rqfZU+flqqOkVVd6jqmuOyVqr6uqpuLPx9WjXHmJSqnqOqC1R1raq+rarDC3mU51MudDYcdDYZOhsOOptczL3NU2dF6G1SMXdWJF+9zVNnKzo5U9XGIvKYiPwPEblIRPqr6kWVHEMGponIVZ/J7hORN5xz7UXkjcL7MTgsIvc45y4Ska4ickfh4xHr+WSOzgaHztaBzgaHziaQg95Ok/x0VoTe1ikHnRXJV29z09lKXznrIiLvOufed84dEpEZInJthceQinNusYh8+Jn4WhF5pvD2MyLSt6KDKpFzrtY5t6rw9j4RWSciZ0mk51MmdDYgdDYROhsQOptY1L3NU2dF6G1CUXdWJF+9zVNnKz05O0tE/uu497cUsti1cc7VFt7+u4i0qeZgSqGq7UTkUhFZJjk4nwzR2UDR2aLobKDo7Anlsbe5+BjT26Ly2FmRHHyMY+8sC4JkzB1b/jKqJTBVtZmIzBSRHzrn9h7/bzGeD+onxo8xnW3YYvwY09mGLdaPMb1t2GL8GOehs5WenG0VkXOOe//sQha77araVkSk8PeOKo8nMVX9nBwr8XTn3KxCHO35lAGdDQydrROdDQydTSSPvY36Y0xv65THzopE/DHOS2crPTlbLiLtVfV8VW0iIjeJyNwKj6Ec5orILYW3bxGROVUcS2KqqiLyGxFZ55x75Lh/ivJ8yoTOBoTOJkJnA0JnE8tjb6P9GNPbRPLYWZFIP8a56qxzrqJ/RORqEXlHRN4TkZ9U+vgZjP8FEakVkf8rx36++DYROV2OrQCzUUT+j4i0qvY4E55Ldzl2efcvIrK68OfqWM+njM8TnQ3kD51N/DzR2UD+0Nl6PVfR9jZPnS2cD71N9jxF29nC+HPT2zx1VgsnBAAAAACoIhYEAQAAAIAAMDkDAAAAgAAwOQMAAACAADA5AwAAAIAAMDkDAAAAgAAwOQMAAACAADA5AwAAAIAAMDkDAAAAgAD8P8a5unNTxUnoAAAAAElFTkSuQmCC\n", "text/plain": [ "
" ] @@ -154,7 +149,7 @@ "outputs": [ { "data": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAAA2oAAAGgCAYAAADbx5TwAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4zLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvnQurowAAIABJREFUeJzt3Xu41WWd///XDShHURBFBBVPaegQGio6flMTzRwLjyVTitUVXZplc5XpmPW1HBvHtEmzmjyiRR4mPDd90zyEpJKAZooiiqLgFvCAbA6KwP37g9VvkPd7sT97r8O+789+Pq6Li82Ltdbn/qz1Xp/1ufda93uFGKMAAAAAAOno1tkDAAAAAAB8EBM1AAAAAEgMEzUAAAAASAwTNQAAAABIDBM1AAAAAEgMEzUAAAAASAwTNQAAAABIDBM1AAAAAEhMTRO1EMJRIYQ5IYQXQgjn1mtQQKNQs8gRdYvcULPIDTWLFIUYY8euGEJ3Sc9LOkLSAkmPSxofY5y9iet0bGNARYwxdPS61Cw6Qy01K7W/bqlZ1MEbMcZtOnplahadoKk1W7kOdYuaFDk/qOUdtf0lvRBjnBdjXC3pZknjarg9oNGoWeSIukWzza/x+tQsmo2aRSnVMlEbKunVDf69oJIBqaJmkSPqFrmhZpEbahZJ6tHoDYQQJkqa2OjtAPVCzSI31CxyQ80iR9Qtmq2WidpCSTts8O9hlewDYoxXSbpK4vO86HTULHLUZt1Ss0gMNYvccH6AJNXy0cfHJe0eQtg5hLC5pJMl3VWfYQENQc0iR9QtckPNIjfULJLU4XfUYoxrQghnSvqDpO6SrosxPlO3kQF1Rs0iR9QtckPNIjfULFLV4fb8HdoYbxOjRrW2Om8vaha1omaRoZkxxtHN2hg1izpoas1K1C1q1+j2/AAAAACABmCiBgAAAACJYaIGAAAAAIlhogYAAAAAiWGiBgAAAACJYaIGAAAAAIlhogYAAAAAiWGiBgAAAACJYaIGAAAAAIlhogYAAAAAiWGiBgAAAACJYaIGAAAAAIlhogYAAAAAienR2QMAUD4f/ehHTXbmmWea7NRTTzXZjTfeaLKf/vSn7nZmzZrVgdEBAACkj3fUAAAAACAxTNQAAAAAIDFM1AAAAAAgMTWtUQshvCypVdJaSWtijKPrMSigkahb5IaaRW6oWeSGmkWKQoyx41deX9SjY4xvFLx8xzeWse7du5tsyy23rOk2vcYMffr0Mdkee+xhsq9+9avubV566aUmGz9+vMneffddk1188cUm+/73v+9upxYxxlDrbbSnbrtqzRY1atQoN3/ggQdM1r9//w5v55133nHzrbfeusO32SzULDZ0+OGHm2zy5MkmO+SQQ0w2Z86chozJMbPWk1RqNm3nn3++yaq9ZnfrZj98deihh5rsT3/6U83jqkFTa7ZyeeoWNSlyfsBHHwEAAAAgMbVO1KKke0MIM0MIE+sxIKAJqFvkhppFbqhZ5IaaRXJq/R61g2OMC0MI20q6L4TwXIxx6oYXqBQ7BY+UbLJuqVkkiJpFbqhZ5IZzWiSnpnfUYowLK38vlnS7pP2dy1wVYxzNokykoq26pWaRGmoWuaFmkRvOaZGiDr+jFkLoK6lbjLG18vORkn5Qt5F1kh133NHNN998c5MddNBBJjv44INNttVWW5nshBNO6MDo2m/BggUmu+KKK9zLHnfccSZrbW012V//+leTdfIi4sLKWrfNsP/+5jVLU6ZMcS/rNcvxGhd59bV69WqTVWsaMmbMGJPNmjWr0G3mIoWa/djHPmYy7zG5/fbbmzGcrO23334me/zxxzthJI2TQs3if5122mkmO+ecc0y2bt26wrdZSyO6FFGzSFUtH30cLOn2EMLfb+c3Mcb/V5dRAY1D3SI31CxyQ80iN9QsktThiVqMcZ6kj9RxLEDDUbfIDTWL3FCzyA01i1TRnh8AAAAAEsNEDQAAAAASU2t7/qyNGjXKZA888IB7Wa85Qmq8hcDnn3++yZYvX+5ef/LkySZraWkx2dtvv22yOXPmFBkiEtSnTx+T7bvvvib79a9/bbIhQ4bUtO25c+ea7JJLLjHZzTff7F7/z3/+s8m8mv/3f//3DowOf3fooYeabPfddzcZzUT+V7du/u9Bd955Z5PttNNOJquslQFq5tVXr169OmEkKIMDDjjAZJ///OdNdsghh7jX32uvvQpt51vf+pbJXnvtNZN5Tfwk/5xl+vTphbadEt5RAwAAAIDEMFEDAAAAgMQwUQMAAACAxDBRAwAAAIDEdOlmIq+88orJ3nzzTfeyzWgmUm2R49KlS0122GGHmWz16tUm+9WvflX7wFBqv/zlL002fvz4pmzba1rSr18/k/3pT39yr+81uRg5cmTN48IHnXrqqSZ79NFHO2Ek+ajWaOfLX/6yybxF788991zdx4TyGzt2rMm+9rWvFbputZo75phjTLZo0aL2DQxZ+uxnP2uyyy+/3GSDBg0yWbWGSA899JDJttlmG5P96Ec/KjDC6tvxbvPkk08udJsp4R01AAAAAEgMEzUAAAAASAwTNQAAAABIDBM1AAAAAEgMEzUAAAAASEyX7vr41ltvmezss892L+t1PXriiSdMdsUVVxTa9pNPPmmyI444wr3sihUrTLbXXnuZ7Kyzziq0bXRdH/3oR032T//0Tyar1kVpY9W6Md59990mu/TSS0322muvmcx7Xr399tvudj7+8Y+brOjYUVy3bvxOr72uueaawpedO3duA0eCsjr44INNdv3115usaNfqal325s+f376BIXk9etjT/9GjR5vs6quvNlmfPn1MNnXqVJNdeOGF7ranTZtmsp49e5rs1ltvNdmRRx7p3qZnxowZhS+bMl59AQAAACAxTNQAAAAAIDFM1AAAAAAgMW1O1EII14UQFocQnt4gGxhCuC+EMLfy94DGDhNoH+oWuaFmkRtqFrmhZpGbEGPc9AVC+Jik5ZJujDHuXckukfRWjPHiEMK5kgbEGM9pc2MhbHpjCevfv7/JWltbTfbLX/7SZF/60pdM9vnPf95kN910UwdH13XEGAt1iqhX3eZcs6NGjTLZAw88YDKvtj2///3vTTZ+/Hj3socccojJRo4caTKv4cKSJUsKjUeS1q5da7KVK1cWGs+sWbMKb6cWudWs9zg9+uijJrvttttMdsopp9Sy6VJ55JFH3HzMmDEmO+igg0z22GOP1X1M7TAzxmg7C2wklZrtqrxGD1/84hcLXfehhx4y2eGHH17rkDpTU2u2cr1s6/a0004zWdEGSPfdd5/JPvvZz5ps2bJlhcfjnRNPmjSp0HUXLlzo5l5zlPacXzRDkfODNt9RizFOlbRxe8Rxkm6o/HyDpGPbPTqggahb5IaaRW6oWeSGmkVuOtqef3CMsaXy8+uSBle7YAhhoqSJHdwOUE+F6paaRUKoWeSGmkVuOKdFsmr+Hr
UYY9zU278xxqskXSXl/TYxymVTdUvNIkXULHJDzSI3nNMiNR3t+rgohDBEkip/L67fkICGoW6RG2oWuaFmkRtqFsnq6Dtqd0maIOniyt931m1EiSq6KPKdd94pdLkvf/nLJrvlllvcy65bt67QbaJNpazbD33oQ25+9tlnm2zLLbc02RtvvGGylpYWk91www0mW758ubvt3/3ud4WyRujdu7fJvvnNb5rsc5/7XDOGU6um1+zRRx9tMu8+xf8aPNh+UmrnnXcufP1qi+EzVcrjbGcaNGiQm3uNQ7zzhaVLl5rs3/7t32ofWHmUtmYvvPBCNz/vvPNM5jUX/PnPf26y888/32TtaRzi+c53vtPh6379619389Qah3RUkfb8N0l6VNIeIYQFIYQvaX0xHxFCmCtpbOXfQDKoW+SGmkVuqFnkhppFbtp8Ry3G6PfflrLu44pyo26RG2oWuaFmkRtqFrnp6Bo1AAAAAECDMFEDAAAAgMTU3J4fH3TBBReY7KMf/ajJDjnkEJONHTvWvc1777235nGhHHr27GmySy+91L2s1xSitbXVZKeeeqrJZsyYYbKcG0rsuOOOnT2EbOyxxx6FLvfMM880eCT58J6DXoMRSXr++edN5j0v0TUNHz7cZFOmTKnpNn/605+a7MEHH6zpNpGe733veybzmoZI0urVq032hz/8wWTnnHOOyVatWlVoPL169XLzI4880mTea3QIwWReE5w77yxN7xcX76gBAAAAQGKYqAEAAABAYpioAQAAAEBimKgBAAAAQGJoJlJnK1asMNmXv/xlk82aNctkV199tXub3qJfr9nDz372M5N53zSPfO2zzz4m85qGVDNu3DiT/elPf6ppTOiaHn/88c4eQl3179/fZEcddZTJPv/5z5vMWxxfzYUXXmiypUuXFr4+ys2ruZEjRxa+/v3332+yyy+/vKYxIT1bbbWVyc444wyTVTsH9BqHHHvssR0ez2677WayyZMnu5f1Gux5fvvb35rskksuad/ASoB31AAAAAAgMUzUAAAAACAxTNQAAAAAIDFM1AAAAAAgMTQTaYIXX3zRZKeddprJrr/+evf6p5xySqGsb9++JrvxxhtN1tLS4m4H6fvxj39sshCCe1mvSUjZGod062Z/17Ru3bpOGEnXM3DgwLrf5kc+8hGTefU9duxYkw0bNsxkm2++uck+97nPudv2amnVqlUmmz59usnee+89k/Xo4b+8zpw5083R9XjNGy6++OLC1582bZrJJkyYYLJ33nmnfQND8rxj26BBgwpf/+tf/7rJtt12W5N94QtfMNmnP/1pk+29994m69evn7ttr8GJl/361782mdewr+x4Rw0AAAAAEsNEDQAAAAASw0QNAAAAABLDRA0AAAAAEtPmRC2EcF0IYXEI4ekNsgtCCAtDCE9W/hzd2GECxVGzyBF1i9xQs8gNNYvcFOn6OEnSlZI2bh/4nzHGS+s+oi7i9ttvN9ncuXPdy3qd/g4//HCT/fCHPzTZTjvtZLKLLrrI3c7ChQvdPEOTVIKaPeaYY0w2atQok3ndkiTprrvuqvuYUuN1ePTujyeffLIZw6nVJCVQt16nQ+8+/a//+i+TnXfeeTVte+TIkSbzuj6uWbPGZCtXrjTZ7NmzTXbddde5254xY4bJvC6pixYtMtmCBQtM1rt3b3c7zz33nJtnapISqNkcDB8+3GRTpkyp6TbnzZtnMq8+8QGTVIKaXb16tcmWLFlism222ca9/ksvvWSyaucSRbz22msmW7ZsmXvZIUOGmOyNN94w2d13393h8ZRJm++oxRinSnqrCWMB6oKaRY6oW+SGmkVuqFnkppY1ameGEJ6qvI08oNqFQggTQwgzQgj215VAc1GzyFGbdUvNIjHULHLD+QGS1NGJ2i8k7SpplKQWSZdVu2CM8aoY4+gY4+gObguoB2oWOSpUt9QsEkLNIjecHyBZHZqoxRgXxRjXxhjXSbpa0v71HRZQX9QsckTdIjfULHJDzSJlRZqJGCGEITHGlso/j5P09KYuj2Keftq/Gz/zmc+Y7FOf+pTJrr/+epN95StfMdnuu+/ubueII45oa4jZyrFmvWYEm2++uckWL17sXv+WW26p+5iaoWfPnia74IILCl//gQceMNm//uu/1jKkTtMZdXvGGWeYbP78+SY76KCD6r7tV155xWR33HGHyZ599lmTPfbYY3Ufj2fixIkm8xbse40euoIcj7XNcM4555jMa4bUHhdffHFN18d6Odbs0qVLTXbsscea7J577nGvP3DgQJO9+OKLJrvzzjtNNmnSJJO99ZZd9nfzzTe72/aaiVS7LApM1EIIN0k6VNKgEMICSf9X0qEhhFGSoqSXJdnZANBJqFnkiLpFbqhZ5IaaRW7anKjFGMc78bUNGAtQF9QsckTdIjfULHJDzSI3tXR9BAAAAAA0ABM1AAAAAEhMh5qJoLm8RaO/+tWvTHbNNdeYrEcP+xB/7GMfc7dz6KGHmuyhhx5qe4DoVO+9956bt7S0uHlKvMYh559/vsnOPvts9/oLFiww2WWX2c7Ky5cv78Do8Hf/8R//0dlDSMbhhx9e6HJTpkxp8EiQqlGjRpnsyCOP7PDteQ0dJGnOnDkdvk2Uz/Tp003mNTpqBO+88pBDDnEv6zXR6arNl4rgHTUAAAAASAwTNQAAAABIDBM1AAAAAEgMEzUAAAAASAzNRBIycuRINz/xxBNNtt9++5nMaxzimT17tptPnTq10PWRlrvuuquzh1CIt8DeaxLy2c9+1mTVFtOfcMIJtQ8MaIDbb7+9s4eATnLvvfeabMCAAYWu+9hjj5nstNNOq3VIQEP17t3bZF7TEEmKMZrs5ptvrvuYyoJ31AAAAAAgMUzUAAAAACAxTNQAAAAAIDFM1AAAAAAgMTQTaYI99tjDZGeeeabJjj/+ePf62223XYe3vXbtWpO1tLS4l6228BOdI4RQKDv22GPd65911ll1H1NR//Iv/2Ky7373uybbcsstTTZ58mSTnXrqqfUZGAA02NZbb22yoq+vP//5z022fPnymscENNIf/vCHzh5CafGOGgAAAAAkhokaAAAAACSGiRoAAAAAJKbNiVoIYYcQwoMhhNkhhGdCCGdV8oEhhPtCCHMrfxf7NkegwahZ5IaaRY6oW+SGmkVuiryjtkbSN2OMIySNkfTVEMIISedKuj/GuLuk+yv/BlJAzSI31CxyRN0iN9QsstJm18cYY4uklsrPrSGEZyUNlTRO0qGVi90g6SFJ5zRklInyujGOHz/eZF6Hx+HDh9d9PDNmzDDZRRddZLK77rqr7ttOSVlqNsZYKKvWFfSKK64w2XXXXWeyN99802Rjxowx2SmnnGKyj3zkI+62hw0bZrJXXnnFZF6nKK/rWdmVpWa7Kq8b64c+9CH3so899lijh9M01K10/fXXm6xbt46vKnnkkUdqGQ7aQM02xic+8YnOHkJptetoEkIYLmkfSdMlDa4UvCS9LmlwXUcG1AE1i9xQs8gRdYvcULPIQeHvUQsh9JM0RdI3YozLN
vwNYowxhhDsr/rXX2+ipIm1DhRoL2oWuaFmkaOO1C01i87EsRa5KPSOWghhM60v6Mkxxtsq8aIQwpDK/w+RtNi7bozxqhjj6Bjj6HoMGCiCmkVuqFnkqKN1S82is3CsRU6KdH0Mkq6V9GyM8ccb/NddkiZUfp4g6c76Dw9oP2oWuaFmkSPqFrmhZpGbIh99/EdJp0j6WwjhyUp2nqSLJd0aQviSpPmSPtOYITbX4MH+x5JHjBhhsiuvvNJke+65Z93HNH36dJP96Ec/Mtmdd9rjyrp16+o+ngx0qZrt3r27m59xxhkmO+GEE0y2bNkyk+2+++41jclbEP/ggw+a7Hvf+15N2ymRLlWzZeM1+amloURGukzdjho1ys3Hjh1rMu91d/Xq1Sb72c9+ZrJFixZ1YHRohy5Ts820yy67dPYQSqtI18dpkmxLq/UOr+9wgNpRs8gNNYscUbfIDTWL3HSJX/kBAAAAQE6YqAEAAABAYpioAQAAAEBiCn+PWu4GDhxosl/+8pcmq7ZguN4LJb1mC5dddpl72T/84Q8mW7VqVV3Hg/Q8+uijJnv88cdNtt9++xW+ze22285k1RrobOzNN9802c033+xe9qyzzio8JqCMDjzwQDefNGlScweCuthqq63c3DumehYuXGiyb33rWzWNCUjFww8/bLJqDZW6aJO7DuMdNQAAAABIDBM1AAAAAEgMEzUAAAAASAwTNQAAAABITPbNRA444ACTnX322Sbbf//9TTZ06NC6j2flypUmu+KKK0z2wx/+0GQrVqyo+3iQrwULFpjs+OOPN9lXvvIV9/rnn39+h7d9+eWXm+wXv/iFyV544YUObwMoixCqfX8uAJTf008/bbK5c+e6l/Wa8+26664mW7JkSe0DKwHeUQMAAACAxDBRAwAAAIDEMFEDAAAAgMQwUQMAAACAxGTfTOS4444rlBU1e/ZsN7/nnntMtmbNGpNddtllJlu6dGmHxwNsqKWlxWQXXHCBe9lqOYCO+/3vf2+yk046qRNGgmZ67rnn3PyRRx4x2cEHH9zo4QDJ85rmSdI111xjsosuushkX/va10xW7Ry9zHhHDQAAAAASw0QNAAAAABLDRA0AAAAAEtPmRC2EsEMI4cEQwuwQwjMhhLMq+QUhhIUhhCcrf45u/HCBtlGzyA01i9xQs8gRdYvchBjjpi8QwhBJQ2KMs0IIW0iaKelYSZ+RtDzGeGnhjYWw6Y0BbYgxhrYuQ80iJdQsMjQzxjh6UxegZpGYNmtWom6bqX///m5+6623mmzs2LEmu+2220z2hS98wWQrVqzowOjSUOT8oM2ujzHGFkktlZ9bQwjPShpa+/CAxqBmkRtqFrmhZpEj6ha5adcatRDCcEn7SJpeic4MITwVQrguhDCgzmMDakbNIjfULHJDzSJH1C1yUHiiFkLoJ2mKpG/EGJdJ+oWkXSWN0vrfTtgvEFt/vYkhhBkhhBl1GC9QGDWL3FCzyA01ixxRt8hFoYlaCGEzrS/oyTHG2yQpxrgoxrg2xrhO0tWS9veuG2O8KsY4ushnh4F6oWaRG2oWuaFmkSPqFjkp0vUxSLpW0rMxxh9vkA/Z4GLHSXq6/sMD2o+aRW6oWeSGmkWOqFvkpkjXx4MlPSzpb5LWVeLzJI3X+reIo6SXJX2lskhzU7dFhxzUpGAHPWoWyaBmkaEiXR+pWaSkaNdH6raTed0gL7roIpOdfvrpJhs5cqTJZs+eXZ+BdYJ6dX2cJsm7of/pyKCARqNmkRtqFrmhZpEj6ha5aVfXRwAAAABA4zFRAwAAAIDEMFEDAAAAgMS02Uykrhtj4SVqVGThZT1Rs6gVNYsMFWrMUC/ULOqgqTUrUbeoXZHzA95RAwAAAIDEMFEDAAAAgMQwUQMAAACAxDBRAwAAAIDEtPmF13X2hqT5lZ8HVf5dBmXaFynd/dmpE7ZJzeYh1f2hZuunTPsipb0/za7bstasVK79SXlfOvNYm/L90hFl2p+U96VQzTa16+MHNhzCjGZ36GmUMu2LVL79qZcy3S9l2hepfPtTL2W6X8q0L1L59qdeyna/lGl/yrQv9VS2+6VM+1OGfeGjjwAAAACQGCZqAAAAAJCYzpyoXdWJ2663Mu2LVL79qZcy3S9l2hepfPtTL2W6X8q0L1L59qdeyna/lGl/yrQv9VS2+6VM+5P9vnTaGjUAAAAAgI+PPgIAAABAYpo+UQshHBVCmBNCeCGEcG6zt1+rEMJ1IYTFIYSnN8gGhhDuCyHMrfw9oDPHWFQIYYcQwoMhhNkhhGdCCGdV8iz3p1Go2XRQs8VQs+mgZovLuW7LVLMSdVtUzjUrlatuy1qzTZ2ohRC6S/qZpE9KGiFpfAhhRDPHUAeTJB21UXaupPtjjLtLur/y7xyskfTNGOMISWMkfbXyeOS6P3VHzSaHmm0DNZscaraAEtTtJJWnZiXqtk0lqFmpXHVbyppt9jtq+0t6IcY4L8a4WtLNksY1eQw1iTFOlfTWRvE4STdUfr5B0rFNHVQHxRhbYoyzKj+3SnpW0lBluj8NQs0mhJothJpNCDVbWNZ1W6aalajbgrKuWalcdVvWmm32RG2opFc3+PeCSpa7wTHGlsrPr0sa3JmD6YgQwnBJ+0iarhLsTx1Rs4miZquiZhNFzW5SGeu2FI8xdVtVGWtWKsFjXKaapZlIncX1bTSzaqUZQugnaYqkb8QYl234fznuD9onx8eYmu3acnyMqdmuLdfHmLrt2nJ8jMtWs82eqC2UtMMG/x5WyXK3KIQwRJIqfy/u5PEUFkLYTOsLenKM8bZKnO3+NAA1mxhqtk3UbGKo2ULKWLdZP8bUbZvKWLNSxo9xGWu22RO1xyXtHkLYOYSwuaSTJd3V5DE0wl2SJlR+niDpzk4cS2EhhCDpWknPxhh/vMF/Zbk/DULNJoSaLYSaTQg1W1gZ6zbbx5i6LaSMNStl+hiXtmZjjE39I+loSc9LelHSd5q9/TqM/yZJLZLe1/rPI39J0tZa30lmrqQ/ShrY2eMsuC8Ha/1bwE9JerLy5+hc96eB9xM1m8gfarbw/UTNJvKHmm3XfZVt3ZapZiv7Q90Wu5+yrdnK+EtTt2Wt2VDZOQAAAABAImgmAgAAAACJYaIGAAAAAIlhogYAAAAAiWGiBgAAAACJYaIGAAAAAIlhogYAAAAAiWGiBgAAAACJYaIGAAAAAIlhogYAAAAAiWGiBgAAAACJYaIGAAAAAIlhogYAAAAAiWGiBgAAAACJYaIGAAAAAIlhogYAAAAAiWGiBgAAAACJYaIGAAAAAIlhogYAAAAAiWGiBgAAAACJYaIGAAAAAIlhogYAAAAAiWGiBgAAAACJYaIGAAAAAIlhogYAAAAAiWGiBgAAAACJYaIGAAAAAIlhogYAAAAAiWGiBgAAAACJYaIGAAAAAIlhogYAAAAAiWGiBgAAAACJYaIGAAAAAIlhogYAAAAAiWGiBgAAAACJYaIGAAAAAIlhogYAAAAAiWGiBgAAAACJYaIGAAAAAIlhogYAAAAAiWGiBgAAAACJYaIG
AAAAAIlhogYAAAAAiWGiBgAAAACJYaIGAAAAAIlhogYAAAAAiWGiBgAAAACJYaIGAAAAAIlhogYAAAAAiWGiBgAAAACJYaIGAAAAAIlhogYAAAAAiWGiBgAAAACJYaIGAAAAAIlhogYAAAAAiWGiBgAAAACJYaIGAAAAAIlhogYAAAAAiWGiBgAAAACJYaIGAAAAAIlhogYAAAAAiWGiBgAAAACJYaIGAAAAAIlhogYAAAAAiWGiBgAAAACJYaIGAAAAAIlhogYAAAAAiWGiBgAAAACJYaIGAAAAAIlhogYAAAAAiWGiBgAAAACJYaIGAAAAAIlhogYAAAAAiWGiBgAAAACJYaIGAAAAAIlhogYAAAAAialpohZCOCqEMCeE8EII4dx6DQpoFGoWOaJukRtqFrmhZpGiEGPs2BVD6C7peUlHSFog6XFJ42OMs6tdp1u3brFbN97EQ8esW7dO69atCx29fkdqNoRAzaLD1q1bpxhjh2tWan/dhhBiCDVtEl1cjPFl3Dx6AAAgAElEQVSNGOM2Hb1+R2qW4yxqsW7duqbWrMQ5LWpT9Jy2Rw3b2F/SCzHGeZIUQrhZ0jhJmypq9evXr4ZNoitbvnx5rTfRoZrt27dvrdtFF7VixYp63Ey76jaEoF69etVju+iiVq1aNb/Gm2hXzXbr1k29e/eucZPoylasWNHUmpXW1+0WW2xR42bRVbW2tha6XC2/Chgq6dUN/r2gkgGpomaRI+oWuaFmkRtqFkmq5R21QkIIEyVNrPzc6M0BNaNmkRtqFrmhZpEj6hbNVstEbaGkHTb497BK9gExxqskXSVJPXr06NiCOKA+2l2z3bt3p2bR2dqs2w1rtlu3btQsOlu7apbjLBLAOS2SVMtHHx+XtHsIYecQwuaSTpZ0V32GBTQENYscUbfIDTWL3FCzSFKH31GLMa4JIZwp6Q+Suku6Lsb4TN1GBtQZNYscUbfIDTWL3FCzSFWH2/N3RI8ePSJdH9FRy5cv15o1a5r6ofDu3btHuj6io1asWKG1a9c2tWa7desW6fqIWqxatWpmjHF0s7bXvXv3SNdH1GLFihVNrVlp/TktXR/RUa2trYXOafkCCAAAAABIDBM1AAAAAEhMw9vzIz3duvnzc+9jsGvXri18feDvvLbFXi2tXLnSZF4d9ujhH6r4uBQA1Ed72s03c9kM0JVxxg0AAAAAiWGiBgAAAACJYaIGAAAAAIlhogYAAAAAiWGiBgAAAACJoetjnRXtiNiejkneZb3uTN7lvOzdd991t9O9e3eTed32inb0a08HKaTPezyrPcbvv/++yfr372+yfv36mczrBLlq1Sp3O0WfGwDQVRU9L1m3bp2be+cG3mW9Y2+12wRQDO+oAQAAAEBimKgBAAAAQGKYqAEAAABAYpioAQAAAEBiaCZSkLcY12tk4C2c9S7nNd/wGndI/kJe7/pe1qtXL5N5DRwkqU+fPibbaqutTLZ8+XKTtba2msxrAOGNEXko2kRGkrbZZhuTnX766SY74IADTPb000+b7Oqrr3a389JLL5nMew4WXUwPtKVos5pqx/OePXsWuux7771nsmpNddA1FW0q5h0TveZO1Y6T3nnAsmXLTObVp9e8rD3N1JCv9jzONAGrjrMXAAAAAEgMEzUAAAAASAwTNQAAAABITE1r1EIIL0tqlbRW0poY4+h6DApoJOoWuaFmkRtqFrmhZpGiejQTOSzG+EYdbqfpvMWL1RbTeg09vIWSq1evLrTtQYMGFcokadtttzWZt0B3++23N9khhxxisj322MPdzg477GCy999/32R/+ctfTHbDDTeYbO7cuSZbunSpu+1OkG3d1pvXyMCrba8W9t57b/c2v/nNb5rsiCOOMNmaNWtMVu154Ln88stNtnjx4kLXzXBBOzWbAO81wmvW0LdvX/f6e+65p8m8Y/ef//znQtvxXgsSWphPzTaZd5z2GticdtppJjv66KPd2/SaifzgBz8w2bRp00zmNcVJ/NhLzW6gaCOu9jQb8845ijbN8y7nHQOl4k0AU8dHHwEAAAAgMbVO1KKke0MIM0MIE+sxIKAJqFvkhppFbqhZ5IaaRXJq/ejjwTHGhSGEbSXdF0J4LsY4dcMLVIp9YuXnGjcH1MUm65aaRYKoWeSGmkVuOKdFcmp6Ry3GuLDy92JJt0va37nMVTHG0THG0XzpLFLQVt1uWLMciJGC9tRsZ4wP2BjHWeSGc1qkqMPvqIUQ+krqFmNsrfx8pCS7ujQRRV8IttlmGzf3Gm3svvvuJttll11MtvPOO5vMaxBSbYzeonTvsgMGDDDZwIEDTeYtLJb8RZZvvvmmybyGKa+//rrJEmoc8v/LrW5rUfRFZNWqVSbr3bu3yT72sY+Z7JxzznFvc+TIkSbzFrl7i4C9eh8zZoy7nenTp5vsoYceMllra6vJcllUnELNbrbZZibr37+/ybzj0sqVK01WremS11wmNV5DD++5tu+++7rX//a3v20y7zh92WWXmeyOO+4wmbe43htjM6VQs12V91z953/+Z5OdccYZJhs2bJh7my0tLSbzGkJ4ryUcZ9NUtNGG19DDy7zzSq8JjVS9zja2fPlyk/Xq1ctkXn1Kfj16rz3e61ZKdVvLRx8HS7q9soM9JP0mxvj/6jIqoHGoW+SGmkVuqFnkhppFkjo8UYsxzpP0kTqOBWg46ha5oWaRG2oWuaFmkSo+YAsAAAAAiWGiBgAAAACJqbU9fzb69OljMq9hwhe/+EX3+mPHjjWZt1DSW2DrLZD3FoBvvvnm7ra9xZMrVqwwWb9+/UzmNVHwGoRI0oIFC0x2//33m+zXv/61yRYtWuTe5sboktQ83mJYr6GHV4tHHXWUyc4880yT7bHHHu62vWYG3sJgr469xfDe81eSjjvuOJM9/PDDJnvvvfdM5j1Xu1L3uWr76tWD1/xo7733NtlLL71kMu9x97Yh+YvUU1rULflj9Orde65J/v542XPPPWcyr/mO93zp7GYiqK/2HJd22mknk5100kkm857T1RqAPfLIIyZ74IEHTFb0udGVjrOp8h4r7/jiHatHjBhhshNPPNFk3nmz5J/rvvPOO+5lN+ad5z7xxBPuZW+//XaTPfXUUyZbtmxZoW13Fs6aAQAAACAxTNQAAAAAIDFM1AAAAAAgMUzUAAAAACAxXaaZSC3fri5J/fv3N5m3iNtbOO8tCve2s8MOO7jb9i7rNWFYuHChyf72t7+ZzGsaIkl/+ctfTPb888+b7PXXXzeZ1ySExiGdy1vE7S3iPfbYY032rW99y2RefVZrWuDVzdSpU0329ttvF9rOnnvu6W5n1KhRJvvEJz5hsltvvdVkq1evNlm1Jhdl5B2/JL9xy4QJE0w2btw4k3lNB6688kqTzZ8/v8gQk+Qd17zXkmrH2ZaWFpN5dedd32vC4DWrQr68+qp2XPKa0Hz84x83mXec9Bosea/tkjR58mSTeecgNA7pXF6dVGvG5DUO8c41P/WpT5nMa7r34Q9/2GRLlixxt+2dE7/44osm23XXXU02Zsw
Ykx144IHudrxz4nnz5pnMO2/3jumdhTNpAAAAAEgMEzUAAAAASAwTNQAAAABIDBM1AAAAAEgMEzUAAAAASEyX6frodbjxuhZdc8017vXff/99k+2yyy4me+GFF0zmdVLabbfdTPbGG2+4237llVdMNmvWLJN5HR5fffVVk22xxRbudrz7qEcPWyJehuYo2nFO8rsajh8/3mRf//rXTbbTTjuZzOvo9fjjj7vbvv3220322GOPmWzp0qUm22677Ux23HHHudsZOXKkybwulnfeeafJvHr37t+ydi2r9jweOHCgyYYMGWIy7zjidTT0jn/V7tMcusZ54/E6q3nPIUnafffdTeZ1ePS68nld3VLqTobGqNbZc/jw4SY76aSTTLb11lubzOvI53XHlaRp06aZzKv51J6rqN71cfvttzfZKaecYrJTTz3VZF5n4D//+c8mu/vuu91tP/HEEybbaqutTHb88cebzHsuVOsw3qtXL5N55/2pH1d5Rw0AAAAAEsNEDQAAAAASw0QNAAAAABLT5kQthHBdCGFxCOHpDbKBIYT7QghzK38PaOwwgfahbpEbaha5oWaRG2oWuSnSFWKSpCsl3bhBdq6k+2OMF4cQzq38+5z6D6+xvAWIL7/8snvZOXPmmKx///4m8xYgbrnllibr3bu3yebNm+du+3e/+53JvAYjRRefe00Uql222kLUDExSSet2Y16jG0n6xCc+YbIzzjjDZHvuuafJvFr64x//aLKf/OQn7rZffPFFk3nPDa++WltbTTZ16lR3O+PGjTPZhz/8YZN5jR1mz55tMm+h8mabbeZuuwEmqYk1W23hv/eYeI1Hli9fbjLvOOkdZ71jzabGlBLv/vEW148dO9a9/jbbbGMyb3H9qlWrTOY91737rIn34yR1keNsrYo+Jl4TqGqNf0477TSTHXjggSbzjr3e+cZtt93mbmfZsmUm857DOTx/VeKa9V6/qjU1Ou+880x2+OGHm8yrPa/pzI033mgy7/VA8pt8HHXUUSbzXst79uxpMq+JnyTdcsstJvPOL1LX5jtqMcapkt7aKB4n6YbKzzdIsm3WgE5E3SI31CxyQ80iN9QsctPRNWqDY4x/78P8uqTBdRoP0EjULXJDzSI31CxyQ80iWTV/IVaMMYYQqn4+LoQwUdLEys+1bg6oi03VLTWLFFGzyA01i9xwTovUdPQdtUUhhCGSVPl7cbULxhivijGOjjGOrvaldECTFKrbDWuWAzE6WbtrtqmjAyyOs8gN57RIVkffUbtL0gRJF1f+vrNuI0rU66+/bjKvyYC3+NFrOuItSJ81a5a77UWLFpnMW3DsaWIjhBxkX7fe4z54sP8pja997Wsm8+pz3bp1Jrv99ttNduGFF5psyZIl7rZraUzj1ezMmTPdyy5cuNBkhxxyiMk+85nPmOySSy4x2ZtvvlloPE3U9Jr1Gh15C7CnT59uMu/+22677UzmHdMkv4FGaifxXmOG3XbbzWRHHnmke31vcb73+uLxatFrINDJsj/ONot3TPQyr9GC5B/XvHp45513THbllVearFpThmrNTEok6Zr1Xk+9x9l7nE4//XT3Ng877DCTecehyZMnm+y3v/2tybxjunduIUn77LOPybwa33XXXU3m1fK1117rbsdrDui9niR4DP2AIu35b5L0qKQ9QggLQghf0vpiPiKEMFfS2Mq/gWRQt8gNNYvcULPIDTWL3LT5a5IY4/gq/2X7eAKJoG6RG2oWuaFmkRtqFrnhA7YAAAAAkBgmagAAAACQmNKvEN0Ub1FhtYYHs2fPNtnee+9tstGjbdO1bbfd1mTe4tBqTQv69u1rsrfe2vj7Gv1F7ptvvrnJUlucLxVrNFG0GUWOitZiv379THbeeee5tzlmzJhC27nrrrtMdsUVV5hs2bJlhW5Pqu2x8ur4vffecy/7yiuvmMxruOI9L73nRlm7eHn7Va1BgHe82nrrrU322muvmWzo0KEm8x6PV1991d32u+++a7JevXqZrJbHqdaa9fZnzz33NNkWW2zhXv+NN94w2Ysvvmgy7/HxXjc83nMI6fGaGAwYMMBkJ598snt977n6/vvvm+yxxx4z2R//+EeTec8/SerZs6fJUjyPKCuvKYf3+uU1Dhk7dqx7my+99JLJfvOb35jMayy2dOlSk3nHK+91Q5KOPvpok40YMcJk3jnHr371K5M9+OCD7na884aix9CUlPOsBAAAAAAyxkQNAAAAABLDRA0AAAAAEsNEDQAAAAAS06WbiXiqLTRcvny5ybxFjTfccIPJzjjjDJOdcsopJjvzzDPdbR900EEm874t/oknnjCZ903zvXv3drfTmc06vOYAG4+nzIuXvfveW0B88MEHm+zQQw91b9Or5UcffdRk3/3ud03mNXvozAXl3gJ5Sdpyyy1N5tW3dzlv7GVtJuIt9PbqS/IXenvNWBYuXGgyb/H4ihUrTLZq1Sp32/379zeZ93gOGzbMZF6TDu9xX7Jkibtt7znoLZofPHiwybz7zFvsL0lTp0412W233WYyr+a9+qz2OCItRRtGHX/88SY77LDD3Nv06mHBggUmu/LKK03m1XafPn3c7VBjncurE+/4sN9++5nMO3eV/HOB3/72tybzGnJ4deI1wTn22GPdbZ9wwgkm85oveU1CbrzxRpNVa4JTltfzcuwFAAAAAJQIEzUAAAAASAwTNQAAAABIDBM1AAAAAEgMzUQ2Uq05grdYceXKlSbzFnheddVVJmttbTXZxIkT3W2feOKJJvMW919zzTUmu+eee0w2f/58dztr1qwxmdeEoBGKLFbuzGYnjebtm9e845Of/KTJhgwZ4t7mokWLTPaTn/zEZG+//bbJvEYkjWgc4tWXd1/stttu7vV33nlnk3mNKt58802Tefu4du1adzu5857b1RoneQ1BNttsM5MNGjTIZN5jN3z4cJN5xy/JP36edNJJJvv4xz9uMq9pifccWL16tbvtp556ymTz5s0z2b777msy73np3WeSNHfuXJN5+120wRA6V7Xn0ca8x+7AAw802amnnmoyr1GDJL311lsm8xqNzZw502TtOf6VuZFXrnbZZReTecfaag01vMd64MCBJvOaInnNnLymZuPGjXO37R0bZ8+ebbIpU6aYbNmyZYVuTyrP8ZJ31AAAAAAgMUzUAAAAACAxTNQAAAAAIDFM1AAAAAAgMW1O1EII14UQFocQnt4guyCEsDCE8GTlz9GNHSZQHDWLHFG3yA01i9xQs8hNkZZ+kyRdKenGjfL/jDFeWvcRJcrreuR1TfI67Hhdam666SaTeZ2/JOm0004z2R577GGyCRMmmGzUqFEm+8Y3vuFuZ+HChSbr37+/yRrRAarIbbZju5NUgpr1ujmOGTPGZF5HP0m69957TfbYY4+ZzOuM1Kxun97YvQ6r++yzj3v9rbbaymReVz+vo5/3fKvWIatJJqlBdet1EOzTp497Wa/Ll1cjXqctL+vdu7fJvG6d1S47YsQIk3lj944PLS0thTLJP57vuuuuJtt///1NtsUWW5jM6+
wrSXPmzDGZV7Pe45CgSSrBsbYW3nPDO65tv/32JjvjjDNM5r22Vzs3mDp1qsluvvlmk3nPf++5Wm07JTNJmdWs93rsHXO8TuRed17Jf03t16+fybwuwEOHDi00nmrHsOXLl5vM6/r4zDPPmMw79lfr7thluj7GGKdKsj1ggURRs8gRdYvcULPIDTWL3NTy6+MzQwhPVd5G9r/kQ1IIYWIIYUYIYUZZZrfIVrtrtszf3YZstFm3G9ZsswcHONpVsxxnkQDOaZGkjk7UfiFpV0mjJLVIuqzaBWOMV8UYR8cYR3fyx4rQtXWoZvmiT3SyQnW7Yc02c3CAo901y3EWnYxzWiSrQ1UWY1wUY1wbY1wn6WpJ9kP7QEKoWeSIukVuqFnkhppFyjrUMSCEMCTG+PcV2cdJenpTl+9KvN8Metmbb75psmuuuca9TW9R+umnn26ygQMHmsxbMPqDH/zA3c4ll1xistdff929bG5Sr1nvoz9e04IBA+wnMlasWOHe5rRp00zmLRb3tt2I33B7C+y9be+1114m++IXv+je5nbbbWeyv/71rya75ZZbTOY9B70F9p2pXnXrPe7vvPOOe9mHH3640PV33HFHk3mNk1555ZVCmeQ3cznvvPNMtnTpUvf6G1u1apXJvAX3kr9I3Wvo4zU38RbHV2taMn/+fJN5i+6b9byst9SPtfXmffzNezy9Jl6f+tSnTOY9xi+//LK77SuvvNJkL730ksm8JhHVmlB1RanX7HvvvWey5557zmSTJk0y2dixY93bHDZsmMkOOOAAk/Xs2dNkXj16xzWvMY7kN416/vnnTfbaa6+ZrDObn3WWNvcuhHCTpEMlDQohLJD0fyUdGkIYJSlKelnSVxo4RqBdqFnkiLpFbqhZ5IaaRW7anKjFGMc78bUNGAtQF9QsckTdIjfULHJDzSI3rIQEAAAAgMQwUQMAAACAxJR7BV4ivEXhvXr1Mlm1RgbeAntvIbC34NTbttcEoNplPd5iZ74Hpzbdu3c3mdcow6ubat/lsnbtWpMVrSVPe1oRe7XoNWvwGjN85zvfMdlBBx3kbsdrdnPttfZTLN5C5aINHMrAu++9RhuSdP3115vsjjvuMNn/+T//x2SDBg0ymbfI/O2333a37TUj8cbpZV69t+d7jrxte01U5syZY7JPfvKTJqvWtMRr/uMd+71tIz3vvvuuybznxpFHHmkyr1HD4sWLTfab3/zG3fajjz5qMhqHlI93fuDV3d13322yBx980L1Nr/nc1ltvbbJXX33VZF6NfuQjHzHZ97//fXfbe+65p8m81wSvbr3zT+/YXya8owYAAAAAiWGiBgAAAACJYaIGAAAAAIlhogYAAAAAiaGZSJ15DRdWr15tMm/Rvdc8QvK/WX7IkCEm8xbOe4vUBwwY4G7Ha67gXd/bH2+BJ3zefeU1e/AW+3qLZpcsWeJux2u04TUo8BpoeHXsXbdas4Zhw4aZ7NRTTzXZySefbLKdd97ZZNUWC0+ZMsVk9957r8m8xg5ebZe1jr3nbLXHzmvU4S1mf/jhh03mNTLw6strqCFJy5cvLzSeRjR98fbRazDiNYDwrustuJf8sdM4JC3e41mtIYd3rPSaH3mv795zcNasWSa75ppr3G17xysah3QN3mPvHVerHVu8ZiQLFiwwmdcYzKvbuXPnutvxeK/nXtMS73lY9sYhHt5RAwAAAIDEMFEDAAAAgMQwUQMAAACAxDBRAwAAAIDE0EykIG9RY9FvSPcaf0yYMMFk48aNc7c9fPjwAiOUli5dajJvcX61RZ/e9b39KWvDhWbxmlh4DQa23HJLk3kLg6s1Zii6eL1v377u9TfmLZrfe++93cueddZZJjvssMNM5jWw8Zqg3HPPPe52fvjDH5rMawDRo4c91HX1OvbqsBpv4bnXoMS774s2sJGqNzhpBm/bXmMG73jsPVe9JiiSf0xl0XxavMfdew5I0i677GKyY445xmReox3v2O0d66pt22tCVe2yKD/vuOo1A9lUXkT//v1N5jVP8mpeKt64yXuN9q5bdryjBgAAAACJYaIGAAAAAIlhogYAAAAAiWGiBgAAAACJaXOiFkLYIYTwYAhhdgjhmRDCWZV8YAjhvhDC3MrfAxo/XKBt1CxyQ80iR9QtckPNIjdFuj6ukfTNGOOsEMIWkmaGEO6TdJqk+2OMF4cQzpV0rqRzGjfU6p3CivA6enmZ19muWt6zZ0+TDR482GTf/va3TXbiiSeazOtMJ/mdoebNm2eyp556ymReh8fp06e721m5cqXJOrMLWw2SqVmPVzfe/ex1VurVq5fJtt12W3c7RxxxhMkWLlxoMq9L2M4772yyo446ymRjx451tz106FCTeXU8c+ZMk91xxx0mu/XWW93ttLa2msx7rmbQ4THpmvV4NZtzxznv9WXQoEEm23HHHU1WtAOw5Hct8zpjVnstSkx2dVuE9xhV6zb3yU9+0mReJ0ivq/Jf//pXk917770m8zqsStXPGbBJpazZZvKOV95rsdfJUfK7kXtdI73OuV634rJ3gmzzlSDG2BJjnFX5uVXSs5KGShon6YbKxW6QdGyjBgm0BzWL3FCzyBF1i9xQs8hNu34dE0IYLmkfSdMlDY4xtlT+63VJ9q2k9deZKGli5eeOjhPoEGoWuaFmkaP21i01i87GsRY5KPzZihBCP0lTJH0jxviB9zPj+s+MuJ9LjDFeFWMcHWMcnclHOVAS9ahZDsRopnrUbBOGCXxAR+qW4yw6E+e0yEWhKgshbKb1BT05xnhbJV4UQhhS+f8hkuwHTIFOQs0iN9QsckTdIjfULHLS5kcfw/pfdV0r6dkY4483+K+7JE2QdHHl7zsbMsINeL+98BaA9+nTp1DWu3fvQtuQ/OYKo0aNMpnXXGG//fZzb3NjS5YscfMZM2aY7OabbzbZ1KlTTbZmzRqTVVvw7+XeYuXUf/uZUs16vCYD3n366quvmsxbVL7FFlu42znwwANN9uEPf9hkXhOZ3XbbzWQDBtgmWF59SdIzzzxjsr/85S8mu+KKK0zm7Xe1pjbe/ZZ6fXpSr9muylsg//jjj5tsxIgRJvMWx0v+a1auv5kva916x5Bqx7rhw4cXuk2vHu677z6TvfTSSybbaqut3Nv0mjpg08pas83knYd4xzWvgZjkNwnxzj+98/auWPNF1qj9o6RTJP0thPBkJTtP64v51hDClyTNl/SZxgwRaDdqFrmhZpEj6ha5oWaRlTYnajHGaZKq/Yr68PoOB6gdNYvcULPIEXWL3FCzyE2en7cAAAAAgBJjogYAAAAAien0r7X3FiBW07NnT5MNGjTIZAcddJDJjjnmGJMNHmy/JsP71nNJ2nLLLU3mNVfwsrfffttkc+bMMdn//M//uNueMmWKybzGI17jD68JQ7VF0dX2HfXlLaT1GnpMnjzZZF7Tgn333dfdzrBhw0w2dOhQk3mLczfffHOTLVu2zGRegxBJ+tGPfmSyF1980WReswavsUK1BiE5Ng5BPrxF888//7zJvEXz3nUlqVevXrUPDA3lv
W56DcUk6R//8R9N5jUqe+ONN0w2bdo0k3mv49UaKFRrsgQ0kvca3bdvX5N558OSfwwcOXKkyR566CGTeecmXoO2MuEdNQAAAABIDBM1AAAAAEgMEzUAAAAASAwTNQAAAABITKc3E/F4C3ElacyYMSb79Kc/bTKvmYi3EPi9994zWbUF4N5iRa8RQktLi8n++7//22S33HKLyRYuXOhu2xundx9VaxKCtHgNdLxGOQsWLDDZFVdcYTKvUY4kHXrooSbzFqq/9dZbJvMaf9x4440mq9ZMxHu+eAuQu3fv7l4fSIG3cH3evHkm8+p9u+22c2+zPQ200Dm8Y9UBBxzgXtY7t/COa6+99prJvFrymnp15aYhNIxKj1ejW2+9deHre81E/uEf/sFkTzzxhMneeeedwtspC95RAwAAAIDEMFEDAAAAgMQwUQMAAACAxDBRAwAAAIDEdHozEW+haLVvGfcadUyfPt1kS5cuNZm3UNFbEOk1VpCk2bNnm2zWrFkmmzt3rsm8JiFe05JqC4a9cbLAtly8xevvvvuuyR555BGTVWvo4TUo8RrOeAvfvQY2K1euLHRdyW9aAuTGey16+eWXTbZ48eLCt+k9j7zjOU1HOk/R5mGS9PTTT5usX79+JvOainkNRvr06WOyrvx6z/MgD97j9Oqrr7qX3WWXXUy2/fbbm8x7Lrz99tsmK/vzg3fUAAAAACAxTNQAAAAAIDFM1AAAAAAgMW1O1EIIO4QQHgwhzA4hPBNCOKuSXxBCWBhCeLLy5+jGDxdoGzWL3FCzyPImTC4AAAcaSURBVA01ixxRt8hNkRX/ayR9M8Y4K4SwhaSZIYT7Kv/3nzHGS+s9KK/hgeQ36vAWcd95550m8xYHe40QVqxY4W7ba/7hLWD0Gn94jSJottBQTa/ZRii6QLbaYuv333+/0PW92vaeL15zEtRNKWq2bLzXCK/h1G9+8xuTec2AJP+55b1GVGuqlZDS1qx3TPUal0nShAkTTOadw3hNZGgU1ilKW7edyTtezZgxw73sXnvtZTKvmV7//v1N5p3zV5szlEWbs4UYY4uklsrPrSGEZyUNbfTAgI6iZpEbaha5oWaRI+oWuWnXGrUQwnBJ+0j6+6+WzgwhPBVCuC6EMKDOYwNqRs0iN9QsckPNIkfULXJQeKIWQugnaYqkb8QYl0n6haRdJY3S+t9OXFblehNDCDNCCDOqfU8Y0Aj1qFm+wwXNVI+abdpgAXGcRZ44p0UuCk3UQgibaX1BT44x3iZJMcZFMca1McZ1kq6WtL933RjjVTHG0THG0d7n8IFGqFfNsl4AzVKvmm3eiNHVcZxFjjinRU6KdH0Mkq6V9GyM8ccb5EM2uNhxkp6u//CA9qNmkRtqFrmhZpEj6ha5CW195CCEcLCkhyX9TdLf3+c9T9J4rX+LOEp6WdJXKos0q+rRo0fs169fjUP+IK9rktepy+Pte3s6bZW904zkdyPzePdbvX9Lunz5cq1Zs6bNG61nzXbv3j327du3lmGjC1uxYoXWrl3b1Jrt1q1b7NWrVy3DxiZ4x7oBA+xyllWrVrnX97qsFtWsd55WrVo1s613Z+t9nO3du3etw24o71xD8js4ex+J884XvMvxMdCOWbFiRZs1K9X/nHaLLbaoZdhZ8s4LvVrefvvt3evvt99+Jps/f77JZs+ebTLv+JtBh1xXa2troXPaIl0fp0nybuh/OjIwoNGoWeSGmkVuqFnkiLpFbviALQAAAAAkhokaAAAAACSGiRoAAAAAJKbNNWqpe//99wtl6BhvAbS32LloAxcAyJl3rGttba3pNmlPn75q5xWcb6Cr8RqHeA09Fi1a5F5/2rRpJluxYkWh28y1cUgteEcNAAAAABLDRA0AAAAAEsNEDQAAAAASw0QNAAAAABITvMYQDdtYCEsk/f3rxwdJeqNpG2+sMu2LlO7+7BRj3KaZG6Rms5Hq/lCz9VOmfZHS3p+m1m2Ja1Yq1/6kvC+deaxN+X7piDLtT8r7UqhmmzpR+8CGQ5gRYxzdKRuvszLti1S+/amXMt0vZdoXqXz7Uy9lul/KtC9S+fanXsp2v5Rpf8q0L/VUtvulTPtThn3ho48AAAAAkBgmagAAAACQmM6cqF3ViduutzLti1S+/amXMt0vZdoXqXz7Uy9lul/KtC9S+fanXsp2v5Rpf8q0L/VUtvulTPuT/b502ho1AAAAAICPjz4CAAAAQGKaPlELIRwVQpgTQnghhHBus7dfqxDCdSGExSGEpzfIBoYQ7gshzK38PaAzx1hUCGGHEMKDIYTZIYRnQghnVfIs96dRqNl0ULPFULPpoGaLy7luy1SzEnVbVM41K5Wrbstas02dqIUQukv6maRPShohaXwIYUQzx1AHkyQdtVF2rqT7Y4y7S7q/8u8crJH0zRjjCEljJH218njkuj91R80mh5ptAzWbHGq2gBLU7SSVp2Yl6rZNJahZqVx1W8qabfY7avtLeiHGOC/GuFrSzZLGNXkMNYkxTpX01kbxOEk3VH6+QdKxTR1UB8UYW2KMsyo/t0p6VtJQZbo/DULNJoSaLYSaTQg1W1jWdVummpWo24KyrlmpXHVb1ppt9kRtqKRXN/j3gkqWu8ExxpbKz69LGtyZg+mIEMJwSftImq4S7E8dUbOJomaromYTRc1uUhnrthSPMXVbVRlrVirBY1ymmqWZSJ3F9W00s2qlGULoJ2mKpG/EGJdt+H857g/aJ8fHmJrt2nJ8jKnZri3Xx5i67dpyfIzLVrPNnqgtlLTDBv8eVslytyiEMESSKn8v7uTxFBZC2EzrC3pyjPG2Spzt/jQANZsYarZN1GxiqNlCyli3WT/G1G2bylizUsaPcRlrttkTtccl7R5C2DmEsLmkkyXd1eQxNMJdkiZUfp4g6c5OHEthIYQg6VpJz8YYf7zBf2W5Pw1CzSaEmi2Emk0INVtYGes228eYui2kjDUrZfoYl7ZmY4xN/SPpaEnPS3pR0neavf06jP8mSS2S3tf6zyN/SdLWWt9JZq6kP0oa2NnjLLgvB2v9W8BPSXqy8ufoXPengfcTNZvIH2q28P1EzSbyh5pt132Vbd2WqWYr+0PdFrufsq3ZyvhLU7dlrdlQ2TkAAAAAQCJoJgIAAAAAiWGiBgAAAACJYaIGAAAAAIlhogYAAAAAiWGiBgAAAACJYaIGAAAAAIlhogYAAAAAiWGiBgAAAACJ+f8A/hEPExCPZKIAAAAASUVORK5CYII=\n", + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAA2cAAAGeCAYAAAAQSXmdAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4xLjEsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy8QZhcZAAAgAElEQVR4nO3debyd473///clkhAZCBppEmIIRU9EG8NRD3IIVdRYNK25D6FoaUupaqvHeBRtDR1iiqqv4RQVQ4tjbksqQokQMYXElggi8yTX74+s/pr6fFb2nb3WXuu67v16Ph4eSd7WWvd17/3O2uvK2vdnhxijAAAAAADNtVqzFwAAAAAAYHMGAAAAAElgcwYAAAAACWBzBgAAAAAJYHMGAAAAAAlgcwYAAAAACahpcxZC2CuEMCmE8GoI4cx6LQpoL3QWOaK3yA2dRW7oLFIR2vpzzkIInSS9ImkPSVMlPS1pRIxx4kruww9VQ01ijKGt96WzaIZaOiutem/pLOpgZoxx/bbemc6iCRra2cp96C1qUu31QS3vnG0v6dUY4+sxxsWSbpG0fw2PB7Q3Oosc0Vs02pQa709n0Wh0FqVRy+asn6S3V/jz1EoGpIrOIkf0Frmhs8gNnUUyVm/vA4QQRkoa2d7HAeqFziI3dBa5obPIEb1FI9SyOZsmacAKf+5fyf5NjHGUpFES35+LpqOzyFGrvaWzSAydRW54fYBk1PJtjU9LGhRC2DiE0EXSVyWNqc+ygHZBZ5Ejeovc0Fnkhs4iGW1+5yzGuDSEcLKk+yV1knRdjPHFuq0MqDM6ixzRW+SGziI3dBYpafMo/TYdjLeAUaNax5KvKjqLWtFZZOiZGOPQRh2MzqIOGtpZid6idu0xSh8AAAAAUCdszgAAAAAgAWzOAAAAACABbM4AAAAAIAFszgAAAAAgAWzOAAAAACABbM4AAAAAIAFszgAAAAAgAWzOAAAAACABbM4AAAAAIAFszgAAAAAgAWzOAAAAACABbM4AAAAAIAGrN3sBAMrn85//vMlOPvlkkx155JEm+93vfmeyK664wj3O+PHj27A6AACANPHOGQAAAAAkgM0ZAAAAACSAzRkAAAAAJKCma85CCG9KmiPpY0lLY4xD67EooD3RW+SGziI3dBa5obNIRYgxtv3Oy4s8NMY4s+Dt236wjHXq1MlkvXr1qukxveEK3bp1M9kWW2xhspNOOsl9zEsuucRkI0aMMNnChQtNdtFFF5nspz/9qXucWsQYQ62PsSq97aidLWrIkCFu/vDDD5usZ8+ebT7ORx995Obrrrtumx+zUegsVrT77rub7KabbjLZrrvuarJJkya1y5ocz9T6wpTOpu3ss882WbWv2autZr/JatiwYSZ77LHHal5XDRra2crt6S1qUu31Ad/WCAAAAAAJqHVzFiU9EEJ4JoQwsh4LAhqA3iI3dBa5obPIDZ1FEmr9OWc7xxinhRA+JenBEMLLMcbHV7xBpeCUHClZaW/pLBJEZ5EbOovc8JoWSajpnbMY47TKrzMk3Slpe+c2o2KMQ7mwEqlorbd0Fqmhs8gNnUVueE2LVLT5nbMQwlqSVosxzqn8fk9J/123lTXJhhtu6OZdunQx2U477WSynXfe2WRrr722yQ4++OA2rG7VTZ061WSXX365e9sDDzzQZHPmzDHZP/7xD5M1+ULgwsra20bYfnvzdUq33367e1tv4I03fMjr1+LFi01WbfDHjjvuaLLx48cXesxcpNDZXXbZxWTe5+TOO+9sxHKytt1225ns6aefbsJK2k8KncW/HH300SY744wzTLZs2bLCj1nLMLkU0VmkpJZva+wj6c4Qwj8f5//FGP9cl1UB7YfeIjd0Frmhs8gNnUUy2rw5izG+LmmbOq4FaHf0Frmhs8gNnUVu6CxSwih9AAAAAEgAmzMAAAAASECto/SzNmTIEJM9/PDD7m29AQep8S7mPfvss002d+5c9/433XSTyVpaWkz24YcfmmzSpElFlogEdevWzWSf+9znTPb73//eZH379q3p2JMnTzbZxRdfbLJbbrnFvf9f//pXk3mdv/DCC9uwOvzTsGHDTDZo0CCTMRDkX1Zbzf+3z4033thkG220kckq174ANfP6tcYaazRhJSiDHXbYwWSHH364yXbddVf3/ltvvXWh45x22mkme+edd0zmDeKT/NcsY8eOLXTsZuOdMwAAAABIAJszAAAAAEgAmzMAAAAASACbMwAAAABIQIceCPLWW2+Z7P3333dv24iBINUuVJw1a5bJ/uu//stkixcvNtmNN95Y+8JQar/97W9NNmLEiIYc2xs80r17d5M99thj7v29QRWDBw+ueV34d0ceeaTJnnzyySasJB/VhuUcd9xxJvMuXH/55ZfrviaU3/Dhw032rW99q9B9q3Vu3333Ndn06dNXbWHI0mGHHWayX/7ylyZbb731TFZtqNGjjz5qsvXXX99kP/vZzwqssPpxvMf86le/Wugxm413zgAAAAAgAWzOAAAAACABbM4AAAAAIAFszgAAAAAgAWzOAAAAACABHXpa4wcffGCy008/3b2tN63o2WefNdnll19e6NjPPfecyfbYYw/3tvPmzTPZ1ltvbbJTTjml0LHRcX3+85832T777GOyatOPPqnaFMW7777bZJdcconJ3nnnHZN5f68+/PBD9zi77babyYquHcWtthr/jreqrrnmmsK3nTx5cjuuBGW18847m+z66683WdFp09Wm402ZMmXVFobkrb66ffk/dOhQk1199dUm69atm8kef/xxk5177rnusf/yl7+YrGvXria77bbbTLbnnnu6j+kZN25c4dumhq+4AAAAAJAANmcAAAAAkAA2ZwAAAACQgFY3ZyGE60IIM0IIE1bIeocQHgwhTK78uk77LhNYNfQWuaGzyA2dRW7oLHIQYowrv0EIu0iaK+l3McbPVrKLJX0QY7wohHCmpHVijGe0erAQVn6whPXs2dNkc+bMMdlvf/tbk33jG98w2eGHH26ym2++uY2r6zhijIWmPdSrtzl3dsiQISZ7+OGHTeZ12/OnP/3JZCNGjHBvu+uuu5ps8ODBJvOGJrz33nuF1iNJH3/8scnmz59faD3jx48vfJxa5NZZ7/P05JNPmuyOO+4w2RFHHFHLoUvlb3/7m5vvuOOOJttpp51M9tRTT9V9TavgmRijnQ7wCal0tqPyhjUce+yxhe776KOPmmz33XevdUnN1NDOVu6XbW+PPvpokxUdYvTggw+a7LDDDjPZ7NmzC6/He008evToQvedNm2am3sDTlbl9UUjVHt90Oo7ZzHGxyV9cqzh/pJuqPz+BkkH1LQ6oM7oLXJDZ5EbOovc0FnkoK2j9PvEGFsqv39XUp9qNwwhjJQ0so3HAeqpUG/pLBJCZ5EbOovc8JoWSan555zFGOPK3tqNMY6SNErK+y1glMvKektnkSI6i9zQWeSG17RIQVunNU4PIfSVpMqvM+q3JKDd0Fvkhs4iN3QWuaGzSEpb3zkbI+koSRdVfr
2rbitKVNELGz/66KNCtzvuuONMduutt7q3XbZsWaHHRKtK2dvNN9/czU8//XST9erVy2QzZ840WUtLi8luuOEGk82dO9c99r333lsoaw9rrrmmyb73ve+Z7Otf/3ojllOrhnd27733Npn3McW/9Oljvwtq4403Lnz/ahe0Z6qUz7PNtN5667m5N/zDe70wa9Ysk5133nm1L6w8StvZc889183POussk3kDAn/1q1+Z7OyzzzbZqgz/8Pzwhz9s832//e1vu3lqwz9WRZFR+jdLelLSFiGEqSGEb2h5gfcIIUyWNLzyZyAZ9Ba5obPIDZ1FbugsctDqO2cxRn9WtpT1zFWUG71FbugsckNnkRs6ixy09ZozAAAAAEAdsTkDAAAAgATUPEof/+6cc84x2ec//3mT7brrriYbPny4+5gPPPBAzetCOXTt2tVkl1xyiXtbb7DDnDlzTHbkkUeabNy4cSbLeSjEhhtu2OwlZGOLLbYodLsXX3yxnVeSD+/voDckRJJeeeUVk3l/L9ExDRw40GS33357TY95xRVXmOyRRx6p6TGRnh//+Mcm8wZ/SNLixYtNdv/995vsjDPOMNmCBQsKrWeNNdZw8z333NNk3tfoEILJvEE2d91Vmvkt/z/eOQMAAACABLA5AwAAAIAEsDkDAAAAgASwOQMAAACABDAQpM7mzZtnsuOOO85k48ePN9nVV1/tPqZ34a43sOGqq64ymfcT35Gvbbfd1mTe4I9q9t9/f5M99thjNa0JHdPTTz/d7CXUVc+ePU221157mezwww83mXeBezXnnnuuyWbNmlX4/ig3r3ODBw8ufP+HHnrIZL/85S9rWhPSs/baa5vsxBNPNFm114De8I8DDjigzevZbLPNTHbTTTe5t/WG5Hn+8Ic/mOziiy9etYVlinfOAAAAACABbM4AAAAAIAFszgAAAAAgAWzOAAAAACABDARpgNdee81kRx99tMmuv/569/5HHHFEoWyttdYy2e9+9zuTtbS0uMdB+i677DKThRDc23qDPso2/GO11ey/Ly1btqwJK+l4evfuXffH3GabbUzm9Xv48OEm69+/v8m6dOlisq9//evusb0uLViwwGRjx4412aJFi0y2+ur+l9dnnnnGzdHxeAMYLrroosL3/8tf/mKyo446ymQfffTRqi0MyfOe29Zbb73C9//2t79tsk996lMmO+aYY0y23377meyzn/2sybp37+4e2xtS4mW///3vTeYN3Ssj3jkDAAAAgASwOQMAAACABLA5AwAAAIAEsDkDAAAAgAS0ujkLIVwXQpgRQpiwQnZOCGFaCOG5yn97t+8ygeLoLHJEb5EbOovc0FnkoMi0xtGSrpT0ybF/P48xXlL3FXUQd955p8kmT57s3tab0Lf77rub7IILLjDZRhttZLLzzz/fPc60adPcPEOjVYLO7rvvviYbMmSIybwpR5I0ZsyYuq8pNd5kRu/j8dxzzzViObUarQR6600o9D6mv/nNb0x21lln1XTswYMHm8yb1rh06VKTzZ8/32QTJ0402XXXXecee9y4cSbzpptOnz7dZFOnTjXZmmuu6R7n5ZdfdvNMjVYCnc3BwIEDTXb77bfX9Jivv/66ybx+4t+MVgk6u3jxYpO99957Jlt//fXd+7/xxhsmq/Zaooh33nnHZLNnz3Zv27dvX5PNnDnTZHfffXeb15O7Vt85izE+LumDBqwFqAs6ixzRW+SGziI3dBY5qOWas5NDCM9X3iJep9qNQggjQwjjQgj2nyWBxqKzyFGrvaWzSAydRW54fYBktHVz9mtJm0oaIqlF0qXVbhhjHBVjHBpjHNrGYwH1QGeRo0K9pbNICJ1Fbnh9gKS0aXMWY5weY/w4xrhM0tWStq/vsoD6orPIEb1FbugsckNnkZoiA0GMEELfGGNL5Y8HSpqwstujmAkT/A/joYcearIvf/nLJrv++utNdvzxx5ts0KBB7nH22GOP1paYrRw76w0U6NKli8lmzJjh3v/WW2+t+5oaoWvXriY755xzCt//4YcfNtkPfvCDWpbUNM3o7YknnmiyKVOmmGynnXaq+7Hfeustk/3xj3802UsvvWSyp556qu7r8YwcOdJk3kX33rCGjiDH59pGOOOMM0zmDTRaFRdddFFN98dyOXZ21qxZJjvggANMds8997j37927t8lee+01k911110mGz16tMk++MBexnfLLbe4x/YGglS7bUfV6uYshHCzpGGS1gshTJX0E0nDQghDJEVJb0qyOwCgSegsckRvkRs6i9zQWeSg1c1ZjHGEE1/bDmsB6oLOIkf0Frmhs8gNnUUOapnWCAAAAACoEzZnAAAAAJCANg0EQWN5F37eeOONJrvmmmtMtvrq9lO8yy67uMcZNmyYyR599NHWF4imWrRokZu3tLS4eUq84R9nn322yU4//XT3/lOnTjXZpZfaKchz585tw+rwT//zP//T7CUkY/fddy90u9tvv72dV4JUDRkyxGR77rlnmx/PG8ogSZMmTWrzY6J8xo4dazJvWFF78F5X7rrrru5tvUE4HXWAUjW8cwYAAAAACWBzBgAAAAAJYHMGAAAAAAlgcwYAAAAACWAgSEIGDx7s5l/5yldMtt1225nMG/7hmThxops//vjjhe6PtIwZM6bZSyjEu0jeG/Rx2GGHmazaBfEHH3xw7QsD2sGdd97Z7CWgSR544AGTrbPOOoXu+9RTT5ns6KOPrnVJQLtac801TeYN/pCkGKPJbrnllrqvKWe8cwYAAAAACWBzBgAAAAAJYHMGAAAAAAlgcwYAAAAACWAgSANsscUWJjv55JNNdtBBB7n332CDDdp87I8//thkLS0t7m2rXbyJ5gghFMoOOOAA9/6nnHJK3ddU1He+8x2T/ehHPzJZr169THbTTTeZ7Mgjj6zPwgCgna277romK/r19Ve/+pXJ5s6dW/OagPZ0//33N3sJpcI7ZwAAAACQADZnAAAAAJAANmcAAAAAkIBWN2chhAEhhEdCCBNDCC+GEE6p5L1DCA+GECZXfi32ExaBdkZnkRs6ixzRW+SGziIHRd45WyrpezHGrSTtKOmkEMJWks6U9FCMcZCkhyp/BlJAZ5EbOosc0Vvkhs4iea1Oa4wxtkhqqfx+TgjhJUn9JO0vaVjlZjdIelTSGe2yykR5UxRHjBhhMm8y48CBA+u+nnHjxpns/PPPN9mYMWPqfuyUlKWzMcZCWbVpnpdffrnJrrvuOpO9//77Jttxxx1NdsQRR5hsm222cY/dv39/k7311lsm8yY8edPKyq4sne2ovCmqm2++uXvbp556qr2X0zD0Vrr++utNttpqbb9i5G9/+1sty0Er6Gz7+OIXv9jsJZTKKj2DhBAGStpW0lhJfSoll6R3JfWp68qAOqCzyA2dRY7oLXJDZ5Gqwj/nLITQXdLtkk6NMc5e8V8KY4wxhGD/SX/5/UZKGlnrQoFVRWeRGzqLHLWlt3QWzcRzLVJW6J2zEEJnLS/xTTHGOyrx9BBC38r/7ytphnffGOOoGOPQGOPQeiwYKILOI
jd0Fjlqa2/pLJqF51qkrsi0xiDpWkkvxRgvW+F/jZF0VOX3R0m6q/7LA1YdnUVu6CxyRG+RGzqLHBT5tsYvSDpC0gshhOcq2VmSLpJ0WwjhG5KmSDq0fZbYWH36+N9mvNVWW5nsyiuvNNlnPvOZuq9p7NixJvvZz35msrvuss8ly5Ytq/t6MtChOtupUyc3P/HEE0128MEHm2z27NkmGzRoUE1r8i5qf+SRR0z24x//uKbjlEiH6mzZeIN6ahkKkZEO09shQ4a4+fDhw03mfd1dvHixya666iqTTZ8+vQ2rwyroMJ1tpE022aTZSyiVItMa/yLJjqJabvf6LgeoHZ1FbugsckRvkRs6ixx0iH/aAwAAAIDUsTkDAAAAgASwOQMAAACABBT+OWe56927t8l++9vfmqzaRb/1vtjRG5hw6aWXure9//77TbZgwYK6rgfpefLJJ0329NNPm2y77bYr/JgbbLCByaoNwfmk999/32S33HKLe9tTTjml8JqAMvrP//xPNx89enRjF4K6WHvttd3ce071TJs2zWSnnXZaTWsCUvHEE0+YrNpQpA46qG6V8M4ZAAAAACSAzRkAAAAAJIDNGQAAAAAkgM0ZAAAAACQg+4EgO+ywg8lOP/10k22//fYm69evX93XM3/+fJNdfvnlJrvgggtMNm/evLqvB/maOnWqyQ466CCTHX/88e79zz777DYf+5e//KXJfv3rX5vs1VdfbfMxgLIIodrPtAWA8pswYYLJJk+e7N7WG7C36aabmuy9996rfWGZ4p0zAAAAAEgAmzMAAAAASACbMwAAAABIAJszAAAAAEhA9gNBDjzwwEJZURMnTnTze+65x2RLly412aWXXmqyWbNmtXk9wIpaWlpMds4557i3rZYDaLs//elPJjvkkEOasBI00ssvv+zmf/vb30y28847t/dygOR5g+8k6ZprrjHZ+eefb7JvfetbJqv2Gr1seOcMAAAAABLA5gwAAAAAEsDmDAAAAAAS0OrmLIQwIITwSAhhYgjhxRDCKZX8nBDCtBDCc5X/9m7/5QKto7PIDZ1FbugsckRvkYMQY1z5DULoK6lvjHF8CKGHpGckHSDpUElzY4yXFD5YCCs/GNCKGGNo7TZ0Fimhs8jQMzHGoSu7AZ1FYlrtrERvG6lnz55uftttt5ls+PDhJrvjjjtMdswxx5hs3rx5bVhdGqq9Pmh1WmOMsUVSS+X3c0IIL0nqV9/lAfVDZ5EbOovc0FnkiN4iB6t0zVkIYaCkbSWNrUQnhxCeDyFcF0JYp85rA2pGZ5EbOovc0FnkiN4iVYU3ZyGE7pJul3RqjHG2pF9L2lTSEC3/Vwj7A76W329kCGFcCGFcHdYLFEZnkRs6i9zQWeSI3iJlhTZnIYTOWl7im2KMd0hSjHF6jPHjGOMySVdL2t67b4xxVIxxaJHvBQbqhc4iN3QWuaGzyBG9ReqKTGsMkq6V9FKM8bIV8r4r3OxASRPqvzxg1dFZ5IbOIjd0Fjmit8hBkWmNO0t6QtILkpZV4rMkjdDyt3+jpDclHV+50HJlj8VkG9Sk4OQ7Ootk0FlkqMi0RjqLlBSd1khvm8yb4nj++eeb7Jvf/KbJBg8ebLKJEyfWZ2FNUMu0xr9I8u58X62LAtoDnUVu6CxyQ2eRI3qLHKzStEYAAAAAQPtgcwYAAAAACWBzBgAAAAAJaHUgSF0PxsWTqFGR4Qr1RGdRKzqLDBUarlAvdBZ10NDOSvQWtav2+oB3zgAAAAAgAWzOAAAAACABbM4AAAAAIAFszgAAAAAgAa3+EOo6mylpSuX361X+XAZlOhcp3fPZqAnHpLN5SPV86Gz9lOlcpLTPp9G9LWtnpXKdT8rn0szn2pQ/Lm1RpvNJ+Vyqdrah0xr/7cAhjGv0ZJ32UqZzkcp3PvVSpo9Lmc5FKt/51EuZPi5lOhepfOdTL2X7uJTpfMp0LvVUto9Lmc4n13Ph2xoBAAAAIAFszgAAAAAgAc3cnI1q4rHrrUznIpXvfOqlTB+XMp2LVL7zqZcyfVzKdC5S+c6nXsr2cSnT+ZTpXOqpbB+XMp1PlufStGvOAAAAAAD/wrc1AgAAAEACGr45CyHsFUKYFEJ4NYRwZqOPX6sQwnUhhBkhhAkrZL1DCA+GECZXfl2nmWssKoQwIITwSAhhYgjhxRDCKZU8y/NpL3Q2HXS2GDqbDjpbXM69LVNnJXpbVM6dlcrV2zJ1tqGbsxBCJ0lXSfqSpK0kjQghbNXINdTBaEl7fSI7U9JDMcZBkh6q/DkHSyV9L8a4laQdJZ1U+Xzkej51R2eTQ2dbQWeTQ2cLKEFvR6s8nZXobatK0FmpXL0tTWcb/c7Z9pJejTG+HmNcLOkWSfs3eA01iTE+LumDT8T7S7qh8vsbJB3Q0EW1UYyxJcY4vvL7OZJektRPmZ5PO6GzCaGzhdDZhNDZwrLubZk6K9HbgrLurFSu3paps43enPWT9PYKf55ayXLXJ8bYUvn9u5L6NHMxbRFCGChpW0ljVYLzqSM6myg6WxWdTRSdXaky9rYUn2N6W1UZOyuV4HOce2cZCFJncfn4y6xGYIYQuku6XdKpMcbZK/6/HM8HqybHzzGd7dhy/BzT2Y4t188xve3Ycvwcl6Gzjd6cTZM0YIU/969kuZseQugrSZVfZzR5PYWFEDpreYlvijHeUYmzPZ92QGcTQ2dbRWcTQ2cLKWNvs/4c09tWlbGzUsaf47J0ttGbs6clDQohbBxC6CLpq5LGNHgN7WGMpKMqvz9K0l1NXEthIYQg6VpJL8UYL1vhf2V5Pu2EziaEzhZCZxNCZwsrY2+z/RzT20LK2Fkp089xqTobY2zof5L2lvSKpNck/bDRx6/D+m+W1CJpiZZ/f/E3JK2r5RNgJkv6P0m9m73Ogueys5a/vfu8pOcq/+2d6/m048eJzibyH50t/HGis4n8R2dX6WOVbW/L1NnK+dDbYh+nbDtbWX9pelumzobKCQEAAAAAmoiBIAAAAACQADZnAAAAAJAANmcAAAAAkAA2ZwAAAACQADZnAAAAAJAANmcAAAAAkAA2ZwAAAACQADZnAAAAAJAANmcAAAAAkAA2ZwAAAACQADZnAAAAAJAANmcAAAAAkAA2ZwAAAACQADZnAAAAAJAANmcAAAAAkAA2ZwAAAACQADZnAAAAAJAANmcAAAAAkAA2ZwAAAACQADZnAAAAAJAANmcAAAAAkAA2ZwAAAACQADZnAAAAAJAANmcAAAAAkAA2ZwAAAACQADZnAAAAAJAANmcAAAAAkAA2ZwAAAACQADZnAAAAAJAANmcAAAAAkAA2ZwAAAACQADZnAAAAAJAANmcAAAAAkAA2ZwAAAACQADZnAAAAAJAANmcAAAAAkAA2ZwAAAACQADZnAAAAAJAANmcAAAAAkAA2ZwAAAACQADZnAAAAAJAANmcAAAAAkAA2ZwAAAACQADZnAAAAAJAANmcAAAAAkAA2ZwAAAACQADZnAAAAAJAANmcAAAAAkAA2ZwAAAACQADZnAAAAAJAANmcAAAAAkAA2ZwAA
AACQADZnAAAAAJAANmcAAAAAkAA2ZwAAAACQADZnAAAAAJAANmcAAAAAkAA2ZwAAAACQADZnAAAAAJAANmcAAAAAkAA2ZwAAAACQADZnAAAAAJAANmcAAAAAkAA2ZwAAAACQADZnAAAAAJAANmcAAAAAkAA2ZwAAAACQADZnAAAAAJAANmcAAAAAkAA2ZwAAAACQADZnAAAAAJAANmcAAAAAkAA2ZwAAAACQADZnAAAAAJAANmcAAAAAkICaNmchhL1CCJNCCK+GEM6s16KA9kJnkSN6i9zQWeSGziIVIcbYtjuG0EnSK5L2kDRV0tOSRsQYJ67kPm07GFARYwxtvS+dRTPU0llp1XtLZ1EHM2OM67f1znQWTdDQzlbuQ29Rk2qvD2p552x7Sa/GGF+PMS6WdIuk/Wt4PKC90VnkiN6i0abUeH86i0ajsyiNWjZn/SS9vVC8aSoAACAASURBVMKfp1YyIFV0Fjmit8gNnUVu6CySsXp7HyCEMFLSyPY+DlAvdBa5obPIDZ1FjugtGqGWzdk0SQNW+HP/SvZvYoyjJI2S+P5cNB2dRY5a7S2dRWLoLHLD6wMko5Zva3xa0qAQwsYhhC6SvippTH2WBbQLOosc0Vvkhs4iN3QWyWjzO2cxxqUhhJMl3S+pk6TrYowv1m1lQJ3RWeSI3iI3dBa5obNISZtH6bfpYLwFjBrVOpZ8VdFZ1IrOIkPPxBiHNupgdBZ10NDOSvQWtWuPUfoAAAAAgDphcwYAAAAACWBzBgAAAAAJYHMGAAAAAAlgcwYAAAAACWBzBgAAAAAJYHMGAAAAAAlgcwYAAAAACVi92QsA0HGttpr996EY7c/19DIAAICy4Z0zAAAAAEgAmzMAAAAASACbMwAAAABIAJszAAAAAEgAA0GwUp06dTJZly5dTLZo0SKTLVu2rF3WhHSEENy8T58+Jvv+979vso033rjQY5522mnucV577TWTMTwEZVDt79Yn0XfUyxprrOHm66yzjslmzpxpsiVLltR9TUBHxDtnAAAAAJAANmcAAAAAkAA2ZwAAAACQgJquOQshvClpjqSPJS2NMQ6tx6KA9kRvkRs6i9zQWeSGziIV9RgI8l8xRntlaKaqXYTtXSjrDcvwLpz1BmOsv/76Jps1a1bhY3fv3t1km222mcn22Wcfk3360592j+MNZ5g3b57J3n33XZNdcMEFJnvqqadMtnDhQvfYTbiovVS9bZaePXu6+RVXXGGyPfbYw2Te36GPP/7YZFdeeaV7nK9+9asmq/b3qATobEZWW83/xpQNNtjAZDvssIPJnn/+eZNNmTLFZEuXLm3D6hqGzibA6+Ihhxxisuuvv969/4IFC0x29NFHm+yee+4xWYYDa+gsmo5vawQAAACABNS6OYuSHgghPBNCGFmPBQENQG+RGzqL3NBZ5IbOIgm1flvjzjHGaSGET0l6MITwcozx8RVvUCk4JUdKVtpbOosE0Vnkhs4iN7ymRRJqeucsxjit8usMSXdK2t65zagY41AurEQqWustnUVq6CxyQ2eRG17TIhVtfucshLCWpNVijHMqv99T0n/XbWV15l0Q261bN5Ntuumm7v33228/k332s5812UYbbWSytdZay2Rrr722yVZf3f90eENKvEEK3pCQzp07m2zJkiXucTyLFi0ymbfOgQMHmuzJJ580WbMvDs6ttylZd911TfaLX/zCva03/GPNNdc0mdcH7+/q1ltv7R7HGwhy9dVXm8wbMpKLRnfWe76pNijpk7zPZ7P/zteb97Ho0qWLyfr27eve/5JLLjGZ93Xj7LPPNtkbb7xRZIlNx/Ns83j97NOnj8kuvvhik3nP0ZI0d+5ck73++uttWF266Gz9de3a1c2916Ve5r3+9FS7Xc5f92v5tsY+ku6sPBGsLun/xRj/XJdVAe2H3iI3dBa5obPIDZ1FMtq8OYsxvi5pmzquBWh39Ba5obPIDZ1FbugsUsIofQAAAABIAJszAAAAAEhAraP0s+EN0Nh8881NduKJJ7r3HzZsmMm8oR7eBbXLli0rsMLqA0G8+3sXOnoXAi9YsMBks2fPdo/jXfT7yiuvmOyuu+4y2X333WeyohdzIj3rrbeeye68806TDR3qD6zyhnp4XZwzZ47JvN5Uu1D9a1/7msn+/Gd7mcCbb77p3h+WdxG391zpXcA9ZcoUk82aNctkS5cubePqGsvrcc+ePU3mDWPabrvt3Mf0hk55Q6M8ZRuu0pF4X59r/Xx6j9mjRw+TnXHGGSYbMGBA4eO8/PLLJvOG09DPjsF7Dvz6179ushNOOMG9v/da0+uy9zrX67f3NV+SzjvvvELHThHvnAEAAABAAticAQAAAEAC2JwBAAAAQALYnAEAAABAAjrMQBDvwu5u3bqZzLvYUPIvfvce07vQ3bsA0bvvvHnz3GO/9dZbJvOGK3jH9m732GOPucd54IEHTPbOO++YzBvYUHToCZrLu+h2yy23NNk999xjMu8Ccu/xJH/ozLhx40z2/PPPm2yjjTYy2Wc/+1n3OFtvvbXJzj33XJONHDnSZN7fDfjDVw455BCTeQORrr32WpNVG0CUA+95zXs+9y5c9563JamlpcVkm222mcl69eplsvYYKoHGaI/PkzfobJ999jHZUUcdZTKvSwsXLnSP861vfctk8+fPL7JEZMTrxC677GKyq6++2mR9+/Y1WbXBT95zo9c9r98bbrihyQ466CD3OP/4xz9M9r//+7+F19lMvHMGAAAAAAlgcwYAAAAACWBzBgAAAAAJYHMGAAAAAAlgcwYAAAAACegw0xq9CYMvvPCCyS688EL3/qeeeqrJtthiC5Otv/76Jps1a5bJvElx3jQ7Sfr73/9uMm+K4pQpU0y2ePFik73//vvucbyPEcpl++23N9mf/vQnk/Xs2dNk3kS6GTNmuMe58cYbTXbfffeZzOvxzjvvbLJ11lnHPU7//v1N9sUvftFkw4YNM9mf//xnkzH5Ttp8881N5k2A++ijj0y21lprtcuaUuJNcFyyZInJqnWpS5cuhW77yiuvFH5MlJs33VmSPvOZz5js4osvNpk3WdXr0qhRo9zjeK+VkDevU6effrrJzjrrrEKP9+KLL5rshhtucG/rTWvcaqutTHbooYearGvXribr3r27exzva1m1CdOp4Z0zAAAAAEgAmzMAAAAASACbMwAAAABIQKubsxDCdSGEGSGECStkvUMID4YQJld+9S8IAZqE3iI3dBa5obPIDZ1FDooMBBkt6UpJv1shO1PSQzHGi0IIZ1b+fEb9l9e+vAvavYsaJemKK64w2ciRI0225ZZbmuxTn/qUybyLcd9991332M8++6zJpk2bZrJ58+aZzLt4vYMYrZL2tqjBgweb7N577zWZd7G4N0jGuyj8sssuc499zz33mKzo0ARvAM5uu+3mHscbcNKrVy+T7bvvviZ74IEHTOYNPWmg0UqgswceeKDJBg0aZLKWlhaTbbzxxiZ75pln6rOwhHXr1s1ke+21l3vbTTbZxGSvvfaayd58802TJTgQZLQ
S6GzZeUMQJOnss882WZ8+fQo9pvda5wc/+IF725K9jhitDtTZasNkTjvtNJOdc845JuvUqZPJvNcRxx9/vMm819iSP3DM+xqz2WabmWz11e22Zfr06e5xvHV6r0NS1Oo7ZzHGxyV98Il4f0n/HMNyg6QD6rwuoCb0Frmhs8gNnUVu6Cxy0NZrzvrEGP/5z6bvSir2TzVAc9Fb5IbOIjd0Frmhs0hKzT/nLMYYQwhVv9cihDBSkv3+P6CJVtZbOosU0Vnkhs4iN7ymRQra+s7Z9BBCX0mq/Or/JFpJMcZRMcahMcahbTwWUC+FektnkRA6i9zQWeSG17RISlvfORsj6ShJF1V+vatuK2qyahcLFh2GcNRRRxXKvIsaq/2U89mzZ5ts/vz5JivZRbvtoZS9HTBggJs/9thjJvOGf8ydO9dkv//970121llnmazaBb9FuxhCMJk37OaJJ55w77/ffvuZbI011jDZWmutZTLvQukmDwTxtFtnvY+9JM2cOdNkCxYsMJn3HOR9/KpdkJ4r7wL5XXfd1WRHHnmke3+vi/fdd5/JvL+XmSjl82yjeP06/PDD3dvus88+JuvcubPJvC7tueeeJvP+TncQpeis97m/5ppr3NuOGDGi0GPefPPNJjv55JNN5nWs2teY9ddf32Tf/OY3TeYNDvEG3/33f/+3e5zx48e7eQ6KjNK/WdKTkrYIIUwNIXxDywu8RwhhsqThlT8DyaC3yA2dRW7oLHJDZ5GDVt85izFW217vXue1AHVDb5EbOovc0Fnkhs4iB+X6fhMAAAAAyBSbMwAAAABIQM2j9DuKxYsXm2zSpEkmu/LKK0223Xbbmaxr164mq3YxrneRp3fRsDeEIcaqE2GRoS5duphs1KhR7m179eplMq/H559/vsl+/vOfm2zRokVFlrhKvH56fw+efPJJ9/7esJxu3bqZrH///iYr26CKVVXt/F955RWTvfjiiyZ79tlnTeZdgO11LkXexete1qNHD5PttttuJuvXr597nDfffNNkDz74oMkY8FR+Xr+22GILk5122mnu/b3nOm8ozwUXXGCylpYWkyEfXnfOO+88kx1yyCHu/b2v57/4xS9MduGFF5rMGxDl8fopST/5yU9Mtummm5rMe31w6623mmzMmDHucXJ+Du3Yr04AAAAAIBFszgAAAAAgAWzOAAAAACABbM4AAAAAIAEMBKmzd955x2THHnusybzBIQMHDnQf8ytf+Uqh49x7770m8wYmIF9bbrmlyb7whS8Uvv+4ceNMdsUVV5isPYZ/FOVdxDtr1iz3tgsXLiz0mN79c75YuB68C8olf+iMN2Sgd+/eJuvevbvJig4vkmobYLTGGmuYbOuttzZZtYvZ1113XZO99dZbJvOGywwbNsxk1c7x0UcfNdkbb7xhMoY5lZ83XMYb0FTttYFn2rRpJrv88stXaV1I34ABA0x25JFHmqza4Kenn37aZNdee63JlixZYjLvOX3ttdc2mTdYTJIOPPDAQo/pPf9+97vfNZn39Sl3vHMGAAAAAAlgcwYAAAAACWBzBgAAAAAJYHMGAAAAAAlgIEgDvP322ya77bbbTHbSSSe59/cunuzcubPJnnjiCZMdd9xxJps+fbp7HKTFG9iw9957m8wbhCBJM2fONNk3v/lNk82bN68Nq2uspUuXurk33MG7sPj99983WUcfCLLWWmu5+Y477mgy7+Jz7znogAMOMJk3QMP7fEhSt27dTLbBBhuYrG/fvoWOvdlmm5nszTffdI89f/58k/3f//2fybxBKJ/+9KdNVm3wyB//+EeTeYNtGAhSLt7z+T777GOyXXbZxWSrr+6/VPM6+/3vf99kOTzHozqvO9tuu63JvIFM1Z5HvOcxb3jd3//+d5N5wz+++MUvmmy//fZzj+2dz+LFi012wgknmGzOnDnuY5YN75wBAAAAQALYnAEAAABAAticAQAAAEAC2JwBAAAAQAJa3ZyFEK4LIcwIIUxYITsnhDAthPBc5T87pQBoEjqLHNFb5IbOIjd0FjkoMq1xtKQrJf3uE/nPY4yX1H1FHcSrr75qsmoT6bxpfN60G2/K089//nOTffe733WPM2PGDJNlOtFutErQ2R49epjssMMOM1m1z5E3Fe7FF1+sfWHtzJtM5k2mkqRNNtnEZEuWLDHZo48+arIEuz1adertJydWes8XG220kXvfDz/80GTPPPOMyT7++GOT7bDDDibzJj163Zb8z4k3cdGb7NW1a1eTvfHGGyYbO3ase2xvAtnGG29ssmHDhpnMm5I2ceJE9zj/+Mc/TJZgF4sarRI81zaCN3X0vPPOM1nPnj1NVm3invf30nvex78ZrRJ09r333jPZu+++azLveU3ye3bMMceYbMSIESbzJpBvuOGGJqvWW++17rPPPmuyhx56yL1/R9DqO2cxxsclfdCAtQB1QWeRI3qL3NBZ5IbOIge1XHN2cgjh+cpbxOtUu1EIYWQIYVwIYVwNxwLqgc4iR632ls4iMXQWueH1AZLR1s3ZryVtKmmIpBZJl1a7YYxxVIxxaIxxaBuPBdQDnUWOCvWWziIhdBa54fUBktKmzVmMcXqM8eMY4zJJV0vavr7LAuqLziJH9Ba5obPIDZ1FaooMBDFCCH1jjC2VPx4oacLKbg9r8uTJJjv22GPd237pS18y2W677WYy72L84cOHm+y+++5zj3P00UebbMIE+6nN8eL11DvrDWzwPp/egAJvAIYkjR492mQ5fO769etnslGjRrm3XWuttUzmDWK45557TFbtYuWU1Ku3nTt3Nlm1AUTe33nv/v379zfZ7NmzTeYNwHjllVfcY0+bNs1k3oXvc+bMMZn398AbWlLtvL2hHoceeqjJvvKVr5jskwNYJH+wiiR99NFHJsuhi0Wl/lzbCN5Qo2uvvdZk3hAFz5QpU9z8+OOPN9miRYsKPSb+JfXOes8P3vPqqaeearL999/ffcxBgwaZzBsS4j1fLliwwGRTp041We/evd1je693vOE23uCnjqLVzVkI4WZJwyStF0KYKuknkoaFEIZIipLelGSfIYAmobPIEb1FbugsckNnkYNWN2cxRjtHU7L/BAQkgs4iR/QWuaGzyA2dRQ5qmdYIAAAAAKgTNmcAAAAAkIA2DQRB7bwL5+fPn+/e9t577zWZN9jhc5/7nMl69Ohhsk022cQ9zte+9jWT/eQnPzEZFxzXn3eB7NChdlLvaqvZf0/xBiZI0ksvvWSyZg4e8M7R6+Ldd99tMm9IiOT/PTrooINM5g2QKLNPPj8sXLjQ3GbSpEnufT/4wP581l69erV6DEl6/PHHTeY9f3lDMaTqwzoawevS/fffb7IuXbqYzBsIUq1z3pASlMuXv/xlk+26664m83rjDVv4zW9+4x7n1VdfbcPqUAbz5s0zmTfs7cEHH3Tvv/baa5tsjTXWMNmaa65pMq+3Xr//4z/+wz22NzBn7Nix7m07Kt45AwAAAIAEsDkDAAAAgASwOQMAAACABLA5AwAAAIAEMBCkAbxBCF7WvXt39/6HH364yQ
455BCTbbDBBibzLtr3LviXpBdeeMFkzRwg0dF5H3vvQty5c+e69/c61ijeOnfccUeTecM/vAuVvYvkJemkk04y2WuvvVZkiaVW5O9ttcEUM2bMMNkdd9xhMm9IiPfc4g0Q8p6XUtTS0mKyJUuWmKxz584mmzBhgvuYzRx6gvrzBiZceOGFJvOGLXimTp1qshtvvNG9LcNlsCLveX/x4sXubb3neY/3OsL7+u79PfAGf1S7vzfgpCPjnTMAAAAASACbMwAAAABIAJszAAAAAEgAmzMAAAAASAADQerMuwDSu1By8803N9mPfvQj9zF32mknk/Xo0cNk3oX3EydONNkVV1zhHueee+4xGRevN4Z30W2/fv1Mttpq9t9TvGEEkjRgwACTecNDvAEHHu8i3t69e7u3Pe+880x2xBFHmMy7SH7+/PkmO/74493j3HLLLSZjiE1tvGEd3sXa3ucpl0EfRRX9++Z17o033nAfk36Wy6abbmqyvn37FrqvN+joqquuMlnR4Q1AvXnPV97rwrfffttk3vNnNQsXLly1hZUc75wBAAAAQALYnAEAAABAAticAQAAAEACWt2chRAGhBAeCSFMDCG8GEI4pZL3DiE8GEKYXPl1nfZfLtA6Oovc0FnkiN4iN3QWOSjyztlSSd+LMW4laUdJJ4UQtpJ0pqSHYoyDJD1U+TOQAjqL3NBZ5IjeIjd0FslrdVpjjLFFUkvl93NCCC9J6idpf0nDKje7QdKjks5ol1UmwJum502a22abbUz2ne98x2R77bWXybp161b42N6Up3Hjxpns2GOPNdmUKVPc45RliliOnfWmGs2ePdtk3jS8tdde233MAw44wGTdu3c32UcffWSyXr16meyYY44x2X777ecee7311jOZ1y9vCpn3mH//+9/d49DZhq2vUFY266+/vsm6dOliMu85uk+fPu5jerfN9WOZem/ryfu8SdIee+xhsmoTdD/J+1rsTaAt2xTUZupInW0kb1p4tWnfXv7hhx/WfU05W6VrzkIIAyVtK2mspD6VkkvSu5L8r0RAE9FZ5IbOIkf0Frmhs0hV4Z9zFkLoLul2SafGGGev+K9IMcYYQnD/6S+EMFLSyFoXCqwqOovc0FnkqC29pbNoJp5rkbJC75yFEDpreYlvijHeUYmnhxD6Vv5/X0nuT0mMMY6KMQ6NMQ6tx4KBIugsckNnkaO29pbOoll4rkXqikxrDJKulfRSjPGyFf7XGElHVX5/lKS76r88YNXRWeSGziJH9Ba5obPIQZFva/yCpCMkvRBCeK6SnSXpIkm3hRC+IWmKpEPbZ4kr5w1S6Nmzp8m8oQneRYneRY2StM46dqrqgQceaLLjjjvOZAMGDDCZd1F5tYvCp06darKRI+276o8++qjJFi5c6D5mySXdWc+SJUtMduedd5rshBNOMJk3vEOSjj76aJMddthhJvN65w1C8IZ8VLtI3htYc8kll5jsggsuMBmdzaOzHcHqq9svkd7XDe/r0DvvvOM+pnfbjAc+dJjeel+zJenQQ+2pebf1PsdPPvmkyT744IM2rA6roMN0tpGqDf8oyhuw15EVmdb4F0n+KzBp9/ouB6gdnUVu6CxyRG+RGzqLHKzStEYAAAAAQPtgcwYAAAAACWBzBgAAAAAJKPxzzlLQu3dvkw0fPtxkI0aMMNncuXMLHWOzzTZz84EDB5rMGzLiXQjsXSj53nvvmcwbACFJp59+uslmz57t3hbl8cILL5hs0qRJJhs61J/o6w2i8YYReD7++GOTeUM+Hn/8cff+3mAcb7ANkLJqA2+K+NznPufmt956q8lqvZge7c97PpWkrbbaymReb7znzzvuuMNk3nMvkDqv397rXEnq0aOHyb70pS+ZzHt9kfHwpFXCO2cAAAAAkAA2ZwAAAACQADZnAAAAAJAANmcAAAAAkICGDwT55IWyMUZzm06dOrn39S68PeGEE0zmXYi9ZMkSk3Xu3Nlk3bt3d4/tDVLw1v7++++b7Kc//anJRo8ebbL58+e7x/aOg/JbtGiRyfbbbz+T3XTTTe79qw0KKXKcu+++22Q//vGPTTZjxgz3MeksymDevHkm8/6+eF9LttxyS/cxN9hgA5NNmTLFZAyGaB5voEe151PvtYH3uZs5c6bJnn32WZPx3Imy8AaYSVL//v1Nts0225isW7duJis63C93vHMGAAAAAAlgcwYAAAAACWBzBgAAAAAJYHMGAAAAAAlo+ECQIhe7VvsJ4BMmTDDZY489ZjLvguuFCxeazLvod/XV/Q/J888/b7ILL7zQZBMnTjRZR/mJ5mh/3kXle++9t3tbb7jNggULTOZdvM4wAkD66KOPTPbXv/7VZAMHDjTZm2++6T6mN1CEIRBp8T4f48ePd2/r9WHdddc12S9+8QuTec/nQI6817mTJ092bzts2DCTbbTRRib78pe/bLI77rjDZN5zau545wwAAAAAEsDmDAAAAAASwOYMAAAAABLQ6uYshDAghPBICGFiCOHFEMIplfycEMK0EMJzlf/8C1+ABqOzyA2dRW7oLHJEb5GD0NqFyCGEvpL6xhjHhxB6SHpG0gGSDpU0N8Z4SeGDhVD3q547depUKFuyZElNx+GC7TTEGO0Ul09IvbPoWOhsuXz60582mTd8Z+nSpe793377bZPV+vWpHTwTYxy6shvQ2eW6dOliMm84gpd5ryt4rdFmrXZWoreNtO+++7r5r3/9a5N5Q8h+85vfmOzqq6822YcffugeJ4dhfNVeH7Q6rTHG2CKppfL7OSGElyT1q+/ygPqhs8gNnUVu6CxyRG+Rg1W65iyEMFDStpLGVqKTQwjPhxCuCyGsU+e1ATWjs8gNnUVu6CxyRG+RqsKbsxBCd0m3Szo1xjhb0q8lbSppiJb/K8SlVe43MoQwLoQwrg7rBQqjs8gNnUVu6CxyRG+RskKbsxBCZy0v8U0xxjskKcY4Pcb4cYxxmaSrJW3v3TfGOCrGOLTI9wID9UJnkRs6i9zQWeSI3iJ1RaY1BknXSnopxnjZCnnfFW52oKQJ9V8esOroLHJDZ5EbOosc0VvkoMi0xp0lPSHpBUn/HH1ylqQRWv72b5T0pqTjKxdaruyxmGyDmhScfEdnkQw6iwwVmdZIZ5GSotMa6W2D9OjRw80PPvhgky1atMhkf/3rX002bdo0k3mTHnNRy7TGv0jy7nxfrYsC2gOdRW7oLHJDZ5EjeoscrNK0RgAAAABA+2BzBgAAAAAJYHMGAAAAAAlo9ZozAAAAAChq7ty5bv6HP/zBZN5wwsWLF5ts2bJlJisj3jkDAAAAgASwOQMAAACABLA5AwAAAIAEsDkDAAAAgAQ0eiDITElTKr9fr/LnMijTuUjpns9GTTgmnc1DqudDZ+unTOcipX0+je5tWTsrlet8Uj6XZj7XpvxxaYu6nI835EOqPiiknaT8uana2VDtg9feQgjjYoxDm3LwOivTuUjlO596KdPHpUznIpXvfOqlTB+XMp2LVL7zqZeyfVzKdD5lOpd6KtvHpUznk+u58G2NAAAAAJAAN
mcAAAAAkIBmbs5GNfHY9Vamc5HKdz71UqaPS5nORSrf+dRLmT4uZToXqXznUy9l+7iU6XzKdC71VLaPS5nOJ8tzado1ZwAAAACAf+HbGgEAAAAgAQ3fnIUQ9gohTAohvBpCOLPRx69VCOG6EMKMEMKEFbLeIYQHQwiTK7+u08w1FhVCGBBCeCSEMDGE8GII4ZRKnuX5tBc6mw46WwydTQedLS7n3papsxK9LSrnzkrl6m2ZOtvQzVkIoZOkqyR9SdJWkkaEELZq5BrqYLSkvT6RnSnpoRjjIEkPVf6cg6WSvhdj3ErSjpJOqnw+cj2fuqOzyaGzraCzyaGzBZSgt6NVns5K9LZVJeisVK7elqazjX7nbHtJr8YYX48xLpZ0i6T9G7yGmsQYH5f0wSfi/SXdUPn9DZIOaOii2ijG2BJjHF/5/RxJL0nqp0zPp53Q2YTQ2ULobELobGFZ97ZMnZXobUFZd1YqV2/L1NlGb876SXp7hT9PrWS56xNjbKn8/l1JfZq5mLYIIQyUtK2ksSrB+dQRnU0Una2KziaKzq5UGXtbis8xva2qjJ2VSvA5zr2zDASps7h8/GVWIzBDCN0l3S7p1Bjj7BX/X47ng1WT4+eYznZsOX6O6WzHluvnmN52bDl+jsvQ2UZvzqZJGrDCn/tXstxNDyH0laTKrzOavJ7CQgidtbzEN8UY76jE2Z5PO6CziaGzraKziaGzhZSxhOp3aQAAAR5JREFUt1l/jultq8rYWSnjz3FZOtvozdnTkgaFEDYOIXSR9FVJYxq8hvYwRtJRld8fJemuJq6lsBBCkHStpJdijJet8L+yPJ92QmcTQmcLobMJobOFlbG32X6O6W0hZeyslOnnuFSdjTE29D9Je0t6RdJrkn7Y6OPXYf03S2qRtETLv7/4G5LW1fIJMJMl/Z+k3s1eZ8Fz2VnL3959XtJzlf/2zvV82vHjRGcT+Y/OFv440dlE/qOzq/Sxyra3Zeps5XzobbGPU7adray/NL0tU2dD5YQAAAAAAE3EQBAAAAAASACbMwAAAABIAJszAAAAAEgAmzMAAAAASACbMwAAAABIAJszAAAAAEgAmzMAAAAASACbMwAAAABIwP8HCczmNqVZfwUAAAAASUVORK5CYII=\n", "text/plain": [ "
" ] @@ -178,12 +173,12 @@ }, { "cell_type": "code", - "execution_count": 13, + "execution_count": 11, "metadata": {}, "outputs": [ { "data": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAAA2oAAACzCAYAAAD48u9xAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4zLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvnQurowAAH65JREFUeJzt3W+MXNWZ5/Hf0+3+43bbYLuxcRhnbcAJAgIhsQBlE5LNLAsbJWFGCpsh0oZIIxlFgwgKLwbNvtjRvhqtZjJvdjMRGwgZ5M0oJDNKMqBljWEJCIRsSAIGu92OgzGOwcZg2m2723/67AtXUNvnufStqlvV59z+fiTL7ce36p5b9auqe7rqPGUhBAEAAAAA0tEz1wMAAAAAAJyNiRoAAAAAJIaJGgAAAAAkhokaAAAAACSGiRoAAAAAJIaJGgAAAAAkhokaAAAAACSGiRoAAAAAJKatiZqZ3Wxmo2a2y8zurWpQQKeQWeSI3CI3ZBa5IbNIkYUQWrugWa+knZJulPSGpC2SbgshvPoBlwlm1tL+gBCCQggtB6iVzPb29obe3t5Wd5mcso+/stv19Pi/6/GeV7zbcXp6uuVaDk6fPq3Tp0+39aTXbG57e3tDX19fO7vEPDc1NfV2COGCVi/fbGbNLBQ9lwBlTE9PdzWzjctwTouWlT2nXdDGPq6VtCuEsFuSzOyfJN0i6YNCrcHBwTZ2iflscnKy3atoOrO9vb268MIL291vMrzJkneC5NW8yw4MDLj7OXXqVFRbsmRJVDt+/HhUO3r0aFQruu9Tf5F88803q7iapnLb19enNWvWVLFfzFOjo6N72ryKpjLb09OjoaGhNneJ+WxiYqKrmW1sU/gaCMxmamqq1Hbt/ArrIkl7Z/z7jUYNSBWZRY7ILXJDZpEbMosktfOOWilmtkHShsbPnd4d0LaZma3Txx5RXzMzu2BBx5/WgbZxboAczcwt0A3tvKO2T9LqGf/+o0btLCGE+0II60MI69vYF1CFpjPLugkkYNbczswsv1xAAprKLBM1JKDp8wNyi25o51evWyStM7O1OhPmP5P0tUpGBXTGvM+s925L2fVk3nb9/f2l93348OFS1+mtR5vnL4jzJrdl7+dWm2Cha+ZNZlEbZBZJanmiFkI4ZWZ3SnpMUq+kB0IIr1Q2MqBiZBY5IrfIDZlFbsgsUtXWYoYQwqOSHq1oLEDHkVnkiNwiN2QWuSGzSBELcAAAAAAgMUzUAAAAACAx9HEGEuB1l+zr6yt1Wa8hhySdPn06qk1PT0c1r8HIxMREVPO6CZ44ccLdt1f3xuOZ541Dase7P4uagXjNabx8elnyvnjW23fR48XLvLcfumoCALqFd9QAAAAAIDFM1AAAAAAgMUzUAAAAACAxTNQAAAAAIDFM1AAAAAAgMXR97AKv81gzndA8zWyL1plZ1JGxmdve6+Y4ODgY1VauXBnVpqamoprXme748ePuvr1xel31vC543hi97ntFHfSKukFi/mnm8eJ1Ol23bl1UW7x4cVQbGRmJah/5yEei2q5du9x9v/jii1Ht9ddfj2pFmQc+SNlutt5rhtetV/K7knqX9x6DdNcF8sA7agAAAACQGCZqAAAAAJAYJmoAAAAAkBgmagAAAACQGJqJlOQtvPUW7ZZdoOstDm63QUhvb29Ua3dhMuL7dGBgINpmyZIl7mWXLVsW1W688cao9uUvfzmqPfnkk1Ht+9//flSbnJx09+3lwWvyUTbbzfD2Tb4wm6VLl0a1b3/721Ht0KFDUW3Lli1R7Rvf+EZUe/nll91979ixo9R4Dh486F4e85P3euo1nPGeU73XEm+7oufjoaGhqOY1ffJqXhOqoteSonMG4A/KNrGRyjfBwRncMgAAAACQGCZqAAAAAJAYJmoAAAAAkJi21qiZ2WuSjkg6LelUCGF9FYMCOoncIjdkFrkhs8gNmUWKqmgm8u9CCG9XcD1J8xoueA0TvEW3ZRdZFi3YLbvIsq+vL6qVXdQszbtmD6Vyu2DBAo2MjJxVu+2226LtvvjFL7qXX758eVQbHh6Oat6i8qeeeiqqebnp7+93933s2LGoVnZR+Pj4eFRrpkEIi887ovbPteedd15UO3z4cFTbunVrVNu8eXNU27ZtW1R75JFH3H17jRQuvfRSd1uUllRmy74Wl329l6Spqamodv7550c1r7GUl/dPfOITUe3KK690971q1aqoduDAgajmnRs89NBDUW3nzp3ufo4ePRrVany+kFRm51pR7s/lZcx7HEh+c5s9e/ZEtXYa9tUJH30EAAAAgMS0O1ELkv6vmb1gZhuqGBDQBeQWuSGzyA2ZRW7ILJLT7kcfPx1C2GdmKyRtMrMdIYRfztygEfYNjZ/b3B1QiQ/M7czMFn2sEOiy0pn1PlYCzIHSmeXcAIlo6pwW6Ia23lELIexr/H1A0r9IutbZ5r4QwnoWZSIVs+V2ZmY56UUKmsls2TUFQCc1k1kmakhBs+e05Bbd0PJZqJktktQTQjjS+Pk/SPpvlY0sA2UX0548eTKqec0W2n3QnzhxotR2Rd8WPx80m9sQQnS7eg1a9u3b517+pZdeimpPPPFEVPMmhC+++GJUO3ToUKnLSn7Gyt73XrZrvHg8afPpuXbHjh1R7a677opqXsMEb4L6m9/8Jqp5jREkacWKFWWGiBJSzaz3/Fe24daaNWvc61y9enVUu+qqq6Ka93y8f//+qOY1ZShqzuSN/bLLLotq119/fVTzzku+973vufvxxln0OMpVqpntlHaa3C1cuDCqXXzxxVHtzjvvdPftXf6ee+6Jat75znw8f23n7YKVkv6lMblYIOl/hxD+TyWjAjqH3CI3ZBa5IbPIDZlFklqeqIUQdku6usKxAB1HbpEbMovckFnkhswiVbTnBwAAAIDEMFEDAAAAgMTQ0q4NZRc1ett1oltQ0YJjtO7kyZNR44Jf/OIX0XYbN250L+8tuD5+/HhU874GYHJyMqoNDAxEtaImMvNx0S3y5jVS8Jp8eLXDhw9Htampqai2cuXKFkeH3HnNNy655JKoduutt0a16667zr3OxYsXR7Vnn302qm3evDmqvfLKK1HNOzfwGj9I/mu+99j40pe+FNVefvnlqHbs2DF3P6gf77lxaGgoqn34wx+Oat5j5pvf/GZUu/nmm919e02jvHMg73zHOy+qO95RAwAAAIDEMFEDAAAAgMQwUQMAAACAxDBRAwAAAIDE0EykJBozzE9mFjU42LNnT7Sd1yDkD5c/18KFC6OatyjcW1xLwxjUWW9vb1T71Kc+FdVGR0ejmtdUx1swj/nBy9KSJUui2t133x3VvvrVr0a1hx9+2N2PV//Vr34V1bzXCK+5iXeu4W0n
+cfoNVt46KGHoprX6KqoMVVRHekralznnYd89KMfjWpXXHFFVBseHo5q4+PjUW3r1q3uvp9++umo9tZbb0U1L/edaMSXOt5RAwAAAIDEMFEDAAAAgMQwUQMAAACAxDBRAwAAAIDEMFEDAAAAgMTQ9bGkOnV9bLdrTp1ui6p4HZSKcPvlpZnHS0/P2b/7mo8dqtqxZs2aUtstXrw4qr377rsVjwY58zrTebn58Y9/HNU2btwY1Xbs2OHup2w3R4/3/ODVvA7ARbyuj173U6+DMK9NefPuv8HBQXfbkZGRqHbppZdGtYmJiaj2/PPPR7Wf/OQnpcYj+c/VJ0+ejGoDAwPu5ecb3lEDAAAAgMQwUQMAAACAxDBRAwAAAIDEzDpRM7MHzOyAmW2bUVtmZpvMbKzx99LODhNoDrlFbsgsckNmkRsyi9zYbItHzewGSROS/jGEcGWj9t8lvRNC+Bszu1fS0hDCX862s56enlC0sBHdk2szkcnJSU1PT5cafFW57e/vDxdeeGG7Q0fGih4v3nPZ0qVnv75v27ZNExMTXc3s4OBgKNuUIzXebfqhD30oqu3ZsyeqnT59uiNjmo9GR0dfCCGsn227qjLb29sbhoaGqhj6+85t7NMYm7fvUjWv2YHk567q10hvPJJ/jN54vMYhdTMxMdHVzEpnzmlTanjh5aEZixYtimpenrzHgpfRotx59bINeOpkamqq1DntrPdqCOGXkt45p3yLpB82fv6hpD9peoRAB5Fb5IbMIjdkFrkhs8hNq9PvlSGE/Y2f35S0sqLxAJ1EbpEbMovckFnkhswiWW1/j1oIIZhZ4fv8ZrZB0obGz+3uDqjEB+V2ZmaLPnICdFvZzC5YwNdjIg1lM8u5AVLRzDkt0A2tvqP2lpmtkqTG3weKNgwh3BdCWF/ms8NAh5XK7czMtvuZb6BNTWeWXy5gjjWdWSZqmGMtndOSW3RDq796/bmk2yX9TePvn1U2IqBzyC3eV7bZwHnnnedeftmyZVHt3MYzY2NjLY7uffMqs5OTk1Ft9+7dczAStCGJzJZtoJFaExrveamoCVu7TR3wviQy2y7vfi66773XuiNHjrS876JmO2hfmfb8P5L0nKSPmtkbZvbnOhPmG81sTNK/b/wbSAa5RW7ILHJDZpEbMovczPqOWgjhtoL/+uOKxwJUhtwiN2QWuSGzyA2ZRW5YgAMAAAAAiWGiBgAAAACJoY9zjXiLQ71aUVc4r+4tEE1t8TV83v05MDAQ1byMeAuQvUYPIRR2MU6ed4xLly6NaiMjI+7lh4eHo9rbb7991r9ZYI0q9fX1RTXv6xiKGgicOHEiquX8GK6jRYsWRbWhoSF321OnTkW19957r/IxIV85d672Xj+956uir6TxzoFyfL7L9x4EAAAAgJpiogYAAAAAiWGiBgAAAACJYaIGAAAAAImhmUiNeIskvYXJF110kXv5cxshSNL4+HhUo5nI3CnbIETyG2Ocd955pfZz8ODBqOYtSvYajEh5ZMRruLB69eqotnbtWvfyY2NjUe13v/vdWf/2mjcArVqyZElU85oBFT3+vOYTOTxW68B77vaaIKxbty6qFTWEOHz4cFQ7fvx4VPPud8xvXh69nPX390e1snnynpsk/7zBeywMDg5GtWXLlkW1Y8eOufvxHgtTU1PutinjHTUAAAAASAwTNQAAAABIDBM1AAAAAEgMEzUAAAAASAzNRDLgLfr0GiF4CzyvvvrqqHbzzTe7+3nwwQej2rvvvltihOgWb8Ht4sWL3W2Hh4ej2nXXXVdqu+3bt0e1559/PqoVNTIpWtybEq+JwooVK6Kat3hZ8hcln7t42XucAq3ymtN4DQCKmtiQx7njPd+cf/75Uc1rJlJ0vz333HNRzWsqBpzLO1+85pprotrHPvaxqPbOO+9EtXMbaUnFjYoOHDgQ1bzGI97r8de//vWo9uijj7r72bFjR1SjmQgAAAAAoG1M1AAAAAAgMUzUAAAAACAxTNQAAAAAIDGzTtTM7AEzO2Bm22bU/trM9pnZrxt/vtDZYQLlkVnkiNwiN2QWuSGzyE2Zro8PSvofkv7xnPrfhxD+tvIRIVK2w6PX6etrX/taVPvtb3/r7mdiYqLUvjPwoGqa2ZMnT0a1os6chw8fjmpjY2NRzevc6HVG8jLndSTNhXfcXlfNvXv3upf3Ole1+Xh5UDXNLarhPUd7j8Gibmsd6Aj4oMhsKX19fVHNu5+8jnp79uxxr9PLQ1HHT7zvQZFZ9ff3R7VVq1ZFNa/r49GjR6Pa5ZdfHtUuuOACd98LFy4sdZ2f/exno5rXCXLTpk3ufiYnJ916bmZ9Ry2E8EtJ8TMHkCgyixyRW+SGzCI3ZBa5aWeN2p1m9lLjbeSlRRuZ2QYz22pmW9vYF1CFpjOb6TuKqJdZczszs0XvpgBd1FRm+e4vJKDp8wNyi25odaL2D5IukfRxSfsl/V3RhiGE+0II60MI61vcF1CFljLrfZks0EWlcjszszl/HBW10HRmvS+7BbqopfMDcotuaOksNITwVgjhdAhhWtL/knRttcMCqkVmkSNyi9yQWeSGzCJlZZqJRMxsVQhhf+Offypp2wdtj/Z4b6977/RceeWVUe26666Lalu2bHH3c+jQoajm/cYox7f765JZ76OYzSweL9uExsuX13yj6GN23jhT++2jN56dO3dGtSNHjriXHx8fj2pVvwNbl9yiGt5zr/eYnktk1ufdT95zy3PPPRfVip5nveZHXka85+OhoaGo5jV5KLpO73XDq6WWT898zOzx48ej2hNPPBHVvPNFr/GHd35w1VVXufv+zGc+E9VuuOGGqOY1MnnkkUeiWlGDPG+cOZp1omZmP5L0OUkjZvaGpP8q6XNm9nFJQdJrku7o4BiBppBZ5IjcIjdkFrkhs8jNrBO1EMJtTvn+DowFqASZRY7ILXJDZpEbMovc0CkBAAAAABLDRA0AAAAAEtNSMxG0r5mmA2W/F+mmm26Kat63xe/bt8+9/MmTJ6Naag0g0B5v8bln0aJFUc1rVrNr1y738u+++25U8xakz2VjGu9xtXv37tKX9x4bPF6Aemr3se0913nPQRMTE23txxvn2rVro9rIyEip8UjSsWPHolrZ5mM5NBOZj7xzgampqVI1z+TkZFQbHR11t/XONb3zi3feib+X/PHHH49qBw8edPfjnWfn+D2jvKMGAAAAAIlhogYAAAAAiWGiBgAAAACJYaIGAAAAAImhmcgcmZ6eLr1tb29vVPMWeHoLfrdt2xbVvIWckv/N8t62c9kAArGiRe5LliyJat4C8rKLvZtZKN7X1xfVyi5KBoBO8JoLeDXv9dl7fSy6vPe86DVb8F7bh4aGSm0nScPDw1Htsssui2pf+cpXopr32v7ss8+6+/HOI7zzjRwbNaBzvGYgknTxxRdHNe/8Yvv27VHtqaeeimreY6tOeEcNAAAAABLDRA0AAAAAEsNEDQAAAAASw0QNAAAAABJDM5GSvMW83je7e4tpveYb3mLjoiYd3n68Zg1btmyJak8//XRUW7x
4cen9eIuqyzafQHd4WZCkW2+9NardfvvtUc3LyMaNG6Paq6++GtUmJibcfTfTLCdXRU1cmt0GQHd4r89ekxCvEdOll17qXqf3erhr166o5p1DLFy4MKpdffXVUW358uXuvlevXh3VvNf3sbGxqPb4449HtZdeesndT9mGZkWvRag/77XuiiuucLe96aabotq+ffui2v333x/VvMeW19BHqk9zG95RAwAAAIDEMFEDAAAAgMQwUQMAAACAxMw6UTOz1Wb2pJm9amavmNm3GvVlZrbJzMYafy/t/HCB2ZFZ5IbMIkfkFrkhs8hNmXfUTkm6J4RwuaTrJf2FmV0u6V5Jm0MI6yRtbvwbSAGZRW7ILHJEbpEbMouszNr1MYSwX9L+xs9HzGy7pIsk3SLpc43Nfijp/0n6y46MMgFeh6OlS1v/hcvk5GSpmlTcDfJcO3bsiGqbNm2Kal5XKUn6/e9/H9W8Tn+pd32cb5n1OodJ0oUXXhjVDh48GNW87l+7d++Oal4nx/nQ3bEb5ltmUQ91yK3XMc57bb/hhhvcy3uv297lDx06FNWuvfbaqHbXXXdFNa8jsyT94Ac/iGoPPfRQqX0fPnw4qhV1yfPqXpe/sucqc6kOmZ1rXh4HBwej2ic/+Un38l72Hn744ai2c+fOqOblruictC5dl5tao2ZmayRdI+l5SSsbgZekNyWtrHRkQAXILHJDZpEjcovckFnkoPT3qJnZsKSfSro7hDA+c6YaQghm5v4qxcw2SNrQ+Lm90QJNqCKz3vfHAJ1SRWaLfvsOdEorueXcAHOpiudaoBtKvaNmZn06E+iNIYR/bpTfMrNVjf9fJemAd9kQwn0hhPUhhPVVDBgoo6rMFn2RIlC1qjLLLxfQTa3mdmZmmaihm6p6riW36IYyXR9N0v2StocQvjPjv34u6fbGz7dL+ln1wwOaR2aRGzKLHJFb5IbMIjc22+JPM/u0pKclvSzpD50D/kpnPtP7Y0kflrRH0n8KIbzzQdfV09MTvAWHOfA+TrRyZfwR5pGRkVLXt3///qh25MgRd9vjx4+Xuk6vscOKFSui2gUXXOBefnx8vNSYvO26YXJyUtPT07P+CqvKzPb39wevKUdKihbSLlq0KKq99tprpS7vZbu/v7/5wdWY93g7t7HL3r17NTk52dXMDg4OhjVr1sx+AECB0dHRF8p8Cqaq3Pb29oahoaG2xz2bvr6+qOadkyxfvjyq3XHHHe51fv7zn49q3mvsxMREVFu7dm1Ue/3116PaPffc4+77mWeeiWre+YL3zo/XICSHZiBFJiYmuppZ6cw57cDAQFvjztHw8HBU8843ihqdeY8F77zSy2hRw5scTU1NlTqnLdP18RlJRVf0x80ODOg0MovckFnkiNwiN2QWuWEBDgAAAAAkhokaAAAAACSGiRoAAAAAJIYv3Cnp5MmTUW3v3r1R7e23345qXot3r4FDUVOIsrz9eN8Af/ToUffynRgTOq/oe7NOnDgR1ZYtW1bqOmkcMjtvgf65t3nOi/PrwLv9aak9f3mv497zpPe6+dhjj7nX6TUoueyyy6Lae++9F9W++93vRrUnn3wyqo2Njbn79niZ53kIVTp27FhUm5qaimpew62iOhktxjtqAAAAAJAYJmoAAAAAkBgmagAAAACQGCZqAAAAAJAYmomU5C1A92regsq5RIOQ9piZent7z6p5i16LFs3OJW+cAwMDczCSevIe/+fe5iyQ7oyyDUG4/TEbL0teg5CiZiJbtmyJal4zksHBwah27muLJI2Pj0e1hQsXuvsm35gL3vlOiudAdcE7agAAAACQGCZqAAAAAJAYJmoAAAAAkBgmagAAAACQGJqJAACy4jVRoLECquI1RliwwD9d8pp/eE3FTpw4Ueo6e3ri35+TbWD+4h01AAAAAEgMEzUAAAAASAwTNQAAAABIzKwTNTNbbWZPmtmrZvaKmX2rUf9rM9tnZr9u/PlC54cLzI7MIjdkFrkhs8gRuUVuyjQTOSXpnhDCi2a2WNILZrap8X9/H0L4284ND2hJpZk9dyH36dOno23MrMWhApJ4nm0bj8Gum1eZ7e3tLb3t8PBwB0eCNs2r3CJ/s07UQgj7Je1v/HzEzLZLuqjTAwNaRWaRGzKL3JBZ5IjcIjdNrVEzszWSrpH0fKN0p5m9ZGYPmNnSiscGtI3MIjdkFrkhs8gRuUUOSk/UzGxY0k8l3R1CGJf0D5IukfRxnfntxN8VXG6DmW01s60VjBcorYrMeh9zBDqFzCI3VWSW7wlDt5Fb5MLKBM3M+iT9q6THQgjfcf5/jaR/DSFc+UHX09PTEwYHB1sbKea9yclJTU9Pl1qIUlVmBwYGwqpVq86qsUYNZb355ps6ceJEVzM7ODgY1qxZ0/xggYbR0dEXQgjrZ9uuqsz29vaGoaGhFkcLSBMTE6UyK1V7TjswMNDCaAFpamqq1Dltma6PJul+SdtnBtrMZp69/qmkba0MFKgamUVuyCxyQ2aRI3KL3JTp+vhvJf1nSS+b2a8btb+SdJuZfVxSkPSapDs6MkKgeZVlNoTgvoM2V5p5526+fiyj7G2U2O3D8yxyQ2aRI3KLrJTp+viMJO/M59HqhwO0j8wiN2QWuSGzyBG5RW6a6voIAAAAAOg8JmoAAAAAkBgmagAAAACQmDLNRIB5LaXW+14DjKKmGCmNu5ump6dLbdfTE/+eKrEGIwAAYB7jHTUAAAAASAwTNQAAAABIDBM1AAAAAEgMEzUAAAAASIx1c/G8mR2UtKfxzxFJb3dt551Vp2OR0j2efxNCuKCbOySz2Uj1eMhsdep0LFLax9PV3NY4s1K9jiflY5nL59qUb5dW1Ol4Uj6WUpnt6kTtrB2bbQ0hrJ+TnVesTsci1e94qlKn26VOxyLV73iqUqfbpU7HItXveKpSt9ulTsdTp2OpUt1ulzodTx2OhY8+AgAAAEBimKgBAAAAQGLmcqJ23xzuu2p1OhapfsdTlTrdLnU6Fql+x1OVOt0udToWqX7HU5W63S51Op46HUuV6na71Ol4sj+WOVujBgAAAADw8dFHAAAAAEhM1ydqZnazmY2a2S4zu7fb+2+XmT1gZgfMbNuM2jIz22RmY42/l87lGMsys9Vm9qSZvWpmr5jZtxr1LI+nU8hsOshsOWQ2HWS2vJxzW6fMSuS2rJwzK9Urt3XNbFcnambWK+l/SvqPki6XdJuZXd7NMVTgQUk3n1O7V9LmEMI6SZsb/87BKUn3hBAul3S9pL9o3B+5Hk/lyGxyyOwsyGxyyGwJNcjtg6pPZiVyO6saZFaqV25rmdluv6N2raRdIYTdIYQTkv5J0i1dHkNbQgi/lPTOOeVbJP2w8fMPJf1JVwfVohDC/hDCi42fj0jaLukiZXo8HUJmE0JmSyGzCSGzpWWd2zplViK3JWWdWaleua1rZrs9UbtI0t4Z/36jUcvdyhDC/sbPb0paOZeDaYWZrZF0jaTnVYPjqRCZTRSZLURmE0VmP1Adc1uL+5jcFqpjZqUa3Md1yizNRCoWzrTRzKqVppkNS/
qppLtDCOMz/y/H40FzcryPyez8luN9TGbnt1zvY3I7v+V4H9cts92eqO2TtHrGv/+oUcvdW2a2SpIafx+Y4/GUZmZ9OhPojSGEf26Usz2eDiCziSGzsyKziSGzpdQxt1nfx+R2VnXMrJTxfVzHzHZ7orZF0jozW2tm/ZL+TNLPuzyGTvi5pNsbP98u6WdzOJbSzMwk3S9pewjhOzP+K8vj6RAymxAyWwqZTQiZLa2Ouc32Pia3pdQxs1Km93FtMxtC6OofSV+QtFPSbyX9l27vv4Lx/0jSfkkndebzyH8uabnOdJIZk/S4pGVzPc6Sx/JpnXkL+CVJv278+UKux9PB24nMJvKHzJa+nchsIn/IbFO3Vba5rVNmG8dDbsvdTtlmtjH+2uS2rpm1xsEBAAAAABJBMxEAAAAASAwTNQAAAABIDBM1AAAAAEgMEzUAAAAASAwTNQAAAABIDBM1AAAAAEgMEzUAAAAASAwTNQAAAABIzP8HfU7ieBOpF80AAAAASUVORK5CYII=\n", + "image/png": "iVBORw0KGgoAAAANSUhEUgAAA2cAAACxCAYAAABAxMXKAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4xLjEsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy8QZhcZAAAgAElEQVR4nO2dW4wd13Wm1242qSbZF16aV/EqSjIkWpGc0E7gRIAGHhuWk0DJQwYjA4GABFCQC5AASRAjfphgHoI85PaQQRAFtuUEmUwExEH8IMyMR5EhO5k4YhLJlERZJCVRZPPSvN/JJrv3PPBw3Kf2v9mrus6l6uj7AILsxV21a+/696qqPrX+E2KMBgAAAAAAAP1lqN8HAAAAAAAAADycAQAAAAAA1AIezgAAAAAAAGoAD2cAAAAAAAA1gIczAAAAAACAGsDDGQAAAAAAQA2o9HAWQvhsCOF7IYSDIYQvdOqgALoFmoUmgm6haaBZaBpoFupCWOz3nIUQlpjZO2b2aTM7amavmtnTMca3cttMTk7GHTt2tMVOnz6dtLt8+bLcfsmSJUns2rVrSWxoKH3mvH79ehIbHh5OYjdv3pR9q32qtiGEJDY7O+tqNzc35+5bnTfvPtX+cqg5v3XrlqtvxT333CPjap/Lli1r+/nGjRt28+ZNX0eCxWh27dq1cdu2bW2xs2fPJu1yulHjvXHjRhJTWvRq9sqVK7Jvde6vXr2axMposUgufyjdqH16+/bqy0zru8o+ly5dKuNq+7GxsbafL126ZNeuXVu0Zlv9lNKt0uypU6eSdjndjIyMJLFLly4lMW8+Lq5jM50TzbSe1Nry5kRv/jLT51Pt06uvnG68x6nGndtnkYmJCRlX51ydnzNnzpyOMa5zdSYoq9nx8fG4bl17dyrPKn2Z+edPjbXqvUE38ueHEXUOzUqtl55q1kzf0548eTJpp667Zlpn3lzr1W0u3ym896oKdYy5PO/N3+r+Sa3DMveV3rWt9qmOcdWqVbJvNfZiXj5+/LidO3dOTnB6lH4+YWYHY4zvmpmFEP6HmT1lZlkh79ixw/bu3dsW+8pXvpK0+9a3viW3Vxec119/PYmtXLkyiR04cCCJrV69OokdO3ZM9j0+Pp7Ejh49msTUxVMtNiXOmZkZ2bcSiRKdWhxqn+rmy0wvQiU8daPnvWnYtWuXjKuH9C1btrT9vG/fPlcfd6G0Zrdt22b/8A//0Bb7m7/5m6RdTjf3339/ElNa3LBhQxJ78803Xe3+5V/+RfatdKfWi9KieoBU+sjdsKg1qLTovcHPXbgVo6OjSUzdlKp1pRLqpk2bZD8qyX/qU59q+/mFF17IHmcJSulWafbP/uzPknbFXHyHBx98MIm98sorSUzN81tvpYe0devWJHb+/HnZt8prKs+uWLEiiSktqhyv8peZvnFX66D4AG6mb77UWjXTY1TXl+PHjycxpUW1fp988knZt8oVxQd5M7OvfOUrh+UO/JTS7Lp16+x3f/d322Iqz+auAeo6peZPafF73/teEluzZk0SO3HihOxbXftUvlH5QuVElWdzN9jeXxR0A+8v4Ly/DFbrykyva3UveOzYsZ5q1kzf0/7xH/9x0i53jd64cWMSe/nll5OYGq/S7dq1a5PY9PS07Ftx4cKFJKbuF9V1UuUwdQ+S26fSzn333ZfE1DosPiDfQV1n1BxNTU0lMXWvqsb9kz/5k7LvixcvJrGf+ImfaPv585//vNzWrNprjfea2ZF5Px9txQDqCpqFJoJuoWmgWWgaaBZqQ9cNQUIIz4YQ9oYQ9uZ+YwlQJ+ZrVn2iB1A30Cw0jfmaVb9lBqgj3NNCL6jycDZlZvPfEdjSirURY3wuxrgnxrin+E45QI8prdnJycmeHRxAhgV1i2ahZpTSrHolCqDHcE8LtaGKIciw3S6e/JTdFvCrZvb5GGNaKNNix44d8Ytf/GJb7KWXXkraqZocM11ncPhw+pqxesdZvUur3hVX7cx0IaDXGKNX74B7yRV4qri3eNL7XnmucFO9e19se+3aNZudna1iCFJas9u2bYu/+Zu/2RYr1vOY6Zo5MzN1o6zq01SNlqqZUOdDtTPTtVuqiFidu1whb5Fc/vCaK3QDbx2GtyhZ5Z1c282bN7f9fOTIEbt+/XpVQ5BSut2wYUN8+umn22Iqz+bqAdQ7+YcOHUpiak5VjZbSds7YQenOm2ermCSZVTOSUX2rtZrDuwZVrYY6ntzDzrlz55LY8uXLk9jZs2f/Nca4R+7EQVnNjo6OxkceeaQtpjSXuz6reVEa87ZT9wYqd+bwahFDkO+Tu4dQc6TOz8zMTE81a3b7/uA3fuM32mIvvvhi0u69996T26s6O1VLpuZA5Vpv3Wxun1WMs8rUP6rcqI5H5SbvNcbMb5qi7j9VDlU5ef369bJvZWxYrI177bXX7NKlS501BIkx3goh/IqZ/S8zW2JmX76biAH6DZqFJoJuoWmgWWgaaBbqRBW3Rosxvmhm6a8JAGoKmoUmgm6haaBZaBpoFupC1w1BAAAAAAAAYGF4OAMAAAAAAKgBlV5rLEsIISnkVEWEqgjQzP+lsaq4TxURqkLz3Jfqegt3m1DgmztGFVfz4S0kVeSKqtX2xX76YawSQkgKSFWBbO4LxNWXkaovRjx79mwSO3nypDyeIuoLcM18c1qmXRn6uQ68OvG2y51btX3x3HqNVTrJ7OxsYvygipNzhiDqy2XVulXnWI1X5ZBcnvVqsRv52KsH7z5zXxxchZyRSpFcni1zzesls7OziR5V7sytJ7VGvSZe6ryrdk3OiU2gzH2J16Si2wwNDSX3q8pAShmY5OLevOrVbS6vdTqHlrmP8LZVuUnNRS7fqft+FVN9K/Mhddy5nKxyUjGn3S2f88kZAAAAAABADeDhDAAAAAAAoAbwcAYAAAAAAFADeDgDAAAAAACoATycAQAAAAAA1ICeujXOzc0l7iTKGSznyKScUpRr
jzfWDye1QcXrnlQXlyUvIYTEJXRsbMy9vXJPyrnkFVGuQooyOsYx7Pt4tVh067yDmnflPttr5ubmEu2Mjo4m7ZQbnpnfodWbZ1XeruogBt+niptl2bbdQjk5q3WXc5b0ut4tW7bMtU+vtnN410sd5r4uqHtBMz1H4+PjSezUqVMdP6aFWL58uT3yyCNtscOHDyftiu65d1Bjzs1Dkaoa9erR205d+3J5Xjmvq3ulkZERV9/eOSuDN6/m7r88brF3W//9v5MAAAAAAAAAHs4AAAAAAADqAA9nAAAAAAAANYCHMwAAAAAAgBrQU0OQmzdv2rFjx9piqojz8uXLcntVXKhME1SB78zMjOsYKdBdHN55a1qheowx0Zi3GNbMb2Jz/fr1JKYKTasW/yvqMM91Jjc/VQt+u0WMMcl3qtg6lxNVW6VFr8mS6qfJmuunsUOuwH6x7cq27SbF41BF/rk8q+IqpowIiiZluW1zZguqrXe9NHkdePEaVyhTFzM9l90wgFgMV69ete9+97ttMWUIcuLECbm9Mr1RelLtVF5V7XKGFeocqNzvNblavnx5EssZ+KxYsSKJqfGsXLnStc+qRlxqHXrzYpk1XCbX8skZAAAAAABADeDhDAAAAAAAoAbwcAYAAAAAAFADKtWchRDeN7NLZjZrZrdijHs6cVAA3QTdQtNAs9A00Cw0DTQLdaEThiD/IcZ42tNwdnbWLl261BZTxbi5IsJly5YlMVUYqooavUWpuaLfJhTuVvnG91zbKsXOZShznB3CpdsQQlJkqzSnClfNzFavXp3EDh06lMS8haKq77oU9NcFpVmlr1wB+mLbmaU5qsMadml2ZGTEHnzwwbbY1NRU0u7s2bNye1VUnjNpKlI1zyqq5F6vFsz8a0tdh1Qxe65IXV2fvHQ5J3YDl2aXL19uu3fvbotdvXo1aZczghgfH09iSrOrVq1KYup6pswNivcudzsmdexKN+oYvddcM78BhPc+qYyxldqnWlvK/EFtq86Nmdn58+eTWO562yHc97Rm6dzkdKJQmlDaUzE112pelBZz23t1q3Ty0Y9+NIkpwz8zs/Xr1yexollgrm9lnjY2Nib7UW0VSt/KPKjMfaoyKyxjZMNrjQAAAAAAADWg6sNZNLP/HUL41xDCs504IIAegG6haaBZaBpoFpoGmoVaUPW1xh+LMU6FENab2TdCCG/HGF+Z36Al8GfN8h89AvSYu+p2vmbXrFnTr2MEmI9bs+oVL4A+4NasevUNoA+Uuqfl/gC6RaVPzmKMU62/p83s78zsE6LNczHGPTHGPeq9WYBes5Bu52uWXyhAHSijWW50oQ6U0ezIyEg/DhGgjbL3tKOjo70+RPiQsOhPzkIIK81sKMZ4qfXvz5jZf73bNjdv3ky+KX16ejpplyuonJiYSGKq4E8V4nlNLJpg/JGjG8de5VvSqx5PN8ZTVrdzc3OJaY0q6swVUqu4KrpVxjhq7pusz17hNU3wGjuofGKmc0px+06cr7KavXXrVlJEr/JkzphCmQx417eKqXmqo46V8Ysyp1Jr2hurSjfmrQ55Vt0bKCOInLmB0qzKqUqLap+qXc5cQOnGa0BU1eDFa8rhXdPKBMFM50WVP9V68ebZCxcuyL7VeczpoAqLuaedmZmxw4cPt8XUONR4zfRbDrm2RdR1SekpN1fq/kJdE9RaULlNjTu3ZtTaVturftS4c7+Q9Bp4VJmLXP5UbYtavlvurfJa4wYz+7vWQIfN7L/HGP9nhf0B9AJ0C00DzULTQLPQNNAs1IZFP5zFGN81s0c7eCwAXQfdQtNAs9A00Cw0DTQLdQIrfQAAAAAAgBrAwxkAAAAAAEANqGqlX4q5uTm7cuVKW6xMMbIqLlQFqFVMLJqMt7g49y3lvTBNyR2jp4hZnetuMzs7mxSvnj17NmmXM7HxFkOj2e/jLeLN4S3QVw5xag3k8BT9Vy34XwzDw8OJxbMqmM7ZQCsHMlWsrQqm1RpV+San9yp6UDFlcJAz6ti4cWMSO3PmTBJTa7p4XTPL51lvHuvVWlfz4c1HnSLGmIxXmVjk1pNatznDm8W2y6Hmz2s6oLZV+St3jCtXrkxiai5UO2W0kTNWUPpWBm3qOqhcutV4cuvSu9b7wdzcXHJf6jVFMdNzo/KGGq9aH2XMylRbpR3vPtV1I2duovKy6kfNj9pnboxqjlRblWtVO+89ci5eRrd8cgYAAAAAAFADeDgDAAAAAACoATycAQAAAAAA1AAezgAAAAAAAGpATw1BZmdnk4JR9W3fqlDVzOz06dNJTBVfquK+qiYDXqoUtOcKwL0FjN7C+zIFjN6+vXNZZs6L4+mHIcjMzIx98MEHbbFDhw4l7ZRhgpnWsiqG9hb/d9qgpd94zQhyBb/esat2qrC4asFvHVBF6mr8q1atktvv3r07iV28eDGJqdytzHK8Bho5lB68Rh/r1q1z9+0tCvduq47RzOzq1auufao8UdVAp1fXwbLcvHnTjh071hZT50lpzkzPldewpoqZSK6tmmelY9VOmSDkNKOu+d65UPpUpjhm+t5rfHw8ianroDJyU+cxZx6hzk+uba+5du2avf76622xqamppJ2aAzNtWKE0quZL7VNpLDdXXtMaNf9Kt8X1a2aJmdodlJ7UelexMoZqXnOWKuPu1v0Xn5wBAAAAAADUAB7OAAAAAAAAagAPZwAAAAAAADWAhzMAAAAAAIAawMMZAAAAAABADeipW+Pw8LCtXr26LXbq1KmkXc6tUTnbKMcZrwuN2p9yaCmzz6VLlyYxr1NSru/R0dEklpujIl63GjPtgqPmSOF1iizjuufRSreZnZ21y5cvt8WUFtR5z7VVc+V1TFPnI+dgpqjqHOrFuy5XrlyZxJRmR0ZGZD/KxUydC6VtdR7U2igzF8XxnDt3zr1tpxgbG7PHH3+8LXbixImk3ZYtW+T2jz32WBKbnp5OYmpejh49msQ2b96cxN544w3Zt8oZKn9u3749iRXXqZnZ/fffn8TefPNN2bfSjffcKy2tWLFCtlWaVfouOhub+d3dcqhrjLoWeR0lO0nx2HLXQ4Waf+/2VXNiFXdopbldu3Ylsffff1/2rTSm+hkbG0ti6r7i05/+tOznW9/6VhJTrpIqTyh9eZ0rzXTuVseuHAC7zdDQUOJaqdZoLhdMTEwkMeX2mHMj9JC75/Jeo71rQeXfnFOk181U5TbvNSKHN4eqnFIGlX+K6/1ux80nZwAAAAAAADWAhzMAAAAAAIAawMMZAAAAAABADVjw4SyE8OUQwnQI4Y15sTUhhG+EEA60/l59t30A9Bp0C00DzULTQLPQNNAsNAGP28PzZvYnZvYX82JfMLOXYoy/F0L4Quvn31poR7du3UqKNlXBfK4YWRkfVCn6LVOU6i2UVIYC3nZljB1UIaEaj7fIMtdW4S2ALlPEqrbPFbI6ed46oNulS5cmhgaqYPf8+fNye6VvpVnv/JUpkvfiNSMpQxXTEzUXOUMQbyGv2qcqVi5jrqD2WaYwWfC8dUCzN2/eTMxzVL7JFdAfPnw
[Notebook output data omitted: the remaining hunks of this .ipynb diff consist almost entirely of base64-encoded PNG figure payloads. The readable changes they carry are renumbered execution counts (11 -> 12 and 14 -> 13), a new 1 x 16 output tensor printed by one cell, and two regenerated figure images; the raw image data is not reproduced here.]
" ] @@ -280,7 +277,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.6.8" + "version": "3.7.3" } }, "nbformat": 4, diff --git a/vel/api/__init__.py b/vel/api/__init__.py index 89d2026f..358a356f 100644 --- a/vel/api/__init__.py +++ b/vel/api/__init__.py @@ -1,13 +1,13 @@ from .callback import Callback from .info import BatchInfo, EpochInfo, TrainingInfo from .model import ( - Model, GradientModel, LossFunctionModel, BackboneModel, LinearBackboneModel + Model, OptimizedModel, GradientModel, LossFunctionModel, BackboneModel, LinearBackboneModel ) from .model_config import ModelConfig from .model_factory import ModelFactory -from .optimizer import OptimizerFactory +from .optimizer import OptimizerFactory, VelOptimizer, VelOptimizerProxy from .schedule import Schedule from .scheduler import SchedulerFactory -from .source import Source +from .source import Source, LanguageSource from .storage import Storage from .transformation import Transformation, ScopedTransformation diff --git a/vel/api/info.py b/vel/api/info.py index 11544cff..1f0a765b 100644 --- a/vel/api/info.py +++ b/vel/api/info.py @@ -41,16 +41,11 @@ def __init__(self, start_epoch_idx=0, metrics=None, callbacks=None): self.callbacks = callbacks if callbacks is not None else [] self.history = TrainingHistory() - self.optimizer_initial_state = None - def restore(self, hidden_state): """ Restore any state from checkpoint - currently not implemented but possible to do so in the future """ for callback in self.callbacks: callback.load_state_dict(self, hidden_state) - if 'optimizer' in hidden_state: - self.optimizer_initial_state = hidden_state['optimizer'] - def initialize(self): """ Runs for the first time a training process is started from scratch. 
Is guaranteed to be run only once diff --git a/vel/api/model.py b/vel/api/model.py index 9cb31443..9032e098 100644 --- a/vel/api/model.py +++ b/vel/api/model.py @@ -3,6 +3,9 @@ import vel.util.module_util as mu +from vel.api.optimizer import VelOptimizer, OptimizerFactory +from vel.api.scheduler import SchedulerFactory +from vel.api.callback import Callback from vel.metric.loss_metric import Loss from vel.util.summary import summary @@ -45,10 +48,6 @@ def summary(self, input_size=None): else: summary(self, input_size) - def get_layer_groups(self): - """ Return layers grouped for optimization purposes """ - return [self] - def reset_weights(self): """ Call proper initializers for the weights """ pass @@ -63,13 +62,60 @@ def zero_state(self, batch_size): return None -class GradientModel(Model): - """ Model for a supervised learning problem """ +class OptimizedModel(Model): + """ Model that is being optimized by an 'optimizer' """ + + def create_optimizer(self, optimizer_factory: OptimizerFactory) -> VelOptimizer: + """ Create optimizer for the purpose of optimizing this model """ + parameters = filter(lambda p: p.requires_grad, self.parameters()) + return optimizer_factory.instantiate(parameters) + + def optimize(self, data: dict, optimizer: VelOptimizer) -> dict: + """ + Perform one step of optimization of the model + :returns a dictionary of metrics + """ + raise NotImplementedError + + def validate(self, data: dict) -> dict: + """ + Perform one step of model inference without optimization + :returns a dictionary of metrics + """ + raise NotImplementedError + + +class GradientModel(OptimizedModel): + """ Model that calculates a single gradient and optimizes it """ + + def optimize(self, data: dict, optimizer: VelOptimizer) -> dict: + """ + Perform one step of optimization of the model + :returns a dictionary of metrics + """ + optimizer.zero_grad() + + metrics = self.calculate_gradient(data) + + opt_metrics = optimizer.step() + + for key, value in opt_metrics.items(): + metrics[key] = value + + return metrics + + @torch.no_grad() + def validate(self, data: dict) -> dict: + """ + Perform one step of model inference without optimization + :returns a dictionary of metrics + """ + return self.calculate_gradient(data) def calculate_gradient(self, data: dict) -> dict: """ Calculate gradient for given batch of training data. - Returns a dictionary of metrics. 
+ :returns a dictionary of metrics """ raise NotImplementedError diff --git a/vel/api/model_config.py b/vel/api/model_config.py index b593e878..62bcbdbc 100644 --- a/vel/api/model_config.py +++ b/vel/api/model_config.py @@ -41,7 +41,7 @@ def from_project_directory(path) -> str: return os.path.join(ModelConfig.find_project_directory('.'), path) @classmethod - def from_file(cls, filename: str, run_number: int = 1, continue_training: bool = False, seed: int = None, + def from_file(cls, filename: str, run_number: int = 1, resume_training: bool = False, seed: int = None, device: str = 'cuda', parameters: typing.Optional[dict] = None, tag: typing.Optional[str] = None): """ Create model config from file """ with open(filename, 'r') as fp: @@ -62,7 +62,7 @@ def from_file(cls, filename: str, run_number: int = 1, continue_training: bool = configuration=aggregate_dictionary, run_number=run_number, project_dir=project_config_path, - continue_training=continue_training, + resume_training=resume_training, seed=seed, device=device, parameters=parameters, @@ -71,7 +71,7 @@ def from_file(cls, filename: str, run_number: int = 1, continue_training: bool = @classmethod def script(cls, model_name: str = 'script', configuration: typing.Optional[dict] = None, run_number: int = 1, - continue_training=False, seed: int = None, device: str = 'cuda', + resume_training=False, seed: int = None, device: str = 'cuda', parameters: typing.Optional[dict] = None, tag: typing.Optional[str] = None): """ Create model config from supplied data """ if configuration is None: @@ -94,7 +94,7 @@ def script(cls, model_name: str = 'script', configuration: typing.Optional[dict] configuration=aggregate_dictionary, run_number=run_number, project_dir=project_config_path, - continue_training=continue_training, + resume_training=resume_training, seed=seed, device=device, parameters=parameters, @@ -102,23 +102,29 @@ def script(cls, model_name: str = 'script', configuration: typing.Optional[dict] ) def __init__(self, filename: str, configuration: dict, run_number: int, project_dir: str, - continue_training=False, seed: int = None, device: str = 'cuda', + resume_training=False, seed: int = None, device: str = 'cuda', parameters: typing.Optional[dict] = None, tag: typing.Optional[str] = None): self.filename = filename self.device = device - self.continue_training = continue_training + self.resume_training = resume_training self.run_number = run_number self.seed = seed if seed is not None else (dtm.date.today().year + self.run_number) self.contents = configuration self.project_dir = os.path.normpath(project_dir) - self.command_descriptors = self.contents.get('commands', []) + self.command_descriptors = { + **self.contents.get('global_commands', {}), + **self.contents.get('commands', {}) + } # This one is special and needs to get removed if 'commands' in self.contents: del self.contents['commands'] + if 'global_commands' in self.contents: + del self.contents['global_commands'] + self.provider = Provider(self._prepare_environment(), {'model_config': self}, parameters=parameters) if self.provider.has_name('output_directory'): @@ -128,18 +134,19 @@ def __init__(self, filename: str, configuration: dict, run_number: int, project_ self._model_name = self.provider.get("name") - if continue_training: + if self.meta_exists(): self._meta = self._load_meta() - if tag is None: - self._tag = self._meta['tag'] - else: - if self._tag != self._meta['tag']: + if resume_training: + if (tag is not None) and (tag != self._meta['tag']): raise 
VelInitializationException("Model tag mismatch") + else: + self._tag = self._meta['tag'] + else: + self._tag = tag else: self._tag = tag - self._meta = self._create_meta() - self._write_meta() + self._meta = None #################################################################################################################### # INTERNAL FUNCTIONS @@ -149,19 +156,12 @@ def _prepare_environment(self) -> dict: def _load_meta(self) -> dict: """ Load previously written metadata about the project """ - if not os.path.exists(self.meta_dir(self.META_FILE_NAME)): + if not self.meta_exists(): raise VelInitializationException("Previous run does not exist") with open(self.meta_dir(self.META_FILE_NAME), 'rt') as fp: return json.load(fp) - def _write_meta(self) -> None: - """ Write metadata to a file """ - pathlib.Path(self.meta_dir()).mkdir(parents=True, exist_ok=True) - - with open(self.meta_dir(self.META_FILE_NAME), 'wt') as fp: - return json.dump(self.meta, fp) - def _create_meta(self) -> dict: """ Metadata for this model/config """ return { @@ -171,6 +171,26 @@ def _create_meta(self) -> dict: 'config': self.render_configuration() } + #################################################################################################################### + # Metadata handling + def meta_exists(self): + """ If metadata file exists for this config """ + return os.path.exists(self.meta_dir(self.META_FILE_NAME)) + + def enforce_meta(self): + """ Make sure metadata exists for this config """ + if self._meta is None: + raise VelInitializationException("Given model has not been initialized") + + def write_meta(self) -> None: + """ Write metadata to a file """ + self._meta = self._create_meta() + + pathlib.Path(self.meta_dir()).mkdir(parents=True, exist_ok=True) + + with open(self.meta_dir(self.META_FILE_NAME), 'wt') as fp: + return json.dump(self.meta, fp) + #################################################################################################################### # COMMAND UTILITIES def get_command(self, command_name): @@ -223,6 +243,7 @@ def name(self) -> str: @property def meta(self) -> dict: """ Return name of the model """ + self.enforce_meta() return self._meta @property @@ -239,7 +260,7 @@ def torch_device(self): def render_configuration(self) -> dict: """ Return a nice and picklable run configuration """ - return self.provider.render_configuration() + return self.provider.render_environment() #################################################################################################################### # PROVIDER API @@ -300,9 +321,7 @@ def load_trained_model(self): if last_epoch_idx == 0: raise VelInitializationException("No trained model available") - training_info = TrainingInfo( - start_epoch_idx=last_epoch_idx, - ) + training_info = TrainingInfo(start_epoch_idx=last_epoch_idx) model_state, hidden_state = storage.load(training_info) diff --git a/vel/api/optimizer.py b/vel/api/optimizer.py index 0f677f19..82f56825 100644 --- a/vel/api/optimizer.py +++ b/vel/api/optimizer.py @@ -1,10 +1,111 @@ -from vel.api import Model +import typing +import itertools as it -from torch.optim import Optimizer +from torch.nn.utils import clip_grad_norm_ +from torch.optim.optimizer import Optimizer +from vel.metric import DefaultAveragingNamedMetric +from vel.api.callback import Callback +from vel.api.scheduler import SchedulerFactory + + +class VelOptimizer: + """ Vel optimizer interface """ + + def get_lr(self) -> float: + """ Return current learning rate of the optimizer """ + raise 
NotImplementedError + + def set_lr(self, lr: float): + """ Set current learning rate of the optimizer """ + raise NotImplementedError + + def state_dict(self) -> dict: + raise NotImplementedError + + def load_state_dict(self, state_dict: dict) -> None: + raise NotImplementedError + + def zero_grad(self) -> None: + raise NotImplementedError + + def step(self, closure=None) -> dict: + raise NotImplementedError + + def add_param_group(self, param_group: dict) -> None: + raise NotImplementedError + + def metrics(self) -> list: + """ Set of metrics for this model """ + return [] + + def create_scheduler(self, scheduler_factory: SchedulerFactory, last_epoch: int = -1) -> [Callback]: + """ Create a scheduler instance for this optimizer """ + raise NotImplementedError + + +class VelOptimizerProxy(VelOptimizer): + """ Proxy PyTorch optimizer into a Vel optimizer """ + def __init__(self, optimizer: Optimizer, max_grad_norm: typing.Optional[float] = None): + self.optimizer = optimizer + self.max_grad_norm = max_grad_norm + + def get_lr(self) -> float: + """ Return current learning rate of the optimizer """ + return self.optimizer.param_groups[-1]['lr'] + + def set_lr(self, lr: float): + """ Set current learning rate of the optimizer """ + if isinstance(lr, list): + for group_lr, param_group in zip(lr, self.optimizer.param_groups): + param_group['lr'] = group_lr + else: + for param_group in self.optimizer.param_groups: + param_group['lr'] = lr + + def state_dict(self) -> dict: + return self.optimizer.state_dict() + + def load_state_dict(self, state_dict: dict) -> None: + self.optimizer.load_state_dict(state_dict) + + def zero_grad(self) -> None: + self.optimizer.zero_grad() + + def step(self, closure=None) -> dict: + # TODO(jerry): potentially allow max_grad_norm being a list? 
+ if self.max_grad_norm is not None: + grad_norm = clip_grad_norm_( + parameters=it.chain.from_iterable(pg['params'] for pg in self.optimizer.param_groups), + max_norm=self.max_grad_norm + ) + self.optimizer.step(closure) + return {'grad_norm': grad_norm} + else: + self.optimizer.step(closure) + return {} + + def add_param_group(self, param_group: dict) -> None: + self.optimizer.add_param_group(param_group) + + def metrics(self) -> list: + """ Set of metrics for this model """ + if self.max_grad_norm is not None: + return [ + DefaultAveragingNamedMetric('grad_norm', scope="opt", defaut_value=0.0) + ] + else: + return [] + + def create_scheduler(self, scheduler_factory: SchedulerFactory, last_epoch: int = -1) -> [Callback]: + """ Create a scheduler instance for this optimizer """ + return [scheduler_factory.instantiate(self.optimizer, last_epoch=last_epoch)] class OptimizerFactory: """ Base class for optimizer factories """ - def instantiate(self, model: Model) -> Optimizer: + def instantiate(self, parameters) -> VelOptimizer: + raise NotImplementedError + + def instantiate_parameter_groups(self, parameters) -> VelOptimizer: raise NotImplementedError diff --git a/vel/api/source.py b/vel/api/source.py index fb02e7e6..892f65a7 100644 --- a/vel/api/source.py +++ b/vel/api/source.py @@ -17,3 +17,58 @@ def __init__(self, train: data.Dataset, validation: data.Dataset, self.test = test self.metadata = {} if metadata is None else metadata + + +class LanguageSource(Source): + """ Special source for language datasets that handles dictionaries/encodings """ + + def __init__(self, train: data.Dataset, validation: data.Dataset, + fields: dict, mapping: dict, + test: typing.Optional[data.Dataset] = None, metadata: typing.Optional[dict] = None): + super().__init__(train, validation, test, metadata) + + self.fields = fields + self.mapping = mapping + + +# class SupervisedTextData(Source): +# """ An NLP torchtext data source """ +# def __init__(self, train_source, val_source, train_iterator, val_iterator, data_field, target_field): +# super().__init__() +# +# self.train_source = train_source +# self.val_source = val_source +# self.train_iterator = train_iterator +# self.val_iterator = val_iterator +# self.data_field = data_field +# self.target_field = target_field +# +# @property +# def train_loader(self): +# """ PyTorch loader of training data """ +# return self.train_iterator +# +# @property +# def val_loader(self): +# """ PyTorch loader of validation data """ +# return self.val_iterator +# +# @property +# def train_dataset(self): +# """ Return the training dataset """ +# return self.train_source +# +# @property +# def val_dataset(self): +# """ Return the validation dataset """ +# return self.val_source +# +# @property +# def train_iterations_per_epoch(self): +# """ Return number of iterations per epoch """ +# return len(self.train_iterator) +# +# @property +# def val_iterations_per_epoch(self): +# """ Return number of iterations per epoch - validation """ +# return len(self.val_iterator) diff --git a/vel/command/list_command.py b/vel/command/list_command.py new file mode 100644 index 00000000..11e37df8 --- /dev/null +++ b/vel/command/list_command.py @@ -0,0 +1,33 @@ +import os +import os.path +import glob +import json + +from vel.api import ModelConfig + + +class ListCommand: + """ List trained models for given config and their basic metadata """ + + def __init__(self, model_config: ModelConfig): + self.model_config = model_config + + def run(self): + meta_dir = self.model_config.output_dir('meta', 
self.model_config.name) + meta_paths = os.path.join(meta_dir, '*', 'meta.json') + + for path in sorted(glob.glob(meta_paths)): + with open(path, 'rt') as fp: + meta = json.load(fp) + + print("-" * 80) + print("Run name: {}".format(meta['run_name'])) + print("Tag: {}".format(meta['tag'])) + print("Created: {}".format(meta['created'])) + + +def create(model_config): + """ Vel factory function """ + return ListCommand( + model_config=model_config, + ) diff --git a/vel/command/phase_train_command.py b/vel/command/phase_train_command.py index 2318d7bf..0fb91197 100644 --- a/vel/command/phase_train_command.py +++ b/vel/command/phase_train_command.py @@ -56,27 +56,21 @@ def _select_phase_right_bound(self, epoch_number): def run(self): """ Run the command with supplied configuration """ device = self.model_config.torch_device() - learner = train.Trainer(device, self.model_factory.instantiate()) - - # All callbacks useful for learning - callbacks = self.gather_callbacks() - - # Metrics to track through this training - metrics = learner.metrics() + [SamplesPerSec()] + trainer = train.Trainer(device, self.model_factory.instantiate()) # Check if training was already started and potentially continue where we left off - training_info, hidden_state = self.resume_training(learner, callbacks, metrics) + training_info, hidden_state = self.start_training(trainer) # Prepare current training phase current_phase_idx = self._select_phase_left_bound(training_info.start_epoch_idx) current_phase = self.phases[current_phase_idx] local_idx = training_info.start_epoch_idx - self.ladder[current_phase_idx] - current_phase.set_up_phase(training_info, learner.model, self.loader) + current_phase.set_up_phase(training_info, trainer.model, self.loader) print(current_phase.banner()) if training_info.start_epoch_idx > 0: - current_phase.restore(training_info, local_idx, learner.model, hidden_state) + current_phase.restore(training_info, local_idx, trainer.model, hidden_state) training_info.on_train_begin() @@ -86,46 +80,46 @@ def run(self): # Phase preparations while current_phase_idx != iteration_phase_idx: - current_phase.tear_down_phase(training_info, learner.model) + current_phase.tear_down_phase(training_info, trainer.model) current_phase_idx += 1 current_phase = self.phases[current_phase_idx] - current_phase.set_up_phase(training_info, learner.model, self.loader) + current_phase.set_up_phase(training_info, trainer.model, self.loader) print(current_phase.banner()) # Create epoch info epoch_info = current_phase.epoch_info(training_info, global_epoch_idx, local_idx) # Execute learning - current_phase.execute_epoch(epoch_info, learner) + current_phase.execute_epoch(epoch_info, trainer) # Epoch checkpoint - self.storage.checkpoint(epoch_info, learner.model) + self.storage.checkpoint(epoch_info, trainer.model) # Tear down the last phase if current_phase is not None: - current_phase.tear_down_phase(training_info, learner.model) + current_phase.tear_down_phase(training_info, trainer.model) training_info.on_train_end() return training_info - def gather_callbacks(self) -> list: - """ Gather all the callbacks to be used in this training run """ + def start_training(self, trainer) -> (api.TrainingInfo, dict): + """ Possibly resume training from a saved state from the storage """ + if self.model_config.resume_training: + start_epoch = self.storage.last_epoch_idx() + else: + start_epoch = 0 + + # Initial set of callbacks, always useful callbacks = [TimeTracker(), SampleTracker()] callbacks.extend(self.callbacks) 
callbacks.extend(self.storage.streaming_callbacks()) - return callbacks - - def resume_training(self, learner, callbacks, metrics) -> (api.TrainingInfo, dict): - """ Possibly resume training from a saved state from the storage """ - if self.model_config.continue_training: - start_epoch = self.storage.last_epoch_idx() - else: - start_epoch = 0 + # Metrics to track through this training + metrics = trainer.metrics() + [SamplesPerSec()] training_info = api.TrainingInfo( start_epoch_idx=start_epoch, @@ -134,13 +128,16 @@ def resume_training(self, learner, callbacks, metrics) -> (api.TrainingInfo, dic ) if start_epoch == 0: + self.model_config.write_meta() self.storage.reset(self.model_config.render_configuration()) training_info.initialize() - learner.initialize_training(training_info) + trainer.initialize_training(training_info) hidden_state = None else: model_state, hidden_state = self.storage.load(training_info) - learner.initialize_training(training_info, model_state, hidden_state) + training_info.restore(hidden_state) + + trainer.initialize_training(training_info, model_state, hidden_state) return training_info, hidden_state diff --git a/vel/command/train_command.py b/vel/command/train_command.py index 5d3ca3ce..a504f9c7 100644 --- a/vel/command/train_command.py +++ b/vel/command/train_command.py @@ -14,8 +14,8 @@ class SimpleTrainCommand: def __init__(self, epochs: int, model_config: api.ModelConfig, model_factory: api.ModelFactory, optimizer_factory: api.OptimizerFactory, scheduler_factory: typing.Optional[api.SchedulerFactory], - loader: data.DatasetLoader, storage: api.Storage, callbacks: typing.Optional[typing.List[api.Callback]], - max_grad_norm: typing.Optional[float]): + loader: data.DatasetLoader, storage: api.Storage, + callbacks: typing.Optional[typing.List[api.Callback]]): self.epochs = epochs self.model_config = model_config self.model_factory = model_factory @@ -26,29 +26,19 @@ def __init__(self, epochs: int, model_config: api.ModelConfig, model_factory: ap self.loader = loader self.storage = storage self.callbacks = callbacks if callbacks is not None else [] - self.max_grad_norm = max_grad_norm def run(self): """ Run the command with supplied configuration """ device = self.model_config.torch_device() - learner = train.Trainer(device, self.model_factory.instantiate(), self.max_grad_norm) - optimizer = self.optimizer_factory.instantiate(learner.model) - - # All callbacks used for learning - callbacks = self.gather_callbacks(optimizer) - - # Metrics to track through this training - metrics = learner.metrics() + [SamplesPerSec()] + trainer = train.Trainer(device, self.model_factory.instantiate()) + optimizer = trainer.model.create_optimizer(self.optimizer_factory) # Check if training was already started and potentially continue where we left off - training_info = self.resume_training(learner, callbacks, metrics) + training_info = self.start_training(trainer, optimizer) training_info.on_train_begin() - if training_info.optimizer_initial_state: - optimizer.load_state_dict(training_info.optimizer_initial_state) - for global_epoch_idx in range(training_info.start_epoch_idx + 1, self.epochs + 1): epoch_info = api.EpochInfo( training_info=training_info, @@ -58,32 +48,34 @@ def run(self): ) # Execute learning - learner.run_epoch(epoch_info, self.loader) + trainer.run_epoch(epoch_info, self.loader) - self.storage.checkpoint(epoch_info, learner.model) + self.storage.checkpoint(epoch_info, trainer.model) training_info.on_train_end() return training_info - def gather_callbacks(self, 
optimizer) -> list: - """ Gather all the callbacks to be used in this training run """ + def start_training(self, trainer: train.Trainer, optimizer: api.VelOptimizer) -> api.TrainingInfo: + """ Possibly resume training from a saved state from the storage """ + if self.model_config.resume_training: + start_epoch = self.storage.last_epoch_idx() + else: + start_epoch = 0 + + # Initial set of callbacks, always useful callbacks = [TimeTracker(), SampleTracker()] if self.scheduler_factory is not None: - callbacks.append(self.scheduler_factory.instantiate(optimizer)) + callbacks.extend( + optimizer.create_scheduler(scheduler_factory=self.scheduler_factory, last_epoch=start_epoch-1) + ) callbacks.extend(self.callbacks) callbacks.extend(self.storage.streaming_callbacks()) - return callbacks - - def resume_training(self, learner, callbacks, metrics) -> api.TrainingInfo: - """ Possibly resume training from a saved state from the storage """ - if self.model_config.continue_training: - start_epoch = self.storage.last_epoch_idx() - else: - start_epoch = 0 + # Metrics to track through this training + metrics = trainer.metrics() + optimizer.metrics() + [SamplesPerSec()] training_info = api.TrainingInfo( start_epoch_idx=start_epoch, @@ -92,17 +84,23 @@ def resume_training(self, learner, callbacks, metrics) -> api.TrainingInfo: ) if start_epoch == 0: + self.model_config.write_meta() self.storage.reset(self.model_config.render_configuration()) training_info.initialize() - learner.initialize_training(training_info) + trainer.initialize_training(training_info) else: model_state, hidden_state = self.storage.load(training_info) - learner.initialize_training(training_info, model_state, hidden_state) + + training_info.restore(hidden_state) + trainer.initialize_training(training_info, model_state, hidden_state) + + if 'optimizer' in hidden_state: + optimizer.load_state_dict(hidden_state['optimizer']) return training_info -def create(model_config, epochs, optimizer, model, loader, storage, scheduler=None, callbacks=None, max_grad_norm=None): +def create(model_config, epochs, optimizer, model, loader, storage, scheduler=None, callbacks=None): """ Vel factory function """ return SimpleTrainCommand( epochs=epochs, @@ -113,5 +111,4 @@ def create(model_config, epochs, optimizer, model, loader, storage, scheduler=No loader=loader, storage=storage, callbacks=callbacks, - max_grad_norm=max_grad_norm ) diff --git a/vel/data/bucket_loader.py b/vel/data/bucket_loader.py new file mode 100644 index 00000000..46f70a4b --- /dev/null +++ b/vel/data/bucket_loader.py @@ -0,0 +1,68 @@ +import torchtext.data as data + +from vel.util.dataloader import IteratorDictWrapper +from vel.api import LanguageSource, ModelConfig + + +class BucketLoader: + """ Loads sequence data from a source and batches together examples of similar length """ + + def __init__(self, model_config: ModelConfig, source: LanguageSource, batch_size: int): + self.source = source + self.batch_size = batch_size + + if self.source.test is None: + self.train_loader, self.val_loader = data.BucketIterator.splits( + (self.source.train, self.source.validation), + batch_size=batch_size, + device=model_config.torch_device(), + shuffle=True + ) + self.test_loader = None + else: + self.train_loader, self.val_loader, self.test_loader = data.BucketIterator.splits( + (self.source.train, self.source.validation, self.source.test), + batch_size=batch_size, + device=model_config.torch_device(), + shuffle=True + ) + + self.train_loader = IteratorDictWrapper(self.train_loader, 
self.source.mapping) + self.val_loader = IteratorDictWrapper(self.val_loader, self.source.mapping) + + if self.test_loader: + self.test_loader = IteratorDictWrapper(self.test_loader, self.source.mapping) + + self._loaders = { + 'train': self.train_loader, + 'val': self.val_loader, + 'test': self.test_loader + } + + self._loader_sizes = { + 'train': len(self.train_loader), + 'val': len(self.val_loader), + 'test': 0 if self.test_loader is None else len(self.test_loader) + } + + def __getitem__(self, item): + return self._loaders[item] + + @property + def loader(self): + """ Get a dict of loaders """ + return self._loaders + + @property + def size(self): + """ Get a dict of sizes of each loader """ + return self._loader_sizes + + +def create(model_config: ModelConfig, source: LanguageSource, batch_size: int): + """ Vel factory function """ + return BucketLoader( + model_config=model_config, + source=source, + batch_size=batch_size, + ) diff --git a/vel/data/source/nlp/imdb.py b/vel/data/source/nlp/imdb.py index ff351b0a..71168756 100644 --- a/vel/data/source/nlp/imdb.py +++ b/vel/data/source/nlp/imdb.py @@ -7,7 +7,7 @@ import torchtext.datasets as ds -from vel.api import SupervisedTextData +from vel.api import LanguageSource class IMDBCached(ds.IMDB): @@ -45,7 +45,7 @@ def __init__(self, path, text_field, label_field, **kwargs): data.Dataset.__init__(self, examples, fields, **kwargs) -def create(model_config, batch_size, data_dir='imdb', vectors=None): +def create(model_config, data_dir='imdb', vectors=None): """ Create an IMDB dataset """ path = model_config.data_dir(data_dir) @@ -61,13 +61,23 @@ def create(model_config, batch_size, data_dir='imdb', vectors=None): text_field.build_vocab(train_source, max_size=25_000, vectors=vectors) label_field.build_vocab(train_source) - train_iterator, test_iterator = data.BucketIterator.splits( - (train_source, test_source), - batch_size=batch_size, - device=model_config.torch_device(), - shuffle=True + return LanguageSource( + train_source, + test_source, + fields=train_source.fields, + mapping={ + 'x': 'text', + 'y': 'label' + } ) - return SupervisedTextData( - train_source, test_source, train_iterator, test_iterator, text_field, label_field - ) + # train_iterator, test_iterator = data.BucketIterator.splits( + # (train_source, test_source), + # batch_size=batch_size, + # device=model_config.torch_device(), + # shuffle=True + # ) + + # return SupervisedTextData( + # train_source, test_source, train_iterator, test_iterator, text_field, label_field + # ) diff --git a/vel/data/source/nlp/text_url.py b/vel/data/source/nlp/text_url.py index fea44f95..cadafecd 100644 --- a/vel/data/source/nlp/text_url.py +++ b/vel/data/source/nlp/text_url.py @@ -36,6 +36,12 @@ def __init__(self, url, absolute_data_path, train_val_split=0.8): } ) + def encode_character(self, char): + return self.data_dict['character_to_index'][char] + + def decode_character(self, index): + return self.data_dict['index_to_character'][index] + def download(self) -> dict: """ Make sure data file is downloaded and stored properly """ if not os.path.exists(self.data_path): @@ -86,4 +92,4 @@ def create(model_config, url, local_dir, train_val_split=0.8): url, absolute_data_path=local_dir, train_val_split=train_val_split, -) + ) diff --git a/vel/internal/provider.py b/vel/internal/provider.py index e1060f2d..79921125 100644 --- a/vel/internal/provider.py +++ b/vel/internal/provider.py @@ -77,11 +77,12 @@ def instantiate_from_data(self, object_data): else: return object_data - def 
render_configuration(self, configuration=None): - """ Render variables in configuration object but don't instantiate anything """ - if configuration is None: - configuration = self.environment + def render_environment(self): + """ Render variables in current environment """ + return self.render_configuration(self.environment) + def render_configuration(self, configuration): + """ Render variables in configuration object but don't instantiate anything """ if isinstance(configuration, dict): return {k: self.render_configuration(v) for k, v in configuration.items()} elif isinstance(configuration, list): diff --git a/vel/launcher.py b/vel/launcher.py index 18a4f687..8b537415 100644 --- a/vel/launcher.py +++ b/vel/launcher.py @@ -24,7 +24,7 @@ def main(): help="Configuration parameters" ) parser.add_argument( - '--continue', action='store_true', default=False, help="Continue previously started learning process" + '--resume', action='store_true', default=False, help="Resume previously started learning process" ) parser.add_argument( '--profile', type=str, default=None, help="Profiler output" @@ -38,8 +38,8 @@ def main(): warnings.filterwarnings('error', module='torch\\..*') model_config = ModelConfig.from_file( - args.config, args.run_number, continue_training=getattr(args, 'continue'), device=args.device, seed=args.seed, - parameters={k: v for (k, v) in (Parser.parse_equality(eq) for eq in args.param)} + args.config, args.run_number, resume_training=args.resume, device=args.device, seed=args.seed, + parameters={k: v for (k, v) in (Parser.parse_equality(eq) for eq in args.param)}, tag=args.tag ) if model_config.project_dir not in sys.path: diff --git a/vel/metric/__init__.py b/vel/metric/__init__.py index be14e6c4..29d9f310 100644 --- a/vel/metric/__init__.py +++ b/vel/metric/__init__.py @@ -1,3 +1,5 @@ from .base.base_metric import BaseMetric, MetricKey # noqa -from .base.averaging_metric import AveragingMetric, AveragingNamedMetric, AveragingSupervisedMetric # noqa +from .base.averaging_metric import ( + AveragingMetric, AveragingNamedMetric, AveragingSupervisedMetric, DefaultAveragingNamedMetric # noqa +) from .base.value_metric import ValueMetric # noqa diff --git a/vel/metric/base/__init__.py b/vel/metric/base/__init__.py index 20f27c6d..a50c4f49 100644 --- a/vel/metric/base/__init__.py +++ b/vel/metric/base/__init__.py @@ -1,4 +1,4 @@ -from .averaging_metric import AveragingSupervisedMetric, AveragingNamedMetric, AveragingMetric +from .averaging_metric import AveragingSupervisedMetric, AveragingNamedMetric, AveragingMetric, DefaultAveragingNamedMetric from .base_metric import BaseMetric, MetricKey from .summing_metric import SummingMetric, SummingNamedMetric -from .value_metric import ValueMetric \ No newline at end of file +from .value_metric import ValueMetric diff --git a/vel/metric/base/averaging_metric.py b/vel/metric/base/averaging_metric.py index 2355b7fd..ae535fa7 100644 --- a/vel/metric/base/averaging_metric.py +++ b/vel/metric/base/averaging_metric.py @@ -36,6 +36,19 @@ def _value_function(self, batch_info): return batch_info[self.name] +class DefaultAveragingNamedMetric(AveragingNamedMetric): + """ AveragingNamedMetric that has a default value in case a metric is not found in the batch """ + def __init__(self, name, scope="general", defaut_value=0.0): + super().__init__(name, scope=scope) + self.default_value = defaut_value + + def _value_function(self, batch_info): + if self.name not in batch_info: + return self.default_value + else: + return batch_info[self.name] + + class 
AveragingSupervisedMetric(BaseMetric): """ Base class for metrics that simply calculate the average over the epoch """ def __init__(self, name, scope="general"): diff --git a/vel/model/imagenet/resnet34.py b/vel/model/imagenet/resnet34.py index a4a78f86..18bdd667 100644 --- a/vel/model/imagenet/resnet34.py +++ b/vel/model/imagenet/resnet34.py @@ -5,7 +5,7 @@ import vel.module.layers as layers import vel.util.module_util as mu -from vel.api import LossFunctionModel, ModelFactory +from vel.api import LossFunctionModel, ModelFactory, OptimizerFactory, VelOptimizer # Because of concat pooling it's 2x 512 @@ -84,6 +84,10 @@ def get_layer_groups(self): g3 = list(self.model[self.group_cut_layers[1]:]) return [g1, g2, g3] + def create_optimizer(self, optimizer_factory: OptimizerFactory) -> VelOptimizer: + parameters = mu.to_parameter_groups(self.get_layer_groups()) + return optimizer_factory.instantiate_parameter_groups(parameters) + def forward(self, x): """ Calculate model value """ return self.model(x) diff --git a/vel/model/latent/cnn_iwae.py b/vel/model/latent/cnn_iwae.py index c4b79ded..a6ee1d7f 100644 --- a/vel/model/latent/cnn_iwae.py +++ b/vel/model/latent/cnn_iwae.py @@ -19,8 +19,8 @@ class CnnIWAE(IWAE): """ def __init__(self, img_rows, img_cols, img_channels, k=5, channels=None, representation_length=32, - analytical_kl_div=True, max_grad_norm=0.5): - super().__init__(k=k, analytical_kl_div=analytical_kl_div, max_grad_norm=max_grad_norm) + analytical_kl_div=True): + super().__init__(k=k, analytical_kl_div=analytical_kl_div) if channels is None: channels = [16, 32, 32] diff --git a/vel/model/latent/cnn_vae.py b/vel/model/latent/cnn_vae.py index 958877bf..491cdb70 100644 --- a/vel/model/latent/cnn_vae.py +++ b/vel/model/latent/cnn_vae.py @@ -19,8 +19,8 @@ class CnnVAE(VaeBase): """ def __init__(self, img_rows, img_cols, img_channels, channels=None, representation_length=32, - analytical_kl_div=True, max_grad_norm=0.5): - super().__init__(analytical_kl_div=analytical_kl_div, max_grad_norm=max_grad_norm) + analytical_kl_div=True): + super().__init__(analytical_kl_div=analytical_kl_div) if channels is None: channels = [16, 32, 32] diff --git a/vel/model/latent/fc_iwae.py b/vel/model/latent/fc_iwae.py index ab6d8602..7e7a44da 100644 --- a/vel/model/latent/fc_iwae.py +++ b/vel/model/latent/fc_iwae.py @@ -16,8 +16,8 @@ class FcIwae(IWAE): """ def __init__(self, img_rows, img_cols, img_channels, k=5, layers=None, representation_length=32, - analytical_kl_div=False, max_grad_norm=None): - super().__init__(k=k, analytical_kl_div=analytical_kl_div, max_grad_norm=max_grad_norm) + analytical_kl_div=False): + super().__init__(k=k, analytical_kl_div=analytical_kl_div) if layers is None: layers = [512, 256] @@ -91,8 +91,7 @@ def decoder_sample(self, decoded: torch.Tensor) -> torch.Tensor: # self._weight_initializer(m) -def create(img_rows, img_cols, img_channels, k=5, layers=None, representation_length=32, max_grad_norm=None, - analytical_kl_div=True): +def create(img_rows, img_cols, img_channels, k=5, layers=None, representation_length=32, analytical_kl_div=True): """ Vel factory function """ if layers is None: layers = [512, 256] @@ -100,7 +99,7 @@ def create(img_rows, img_cols, img_channels, k=5, layers=None, representation_le def instantiate(**_): return FcIwae( img_rows, img_cols, img_channels, k=k, layers=layers, representation_length=representation_length, - max_grad_norm=max_grad_norm, analytical_kl_div=analytical_kl_div + analytical_kl_div=analytical_kl_div ) return 
ModelFactory.generic(instantiate) diff --git a/vel/model/latent/fc_vae.py b/vel/model/latent/fc_vae.py index 50d7d99c..fbad9e29 100644 --- a/vel/model/latent/fc_vae.py +++ b/vel/model/latent/fc_vae.py @@ -16,8 +16,8 @@ class FcVae(VaeBase): """ def __init__(self, img_rows, img_cols, img_channels, layers=None, representation_length=32, - analytical_kl_div=False, max_grad_norm=None): - super().__init__(analytical_kl_div=analytical_kl_div, max_grad_norm=max_grad_norm) + analytical_kl_div=False): + super().__init__(analytical_kl_div=analytical_kl_div) if layers is None: layers = [512, 256] @@ -91,7 +91,7 @@ def decoder_sample(self, decoded: torch.Tensor) -> torch.Tensor: # self._weight_initializer(m) -def create(img_rows, img_cols, img_channels, layers=None, representation_length=32, max_grad_norm=None, +def create(img_rows, img_cols, img_channels, layers=None, representation_length=32, analytical_kl_div=True): """ Vel factory function """ if layers is None: @@ -100,7 +100,7 @@ def create(img_rows, img_cols, img_channels, layers=None, representation_length= def instantiate(**_): return FcVae( img_rows, img_cols, img_channels, layers=layers, representation_length=representation_length, - max_grad_norm=max_grad_norm, analytical_kl_div=analytical_kl_div + analytical_kl_div=analytical_kl_div ) return ModelFactory.generic(instantiate) diff --git a/vel/model/latent/iwae.py b/vel/model/latent/iwae.py index 46359c41..faa24a68 100644 --- a/vel/model/latent/iwae.py +++ b/vel/model/latent/iwae.py @@ -8,8 +8,8 @@ class IWAE(VaeBase): Importance-Weighted Auto-Encoder https://arxiv.org/abs/1509.00519 """ - def __init__(self, k: int = 5, analytical_kl_div=True, max_grad_norm=1.0): - super().__init__(analytical_kl_div=analytical_kl_div, max_grad_norm=max_grad_norm) + def __init__(self, k: int = 5, analytical_kl_div=True): + super().__init__(analytical_kl_div=analytical_kl_div) self.k = k @@ -53,21 +53,9 @@ def calculate_gradient(self, data: dict) -> dict: if self.training: loss.backward() - if self.max_grad_norm is not None: - grad_norm = torch.nn.utils.clip_grad_norm_( - filter(lambda p: p.requires_grad, self.parameters()), - max_norm=self.max_grad_norm - ) - else: - grad_norm = 0.0 - else: - grad_norm = 0.0 - with torch.no_grad(): return { 'loss': loss.item(), - - 'grad_norm': grad_norm, 'reconstruction': -reconstruction.mean().item(), 'kl_divergence': kl_divergence.mean().item() } diff --git a/vel/model/latent/vae_base.py b/vel/model/latent/vae_base.py index 03fa88f7..75562b42 100644 --- a/vel/model/latent/vae_base.py +++ b/vel/model/latent/vae_base.py @@ -9,11 +9,10 @@ class VaeBase(GradientModel): """ Base module for variational auto-encoder implementations """ - def __init__(self, analytical_kl_div=True, max_grad_norm=1.0): + def __init__(self, analytical_kl_div=True): super().__init__() self.analytical_kl_div = analytical_kl_div - self.max_grad_norm = max_grad_norm #################################################################################################################### # Interface methods @@ -96,20 +95,8 @@ def calculate_gradient(self, data: dict) -> dict: if self.training: loss.backward() - if self.max_grad_norm is not None: - grad_norm = torch.nn.utils.clip_grad_norm_( - filter(lambda p: p.requires_grad, self.parameters()), - max_norm=self.max_grad_norm - ) - else: - grad_norm = 0.0 - else: - grad_norm = 0.0 - return { 'loss': loss.item(), - - 'grad_norm': grad_norm, 'reconstruction': -reconstruction.item(), 'kl_divergence': kl_divergence.item() } @@ -131,7 +118,6 @@ def metrics(self): 
Loss(), AveragingNamedMetric('reconstruction', scope="train"), AveragingNamedMetric('kl_divergence', scope="train"), - AveragingNamedMetric('grad_norm', scope="train") ] @torch.no_grad() diff --git a/vel/model/rnn/multilayer_rnn_sequence_classification.py b/vel/model/rnn/multilayer_rnn_sequence_classification.py index db5da9f0..4c724c76 100644 --- a/vel/model/rnn/multilayer_rnn_sequence_classification.py +++ b/vel/model/rnn/multilayer_rnn_sequence_classification.py @@ -4,7 +4,9 @@ import torch.nn.functional as F import torch.nn as nn -from vel.api import LossFunctionModel, ModelFactory, LinearBackboneModel +import vel.util.module_util as mu + +from vel.api import LossFunctionModel, ModelFactory, LinearBackboneModel, OptimizerFactory, VelOptimizer from vel.metric.accuracy import Accuracy from vel.metric.loss_metric import Loss from vel.module.rnn_layer import RnnLayer @@ -129,6 +131,11 @@ def get_layer_groups(self): self.output_layer ] + def create_optimizer(self, optimizer_factory: OptimizerFactory) -> VelOptimizer: + """ Create optimizer for the purpose of optimizing this model """ + parameters = mu.to_parameter_groups(self.get_layer_groups()) + return optimizer_factory.instantiate_parameter_groups(parameters) + @property def state_dim(self) -> int: """ Dimension of model state """ diff --git a/vel/module/input/embedding.py b/vel/module/input/embedding.py index 37d3387b..d055e257 100644 --- a/vel/module/input/embedding.py +++ b/vel/module/input/embedding.py @@ -1,13 +1,13 @@ import torch.nn as nn -from vel.api import LinearBackboneModel, SupervisedTextData, ModelFactory +from vel.api import LinearBackboneModel, ModelFactory, LanguageSource class EmbeddingInput(LinearBackboneModel): """ Learnable Embedding input layer """ def __init__(self, alphabet_size: int, output_dim: int, pretrained: bool = False, frozen: bool = False, - source: SupervisedTextData = None): + source: LanguageSource = None): super().__init__() self._output_dim = output_dim @@ -20,7 +20,7 @@ def __init__(self, alphabet_size: int, output_dim: int, pretrained: bool = False def reset_weights(self): if self._pretrained: - self.layer.weight.data.copy_(self._source.data_field.vocab.vectors) + self.layer.weight.data.copy_(self._source.fields['text'].vocab.vectors) if self._frozen: self.layer.weight.requires_grad = False @@ -35,7 +35,7 @@ def forward(self, input_data): def create(alphabet_size: int, output_dim: int, pretrained: bool = False, frozen: bool = False, - source: SupervisedTextData = None): + source: LanguageSource = None): """ Vel factory function """ def instantiate(**_): return EmbeddingInput(alphabet_size, output_dim, pretrained=pretrained, frozen=frozen, source=source) diff --git a/vel/notebook/loader.py b/vel/notebook/loader.py index ca0db31b..5efbd88c 100644 --- a/vel/notebook/loader.py +++ b/vel/notebook/loader.py @@ -1,21 +1,21 @@ from vel.api import ModelConfig -def load_config(config_path, run_number=0, device='cuda:0', continue_training=True): +def load_config(config_path, run_number=0, device='cuda:0', resume_training=True): """ Load a ModelConfig from filename """ return ModelConfig.from_file( ModelConfig.from_project_directory(config_path), run_number=run_number, device=device, - continue_training=continue_training + resume_training=resume_training ) -def script(model_name: str = 'script', run_number=0, device='cuda:0', continue_training=True): +def script(model_name: str = 'script', run_number=0, device='cuda:0', resume_training=True): """ Create an ad-hoc script model config """ return 
ModelConfig.script( model_name=model_name, run_number=run_number, device=device, - continue_training=continue_training + resume_training=resume_training ) diff --git a/vel/optimizer/adadelta.py b/vel/optimizer/adadelta.py index 7bdc5529..e5e01f0e 100644 --- a/vel/optimizer/adadelta.py +++ b/vel/optimizer/adadelta.py @@ -1,24 +1,50 @@ -import torch.optim +import typing -from vel.api import OptimizerFactory, Model +from torch.optim.adadelta import Adadelta + +import vel.util.module_util as mu + +from vel.api import OptimizerFactory, VelOptimizerProxy, VelOptimizer class AdadeltaFactory(OptimizerFactory): """ Adadelta optimizer factory """ - def __init__(self, lr=1.0, rho=0.9, eps=1e-6, weight_decay=0): + def __init__(self, lr: float = 1.0, rho: float = 0.9, eps: float = 1e-6, weight_decay: float = 0.0, + max_grad_norm: typing.Optional[float] = None): self.lr = lr self.rho = rho self.eps = eps self.weight_decay = weight_decay + self.max_grad_norm = max_grad_norm - def instantiate(self, model: Model) -> torch.optim.Adadelta: - return torch.optim.Adadelta( - filter(lambda p: p.requires_grad, model.parameters()), + def instantiate(self, parameters) -> VelOptimizer: + return VelOptimizerProxy(Adadelta( + parameters, lr=self.lr, rho=self.rho, eps=self.eps, weight_decay=self.weight_decay - ) + ), self.max_grad_norm) + + def instantiate_parameter_groups(self, out_parameters) -> VelOptimizer: + settings_dict = { + 'lr': self.lr, + 'rho': self.rho, + 'eps': self.eps, + 'weight_decay': self.weight_decay + } + + out_parameters = out_parameters.copy() + out_settings_dict = mu.optimizer_parameter_helper(out_parameters, settings_dict) + + return VelOptimizerProxy(Adadelta(out_parameters, **out_settings_dict), self.max_grad_norm) -def create(): +def create(lr: float = 1.0, rho: float = 0.9, eps: float = 1e-6, weight_decay: float = 0.0, + max_grad_norm: typing.Optional[float] = None): """ Vel factory function """ - return AdadeltaFactory() + return AdadeltaFactory( + lr=lr, + rho=rho, + eps=eps, + weight_decay=weight_decay, + max_grad_norm=max_grad_norm + ) diff --git a/vel/optimizer/adam.py b/vel/optimizer/adam.py index 5bda0c4f..46ad3f06 100644 --- a/vel/optimizer/adam.py +++ b/vel/optimizer/adam.py @@ -1,55 +1,44 @@ -import collections -import torch.optim +import typing + +from torch.optim.adam import Adam import vel.util.module_util as mu -from vel.api import OptimizerFactory, Model +from vel.api import OptimizerFactory, VelOptimizer, VelOptimizerProxy class AdamFactory(OptimizerFactory): """ Adam optimizer factory """ - def __init__(self, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, amsgrad=False, layer_groups=False): + def __init__(self, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, amsgrad=False, + max_grad_norm: typing.Optional[float] = None): self.lr = lr self.betas = betas self.eps = eps self.weight_decay = weight_decay self.amsgrad = amsgrad - self.layer_groups = layer_groups - - def instantiate(self, model: Model) -> torch.optim.Adam: - if self.layer_groups: - parameters = mu.to_parameter_groups(model.get_layer_groups()) - - if isinstance(self.lr, collections.Sequence): - for idx, lr in enumerate(self.lr): - parameters[idx]['lr'] = lr - - default_lr = self.lr[0] - else: - default_lr = float(self.lr) + self.max_grad_norm = max_grad_norm - if isinstance(self.weight_decay, collections.Sequence): - for idx, weight_decay in enumerate(self.weight_decay): - parameters[idx]['weight_decay'] = weight_decay + def instantiate(self, parameters) -> VelOptimizer: + return 
VelOptimizerProxy(Adam( + parameters, + lr=self.lr, betas=self.betas, eps=self.eps, weight_decay=self.weight_decay, amsgrad=self.amsgrad + ), self.max_grad_norm) - default_weight_decay = self.weight_decay[0] - else: - default_weight_decay = self.weight_decay + def instantiate_parameter_groups(self, out_parameters) -> VelOptimizer: + settings_dict = { + 'lr': self.lr, + 'eps': self.eps, + 'weight_decay': self.weight_decay, + 'amsgrad': self.amsgrad + } - return torch.optim.Adam( - parameters, - lr=default_lr, betas=self.betas, eps=self.eps, weight_decay=default_weight_decay, amsgrad=self.amsgrad - ) - else: - parameters = filter(lambda p: p.requires_grad, model.parameters()) + out_parameters = out_parameters.copy() + out_settings_dict = mu.optimizer_parameter_helper(out_parameters, settings_dict) - return torch.optim.Adam( - parameters, - lr=self.lr, betas=self.betas, eps=self.eps, weight_decay=self.weight_decay, amsgrad=self.amsgrad - ) + return VelOptimizerProxy(Adam(out_parameters, betas=self.betas, **out_settings_dict), self.max_grad_norm) -def create(lr, betas=(0.9, 0.999), weight_decay=0, epsilon=1e-8, layer_groups=False): +def create(lr, betas=(0.9, 0.999), weight_decay=0, epsilon=1e-8, max_grad_norm=None): """ Vel factory function """ - return AdamFactory(lr=lr, betas=betas, weight_decay=weight_decay, eps=epsilon, layer_groups=layer_groups) + return AdamFactory(lr=lr, betas=betas, weight_decay=weight_decay, eps=epsilon, max_grad_norm=max_grad_norm) diff --git a/vel/optimizer/radam.py b/vel/optimizer/radam.py index 607b11e9..7abc4959 100644 --- a/vel/optimizer/radam.py +++ b/vel/optimizer/radam.py @@ -1,16 +1,19 @@ """ RAdam implementation from: https://github.com/LiyuanLucasLiu/RAdam/blob/master/cifar_imagenet/utils/radam.py """ -import math import collections -import torch.optim +import math +import torch +import typing + +from torch.optim.optimizer import Optimizer import vel.util.module_util as mu -from vel.api import OptimizerFactory, Model +from vel.api import OptimizerFactory, VelOptimizer, VelOptimizerProxy -class RAdam(torch.optim.Optimizer): +class RAdam(Optimizer): def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0): defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay) self.buffer = [[None, None, None] for ind in range(10)] @@ -86,48 +89,35 @@ def step(self, closure=None): class RAdamFactory(OptimizerFactory): - """ RAdam optimizer factory """ + """ Adam optimizer factory """ - def __init__(self, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, layer_groups=False): + def __init__(self, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, + max_grad_norm: typing.Optional[float] = None): self.lr = lr self.betas = betas self.eps = eps self.weight_decay = weight_decay - self.layer_groups = layer_groups - - def instantiate(self, model: Model) -> RAdam: - if self.layer_groups: - parameters = mu.to_parameter_groups(model.get_layer_groups()) - - if isinstance(self.lr, collections.Sequence): - for idx, lr in enumerate(self.lr): - parameters[idx]['lr'] = lr - - default_lr = self.lr[0] - else: - default_lr = float(self.lr) + self.max_grad_norm = max_grad_norm - if isinstance(self.weight_decay, collections.Sequence): - for idx, weight_decay in enumerate(self.weight_decay): - parameters[idx]['weight_decay'] = weight_decay + def instantiate(self, parameters) -> VelOptimizer: + return VelOptimizerProxy(RAdam( + parameters, + lr=self.lr, betas=self.betas, eps=self.eps, weight_decay=self.weight_decay + ), self.max_grad_norm) - 
default_weight_decay = self.weight_decay[0] - else: - default_weight_decay = self.weight_decay + def instantiate_parameter_groups(self, out_parameters) -> VelOptimizer: + settings_dict = { + 'lr': self.lr, + 'eps': self.eps, + 'weight_decay': self.weight_decay + } - return RAdam( - parameters, - lr=default_lr, betas=self.betas, eps=self.eps, weight_decay=default_weight_decay, - ) - else: - parameters = filter(lambda p: p.requires_grad, model.parameters()) + out_parameters = out_parameters.copy() + out_settings_dict = mu.optimizer_parameter_helper(out_parameters, settings_dict) - return RAdam( - parameters, - lr=self.lr, betas=self.betas, eps=self.eps, weight_decay=self.weight_decay, - ) + return VelOptimizerProxy(RAdam(out_parameters, betas=self.betas, **out_settings_dict), self.max_grad_norm) -def create(lr, betas=(0.9, 0.999), weight_decay=0, epsilon=1e-8, layer_groups=False): +def create(lr, betas=(0.9, 0.999), weight_decay=0, epsilon=1e-8, max_grad_norm=None): """ Vel factory function """ - return RAdamFactory(lr=lr, betas=betas, weight_decay=weight_decay, eps=epsilon, layer_groups=layer_groups) + return RAdamFactory(lr=lr, betas=betas, weight_decay=weight_decay, eps=epsilon, max_grad_norm=max_grad_norm) diff --git a/vel/optimizer/ranger.py b/vel/optimizer/ranger.py index 3ab7f896..9f688ead 100644 --- a/vel/optimizer/ranger.py +++ b/vel/optimizer/ranger.py @@ -1,31 +1,15 @@ -#Ranger deep learning optimizer - RAdam + Lookahead combined. -#https://github.com/lessw2020/Ranger-Deep-Learning-Optimizer - -#Ranger has now been used to capture 12 records on the FastAI leaderboard. - -#This version = 9.3.19 - -#Credits: -#RAdam --> https://github.com/LiyuanLucasLiu/RAdam -#Lookahead --> rewritten by lessw2020, but big thanks to Github @LonePatient and @RWightman for ideas from their code. -#Lookahead paper --> MZhang,G Hinton https://arxiv.org/abs/1907.08610 - -#summary of changes: -#full code integration with all updates at param level instead of group, moves slow weights into state dict (from generic weights), -#supports group learning rates (thanks @SHolderbach), fixes sporadic load from saved model issues. -#changes 8/31/19 - fix references to *self*.N_sma_threshold; - #changed eps to 1e-5 as better default than 1e-8. +# Ranger deep learning optimizer - RAdam + Lookahead combined. 
+# https://github.com/lessw2020/Ranger-Deep-Learning-Optimizer import math import torch -import collections +import typing from torch.optim.optimizer import Optimizer - import vel.util.module_util as mu -from vel.api import OptimizerFactory, Model +from vel.api import OptimizerFactory, VelOptimizer, VelOptimizerProxy class Ranger(Optimizer): @@ -170,49 +154,78 @@ def step(self, closure=None): return loss +# class RangerFactory(OptimizerFactory): +# """ RAdam optimizer factory """ +# +# def __init__(self, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, layer_groups=False): +# self.lr = lr +# self.betas = betas +# self.eps = eps +# self.weight_decay = weight_decay +# self.layer_groups = layer_groups +# +# def instantiate(self, model: Model) -> Ranger: +# if self.layer_groups: +# parameters = mu.to_parameter_groups(model.get_layer_groups()) +# +# if isinstance(self.lr, collections.Sequence): +# for idx, lr in enumerate(self.lr): +# parameters[idx]['lr'] = lr +# +# default_lr = self.lr[0] +# else: +# default_lr = float(self.lr) +# +# if isinstance(self.weight_decay, collections.Sequence): +# for idx, weight_decay in enumerate(self.weight_decay): +# parameters[idx]['weight_decay'] = weight_decay +# +# default_weight_decay = self.weight_decay[0] +# else: +# default_weight_decay = self.weight_decay +# +# return Ranger( +# parameters, +# lr=default_lr, betas=self.betas, eps=self.eps, weight_decay=default_weight_decay, +# ) +# else: +# parameters = filter(lambda p: p.requires_grad, model.parameters()) +# +# return Ranger( +# parameters, +# lr=self.lr, betas=self.betas, eps=self.eps, weight_decay=self.weight_decay, +# ) + class RangerFactory(OptimizerFactory): - """ RAdam optimizer factory """ + """ Adam optimizer factory """ - def __init__(self, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, layer_groups=False): + def __init__(self, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, + max_grad_norm: typing.Optional[float] = None): self.lr = lr self.betas = betas self.eps = eps self.weight_decay = weight_decay - self.layer_groups = layer_groups - - def instantiate(self, model: Model) -> Ranger: - if self.layer_groups: - parameters = mu.to_parameter_groups(model.get_layer_groups()) - - if isinstance(self.lr, collections.Sequence): - for idx, lr in enumerate(self.lr): - parameters[idx]['lr'] = lr - - default_lr = self.lr[0] - else: - default_lr = float(self.lr) + self.max_grad_norm = max_grad_norm - if isinstance(self.weight_decay, collections.Sequence): - for idx, weight_decay in enumerate(self.weight_decay): - parameters[idx]['weight_decay'] = weight_decay + def instantiate(self, parameters) -> VelOptimizer: + return VelOptimizerProxy(Ranger( + parameters, + lr=self.lr, betas=self.betas, eps=self.eps, weight_decay=self.weight_decay + ), self.max_grad_norm) - default_weight_decay = self.weight_decay[0] - else: - default_weight_decay = self.weight_decay + def instantiate_parameter_groups(self, out_parameters) -> VelOptimizer: + settings_dict = { + 'lr': self.lr, + 'eps': self.eps, + 'weight_decay': self.weight_decay + } - return Ranger( - parameters, - lr=default_lr, betas=self.betas, eps=self.eps, weight_decay=default_weight_decay, - ) - else: - parameters = filter(lambda p: p.requires_grad, model.parameters()) + out_parameters = out_parameters.copy() + out_settings_dict = mu.optimizer_parameter_helper(out_parameters, settings_dict) - return Ranger( - parameters, - lr=self.lr, betas=self.betas, eps=self.eps, weight_decay=self.weight_decay, - ) + return 
VelOptimizerProxy(Ranger(out_parameters, betas=self.betas, **out_settings_dict), self.max_grad_norm) -def create(lr, betas=(0.9, 0.999), weight_decay=0, epsilon=1e-8, layer_groups=False): +def create(lr, betas=(0.9, 0.999), weight_decay=0, epsilon=1e-8, max_grad_norm=None): """ Vel factory function """ - return RangerFactory(lr=lr, betas=betas, weight_decay=weight_decay, eps=epsilon, layer_groups=layer_groups) + return RangerFactory(lr=lr, betas=betas, weight_decay=weight_decay, eps=epsilon, max_grad_norm=max_grad_norm) diff --git a/vel/optimizer/rmsprop.py b/vel/optimizer/rmsprop.py index fd80f327..eacf02ac 100644 --- a/vel/optimizer/rmsprop.py +++ b/vel/optimizer/rmsprop.py @@ -1,27 +1,51 @@ -import torch.optim +import typing -from vel.api import OptimizerFactory, Model +from torch.optim.rmsprop import RMSprop + +import vel.util.module_util as mu + +from vel.api import OptimizerFactory, VelOptimizerProxy, VelOptimizer class RMSpropFactory(OptimizerFactory): """ RMSprop optimizer factory """ - def __init__(self, lr=1e-2, alpha=0.99, eps=1e-8, weight_decay=0, momentum=0, centered=False): + def __init__(self, lr=1e-2, alpha=0.99, eps=1e-8, weight_decay=0, momentum=0, centered=False, + max_grad_norm: typing.Optional[float] = None): self.lr = lr self.alpha = alpha self.eps = eps self.weight_decay = weight_decay self.momentum = momentum self.centered = centered + self.max_grad_norm = max_grad_norm - def instantiate(self, model: Model) -> torch.optim.RMSprop: - return torch.optim.RMSprop( - filter(lambda p: p.requires_grad, model.parameters()), + def instantiate(self, parameters) -> VelOptimizer: + return VelOptimizerProxy(RMSprop( + parameters, lr=self.lr, alpha=self.alpha, eps=self.eps, weight_decay=self.weight_decay, momentum=self.momentum, centered=self.centered - ) + ), self.max_grad_norm) + + def instantiate_parameter_groups(self, out_parameters) -> VelOptimizer: + settings_dict = { + 'lr': self.lr, + 'alpha': self.alpha, + 'eps': self.eps, + 'weight_decay': self.weight_decay, + 'momentum': self.momentum, + 'centered': self.centered + } + + out_parameters = out_parameters.copy() + out_settings_dict = mu.optimizer_parameter_helper(out_parameters, settings_dict) + + return VelOptimizerProxy(RMSprop(out_parameters, **out_settings_dict), self.max_grad_norm) -def create(lr, alpha, momentum=0, weight_decay=0, epsilon=1e-8): +def create(lr, alpha, momentum=0, weight_decay=0, epsilon=1e-8, max_grad_norm=None): """ Vel factory function """ - return RMSpropFactory(lr=lr, alpha=alpha, momentum=momentum, weight_decay=weight_decay, eps=float(epsilon)) + return RMSpropFactory( + lr=lr, alpha=alpha, momentum=momentum, weight_decay=weight_decay, eps=float(epsilon), + max_grad_norm=max_grad_norm + ) diff --git a/vel/optimizer/rmsprop_tf.py b/vel/optimizer/rmsprop_tf.py index 5b0197da..4e80a401 100644 --- a/vel/optimizer/rmsprop_tf.py +++ b/vel/optimizer/rmsprop_tf.py @@ -1,9 +1,11 @@ -import torch.optim +from torch.optim.optimizer import Optimizer -from vel.api import OptimizerFactory, Model +import vel.util.module_util as mu +from vel.api import OptimizerFactory, VelOptimizer, VelOptimizerProxy -class RMSpropTF(torch.optim.Optimizer): + +class RMSpropTF(Optimizer): """Implements RMSprop algorithm. A TensorFlow version with epsilon under the square root Proposed by G. 
Hinton in his @@ -113,22 +115,42 @@ def step(self, closure=None): class RMSpropTFFactory(OptimizerFactory): """ RMSprop optimizer factory - A Tensorflow version with epsilon under square root """ - def __init__(self, lr=1e-2, alpha=0.99, eps=1e-8, weight_decay=0, momentum=0, centered=False): + def __init__(self, lr=1e-2, alpha=0.99, eps=1e-8, weight_decay=0, momentum=0, centered=False, + max_grad_norm: typing.Optional[float] = None): self.lr = lr self.alpha = alpha self.eps = eps self.weight_decay = weight_decay self.momentum = momentum self.centered = centered + self.max_grad_norm = max_grad_norm - def instantiate(self, model: Model) -> RMSpropTF: - return RMSpropTF( - filter(lambda p: p.requires_grad, model.parameters()), + def instantiate(self, parameters) -> VelOptimizer: + return VelOptimizerProxy(RMSpropTF( + parameters, lr=self.lr, alpha=self.alpha, eps=self.eps, weight_decay=self.weight_decay, momentum=self.momentum, centered=self.centered - ) + ), self.max_grad_norm) + + def instantiate_parameter_groups(self, out_parameters) -> VelOptimizer: + settings_dict = { + 'lr': self.lr, + 'alpha': self.alpha, + 'eps': self.eps, + 'weight_decay': self.weight_decay, + 'momentum': self.momentum, + 'centered': self.centered + } + + out_parameters = out_parameters.copy() + out_settings_dict = mu.optimizer_parameter_helper(out_parameters, settings_dict) + + return VelOptimizerProxy(RMSpropTF(out_parameters, **out_settings_dict), self.max_grad_norm) -def create(lr, alpha, momentum=0, weight_decay=0, epsilon=1e-8): +def create(lr, alpha, momentum=0, weight_decay=0, epsilon=1e-8, max_grad_norm=None): """ Vel factory function """ - return RMSpropTFFactory(lr=lr, alpha=alpha, momentum=momentum, weight_decay=weight_decay, eps=float(epsilon)) + return RMSpropTFFactory( + lr=lr, alpha=alpha, momentum=momentum, weight_decay=weight_decay, eps=float(epsilon), + max_grad_norm=max_grad_norm + ) diff --git a/vel/optimizer/sgd.py b/vel/optimizer/sgd.py index 128ccd1e..383053d5 100644 --- a/vel/optimizer/sgd.py +++ b/vel/optimizer/sgd.py @@ -1,34 +1,52 @@ -import torch.optim +import typing + +from torch.optim.sgd import SGD import vel.util.module_util as mu -from vel.api import OptimizerFactory, Model +from vel.api import OptimizerFactory, VelOptimizer, VelOptimizerProxy class SgdFactory(OptimizerFactory): """ SGD optimizer factory """ - def __init__(self, lr, momentum=0, dampening=0, weight_decay=0, nesterov=False, layer_groups: bool = False): + def __init__(self, lr, momentum=0, dampening=0, weight_decay=0, nesterov=False, + max_grad_norm: typing.Optional[float] = None): self.lr = lr self.momentum = momentum self.dampening = dampening self.weight_decay = weight_decay self.nesterov = nesterov - self.layer_groups = layer_groups - - def instantiate(self, model: Model) -> torch.optim.SGD: - if self.layer_groups: - parameters = mu.to_parameter_groups(model.get_layer_groups()) - else: - parameters = filter(lambda p: p.requires_grad, model.parameters()) - - return torch.optim.SGD( - parameters, - lr=self.lr, momentum=self.momentum, dampening=self.dampening, weight_decay=self.weight_decay, - nesterov=self.nesterov + self.max_grad_norm = max_grad_norm + + def instantiate(self, parameters) -> VelOptimizer: + return VelOptimizerProxy( + SGD( + parameters, + lr=self.lr, momentum=self.momentum, dampening=self.dampening, weight_decay=self.weight_decay, + nesterov=self.nesterov + ), self.max_grad_norm ) + def instantiate_parameter_groups(self, parameters) -> VelOptimizer: + settings_dict = { + 'lr': self.lr, + 'momentum': 
self.momentum,
+            'dampening': self.dampening,
+            'weight_decay': self.weight_decay,
+            'nesterov': self.nesterov
+        }
+
+        parameters = parameters.copy()
+        out_settings_dict = mu.optimizer_parameter_helper(parameters, settings_dict)
+
+        return VelOptimizerProxy(SGD(parameters, **out_settings_dict), self.max_grad_norm)
+
 
-def create(lr, weight_decay=0, momentum=0, layer_groups=False):
+def create(lr, momentum=0, dampening=0, weight_decay=0, nesterov=False,
+           max_grad_norm: typing.Optional[float] = None):
     """ Vel factory function """
-    return SgdFactory(lr=lr, weight_decay=weight_decay, momentum=momentum, layer_groups=layer_groups)
+    return SgdFactory(
+        lr=lr, momentum=momentum, dampening=dampening,
+        weight_decay=weight_decay, nesterov=nesterov, max_grad_norm=max_grad_norm
+    )
diff --git a/vel/rl/algo/distributional_dqn.py b/vel/rl/algo/distributional_dqn.py
index 95ca440e..adbee949 100644
--- a/vel/rl/algo/distributional_dqn.py
+++ b/vel/rl/algo/distributional_dqn.py
@@ -10,7 +10,7 @@ class DistributionalDeepQLearning(OptimizerAlgoBase):
     """ Deep Q-Learning algorithm """
 
     def __init__(self, model_factory: ModelFactory, discount_factor: float, double_dqn: bool,
-                 target_update_frequency: int, max_grad_norm: float):
-        super().__init__(max_grad_norm)
+                 target_update_frequency: int):
+        super().__init__()
 
         self.model_factory = model_factory
diff --git a/vel/rl/api/algo_base.py b/vel/rl/api/algo_base.py
index 305e2624..b25374b1 100644
--- a/vel/rl/api/algo_base.py
+++ b/vel/rl/api/algo_base.py
@@ -1,18 +1,3 @@
-import torch.nn.utils
-
-
-def clip_gradients(batch_result, model, max_grad_norm):
-    """ Clip gradients to a given maximum length """
-    if max_grad_norm is not None:
-        grad_norm = torch.nn.utils.clip_grad_norm_(
-            filter(lambda p: p.requires_grad, model.parameters()),
-            max_norm=max_grad_norm
-        )
-    else:
-        grad_norm = 0.0
-
-    batch_result['grad_norm'] = grad_norm
-
 
 class AlgoBase:
     """ Base class for algo reinforcement calculations """
@@ -37,9 +22,6 @@ def metrics(self) -> list:
 
 class OptimizerAlgoBase(AlgoBase):
     """ RL algo that does a simple optimizer update """
-    def __init__(self, max_grad_norm):
-        self.max_grad_norm = max_grad_norm
-
     def calculate_gradient(self, batch_info, device, model, rollout):
         """ Calculate loss of the supplied rollout """
         raise NotImplementedError
@@ -54,8 +36,6 @@ def optimize(self, batch_info, device, model, rollout):
 
         batch_result = self.calculate_gradient(batch_info=batch_info, device=device, model=model, rollout=rollout)
 
-        clip_gradients(batch_result, model, self.max_grad_norm)
-
         batch_info.optimizer.step(closure=None)
 
         self.post_optimization_step(batch_info, device, model, rollout)
diff --git a/vel/rl/command/rl_train_command.py b/vel/rl/command/rl_train_command.py
index f0363cbb..539cf5d3 100644
--- a/vel/rl/command/rl_train_command.py
+++ b/vel/rl/command/rl_train_command.py
@@ -117,7 +117,7 @@ def gather_callbacks(self, optimizer) -> list:
 
     def resume_training(self, reinforcer, callbacks, metrics) -> TrainingInfo:
         """ Possibly resume training from a saved state from the storage """
-        if self.model_config.continue_training:
+        if self.model_config.resume_training:
             start_epoch = self.storage.last_epoch_idx()
         else:
             start_epoch = 0
diff --git a/vel/scheduler/ladder.py b/vel/scheduler/ladder.py
index 0699c266..780ed013 100644
--- a/vel/scheduler/ladder.py
+++ b/vel/scheduler/ladder.py
@@ -2,7 +2,7 @@
 
 import numpy as np
 
-from vel.api import Callback, SchedulerFactory
+from vel.api import Callback, SchedulerFactory, EpochInfo
 
 
 class LadderScheduler(Callback):
@@ -16,7 +16,7 @@ def 
lambda_fn(self, epoch_idx): idx = np.minimum(np.searchsorted(self.schedule_limits, epoch_idx), len(self.schedule_limits) - 1) return self.schedule_numbers[idx] - def on_epoch_begin(self, epoch_info): + def on_epoch_end(self, epoch_info: EpochInfo) -> None: self.scheduler.step(epoch=epoch_info.global_epoch_idx) diff --git a/vel/storage/streaming/stdout.py b/vel/storage/streaming/stdout.py index 7ef02893..7dc18f8e 100644 --- a/vel/storage/streaming/stdout.py +++ b/vel/storage/streaming/stdout.py @@ -29,12 +29,12 @@ def _print_metrics_line(metrics, dataset=None): dataset = 'Metrics:' metrics_list = [ - "{}/{} {:.06f}".format(k.scope, k.name, metrics[k]) + "{}/{} {:.04f}".format(k.scope, k.name, metrics[k]) for k in sorted([k for k in metrics.keys() if k.dataset is None]) ] else: metrics_list = [ - "{}/{} {:.06f}".format(k.scope, k.name, metrics[k]) + "{}/{} {:.04f}".format(k.scope, k.name, metrics[k]) for k in sorted([k for k in metrics.keys() if k.dataset == dataset]) ] diff --git a/vel/train/phase/cycle.py b/vel/train/phase/cycle.py index 1f3358c0..63fae205 100644 --- a/vel/train/phase/cycle.py +++ b/vel/train/phase/cycle.py @@ -3,7 +3,7 @@ import vel.util.interpolate as interp -from vel.api import BatchInfo, EpochInfo, TrainingInfo, Callback +from vel.api import BatchInfo, EpochInfo, TrainingInfo, Callback, OptimizedModel from vel.train import TrainPhase @@ -77,16 +77,7 @@ def on_batch_begin(self, batch_info: BatchInfo, dataset: typing.Optional[str] = else: lr = interp.interpolate_single(self.max_lr, self.min_lr, interpolation_number, how=self.interpolate) - self.set_lr(lr) - - def set_lr(self, lr): - """ Set a learning rate for the optimizer """ - if isinstance(lr, list): - for group_lr, param_group in zip(lr, self.optimizer.param_groups): - param_group['lr'] = group_lr - else: - for param_group in self.optimizer.param_groups: - param_group['lr'] = lr + self.optimizer.set_lr(lr) class CyclePhase(TrainPhase): @@ -123,10 +114,10 @@ def __init__(self, optimizer_factory, max_lr, min_lr, cycles, cycle_len=1, cycle def number_of_epochs(self) -> int: return self.epochs - def set_up_phase(self, training_info, model, loader): + def set_up_phase(self, training_info, model: OptimizedModel, loader): """ Prepare the phase for learning """ # To parameter groups handles properly filtering parameters that don't require gradient - self._optimizer_instance = self.optimizer_factory.instantiate(model) + self._optimizer_instance = model.create_optimizer(self.optimizer_factory) self._loader = loader self.special_callback = CycleCallback( diff --git a/vel/train/phase/generic.py b/vel/train/phase/generic.py index db790fed..00c86660 100644 --- a/vel/train/phase/generic.py +++ b/vel/train/phase/generic.py @@ -1,4 +1,4 @@ -from vel.api import TrainingInfo, EpochInfo +from vel.api import TrainingInfo, EpochInfo, OptimizedModel from vel.data import DatasetLoader from vel.train import TrainPhase @@ -18,9 +18,9 @@ def __init__(self, lr, epochs, optimizer_factory): def number_of_epochs(self) -> int: return self.epochs - def set_up_phase(self, training_info, model, loader: DatasetLoader): + def set_up_phase(self, training_info: TrainingInfo, model: OptimizedModel, loader: DatasetLoader): """ Prepare the phase for learning """ - self._optimizer_instance = self.optimizer_factory.instantiate(model) + self._optimizer_instance = model.create_optimizer(self.optimizer_factory) self._loader = loader def epoch_info(self, training_info: TrainingInfo, global_idx: int, local_idx: int) -> EpochInfo: diff --git 
a/vel/train/train_phase.py b/vel/train/train_phase.py index daeb5733..13733d12 100644 --- a/vel/train/train_phase.py +++ b/vel/train/train_phase.py @@ -1,6 +1,4 @@ -from torch.optim import Optimizer - -from vel.api import TrainingInfo, EpochInfo, Model +from vel.api import TrainingInfo, EpochInfo, Model, VelOptimizer from vel.data import DatasetLoader from .trainer import Trainer @@ -14,7 +12,7 @@ def number_of_epochs(self) -> int: """ How many epochs does this phase take """ raise NotImplementedError - def set_up_phase(self, training_info: TrainingInfo, model: Model, loader: DatasetLoader) -> Optimizer: + def set_up_phase(self, training_info: TrainingInfo, model: Model, loader: DatasetLoader) -> VelOptimizer: """ Prepare the phase for learning, returns phase optimizer """ pass diff --git a/vel/train/trainer.py b/vel/train/trainer.py index 9bddd3d5..6222067c 100644 --- a/vel/train/trainer.py +++ b/vel/train/trainer.py @@ -2,9 +2,8 @@ import torch import torch.nn import tqdm -import typing -from vel.api import GradientModel, TrainingInfo, EpochInfo, BatchInfo +from vel.api import OptimizedModel, TrainingInfo, EpochInfo, BatchInfo from vel.data import DatasetLoader from vel.util.tensor_util import to_device @@ -13,19 +12,14 @@ class Trainer: """ Manages training process of a single model """ - def __init__(self, device: torch.device, model: GradientModel, max_grad_norm: typing.Optional[float] = None): + def __init__(self, device: torch.device, model: OptimizedModel): self.device = device - self.model = model.to(device) - self.max_grad_norm = max_grad_norm + self.model: OptimizedModel = model.to(device) def metrics(self): """ Return metrics for given learner/model """ return self.model.metrics() - def summary(self): - """ Print summary for given learner/model """ - return self.model.summary() - def train(self): """ Set model in the training mode """ return self.model.train() @@ -49,7 +43,7 @@ def run_epoch(self, epoch_info: EpochInfo, loader: DatasetLoader): """ Run full epoch of learning """ epoch_info.on_epoch_begin() - lr = epoch_info.optimizer.param_groups[-1]['lr'] + lr = epoch_info.optimizer.get_lr() print("|-------- Epoch {:06} Lr={:.6f} ----------|".format(epoch_info.global_epoch_idx, lr)) self.train_epoch(epoch_info, loader) @@ -74,7 +68,9 @@ def train_epoch(self, epoch_info, loader: DatasetLoader, interactive=True): batch_info['datapoint'] = datapoint batch_info.on_batch_begin('train') - self.train_batch(batch_info, datapoint) + datapoint = to_device(datapoint, self.device) # Move a data batch into the right device + metrics = self.model.optimize(datapoint, batch_info.optimizer) + batch_info.update(metrics) batch_info.on_batch_end('train') iterator.set_postfix(loss=epoch_info.result_accumulator.intermediate_value('loss')) @@ -94,26 +90,9 @@ def validation_epoch(self, epoch_info, loader: DatasetLoader, interactive=True): batch_info['datapoint'] = datapoint batch_info.on_batch_begin('val') - self.feed_batch(batch_info, datapoint) - batch_info.on_batch_end('val') - - def feed_batch(self, batch_info, data): - """ Run single batch of data """ - data = to_device(data, self.device) # Move a data batch into the right device - - metrics = self.model.calculate_gradient(data) - batch_info.update(metrics) + datapoint = to_device(datapoint, self.device) # Move a data batch into the right device + metrics = self.model.validate(datapoint) + batch_info.update(metrics) - def train_batch(self, batch_info, data): - """ Train single batch of data """ - batch_info.optimizer.zero_grad() - 
self.feed_batch(batch_info, data) - - if self.max_grad_norm is not None: - batch_info['grad_norm'] = torch.nn.utils.clip_grad_norm_( - filter(lambda p: p.requires_grad, self.model.parameters()), - max_norm=self.max_grad_norm - ) - - batch_info.optimizer.step() + batch_info.on_batch_end('val') diff --git a/vel/util/dataloader.py b/vel/util/dataloader.py new file mode 100644 index 00000000..b6b03fe5 --- /dev/null +++ b/vel/util/dataloader.py @@ -0,0 +1,21 @@ +class IteratorDictWrapper: + """ Transform dataset loader into a dictionary """ + + def __init__(self, iterator, field_mapping): + self.iterator = iterator + self.field_mapping = field_mapping + + def __iter__(self): + return map(self.map_values, iter(self.iterator)) + + def __len__(self): + return len(self.iterator) + + def map_values(self, item): + """ Map iterator values into a dictionary """ + return { + name: getattr(item, argument) for name, argument in self.field_mapping.items() + } + + + diff --git a/vel/util/module_util.py b/vel/util/module_util.py index 1c501336..ae415425 100644 --- a/vel/util/module_util.py +++ b/vel/util/module_util.py @@ -81,3 +81,19 @@ def chain_params(p): def to_parameter_groups(layer_groups): """ Convert from list of layer groups into list of parameter settings for an optimizer """ return [{'params': chain_params(x)} for x in layer_groups] + + +def optimizer_parameter_helper(parameters, parameter_dict): + """ Helper function for creating layer group optimizer instances """ + out_dict = parameter_dict.copy() + + for parameter, value in parameter_dict.items(): + if isinstance(value, collections.Sequence): + for idx, this_value in enumerate(value): + parameters[idx][parameter] = this_value + + out_dict[parameter] = value[0] + + return out_dict + + From 7e70b3d75a9c0ce2b8f23aaaa0d910cd4a4dd9b7 Mon Sep 17 00:00:00 2001 From: Million Integrals Date: Thu, 26 Sep 2019 22:20:40 -0700 Subject: [PATCH 098/162] Bring back RL algos. 
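This patch reorganises the RL stack: the algorithms (A2C, PPO, TRPO) become top-level models under
vel.rl.algo that own their policy networks, gradient clipping moves from the algorithm options to the
optimizer (max_grad_norm under optimizer:), the per-environment Mujoco configs are collapsed into
parameterised mujoco_a2c/ppo/trpo.yaml files, and configs that have not been ported yet are parked
under purgatory/.

A minimal sketch of how the pieces compose after this change (the A2C signature matches
vel/rl/algo/a2c.py added below; the `policy` argument is schematic and stands in for whatever the
vel.rl.policy.stochastic_policy factory builds):

    from vel.rl.algo.a2c import A2C

    def build_a2c_model(policy):
        """ Wrap a policy network in the A2C algorithm object -- the algorithm is now the model """
        return A2C(
            policy=policy,              # e.g. a stochastic policy over a nature_cnn backbone
            entropy_coefficient=0.01,   # values mirror examples-configs/rl/atari/atari_a2c.yaml
            value_coefficient=0.5,
            discount_factor=0.99,
        )
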
--- examples-configs/rl/atari/atari_a2c.yaml | 31 +- examples-configs/rl/atari/atari_ppo.yaml | 43 +- examples-configs/rl/atari/atari_trpo.yaml | 50 +-- .../atari/{ => purgatory}/atari_a2c_lstm.yaml | 0 .../{ => purgatory}/atari_a2c_tf_rmsprop.yaml | 0 .../rl/atari/{ => purgatory}/atari_acer.yaml | 0 .../atari_acer_trust_region.yaml | 0 .../atari/{ => purgatory}/atari_ppo_gru.yaml | 0 .../rl/mujoco/a2c/reacher_a2c.yaml | 67 --- examples-configs/rl/mujoco/mujoco_a2c.yaml | 65 +++ examples-configs/rl/mujoco/mujoco_ppo.yaml | 77 ++++ examples-configs/rl/mujoco/mujoco_trpo.yaml | 74 ++++ .../rl/mujoco/ppo/half_cheetah_ppo.yaml | 77 ---- .../rl/mujoco/ppo/hopper_ppo.yaml | 77 ---- .../rl/mujoco/ppo/reacher_ppo.yaml | 77 ---- .../rl/mujoco/ppo/walker_ppo.yaml | 77 ---- .../rl/mujoco/trpo/half_cheetah_trpo.yaml | 81 ---- .../rl/mujoco/trpo/hopper_trpo.yaml | 82 ---- .../rl/mujoco/trpo/reacher_trpo.yaml | 81 ---- .../multilayer_rnn_sequence_classification.py | 2 +- .../rnn/multilayer_rnn_sequence_model.py | 2 +- vel/openai/baselines/common/atari_wrappers.py | 76 +++- vel/openai/baselines/common/retro_wrappers.py | 63 +-- .../baselines/common/running_mean_std.py | 24 +- .../baselines/common/vec_env/__init__.py | 45 +- .../baselines/common/vec_env/dummy_vec_env.py | 9 +- .../baselines/common/vec_env/shmem_vec_env.py | 38 +- .../common/vec_env/subproc_vec_env.py | 32 +- vel/openai/baselines/common/wrappers.py | 31 ++ vel/openai/baselines/logger.py | 131 +++--- vel/rl/algo/a2c.py | 142 ++++++ vel/rl/algo/policy_gradient/trpo.py | 274 ------------ vel/rl/algo/ppo.py | 193 ++++++++ vel/rl/algo/trpo.py | 413 ++++++++++++++++++ vel/rl/api/__init__.py | 8 +- vel/rl/api/algo_base.py | 47 -- vel/rl/api/evaluator.py | 154 ------- vel/rl/api/policy.py | 22 - vel/rl/api/reinforcer_base.py | 9 +- vel/rl/api/rl_model.py | 65 +++ vel/rl/command/rl_train_command.py | 53 +-- vel/rl/env_roller/step_env_roller.py | 4 +- .../trajectory_replay_env_roller.py | 4 +- .../transition_replay_env_roller.py | 4 +- .../policy/purgatory/deterministic_policy.py | 4 +- ...tic_policy.py => old_stochastic_policy.py} | 4 +- .../old_stochastic_rnn_policy.py} | 49 +-- .../purgatory/q_distributional_policy.py | 4 +- vel/rl/policy/purgatory/q_model.py | 4 +- vel/rl/policy/purgatory/q_noisy_model.py | 4 +- .../policy/purgatory/stochastic_rnn_policy.py | 49 ++- .../semipurgatory}/__init__.py | 0 .../semipurgatory/a2c_rnn.py} | 7 +- .../semipurgatory}/acer.py | 0 .../semipurgatory}/ddpg.py | 0 vel/rl/{algo => policy/semipurgatory}/dqn.py | 0 .../semipurgatory/ppo_rnn.py} | 0 vel/rl/policy/stochastic_policy.py | 53 +-- ...arate.py => stochastic_policy_separate.py} | 43 +- ...fered_mixed_policy_iteration_reinforcer.py | 26 +- ...uffered_off_policy_iteration_reinforcer.py | 24 +- .../on_policy_iteration_reinforcer.py | 67 ++- vel/rl/test/test_integration.py | 4 +- vel/rl/util/actor.py | 4 +- 64 files changed, 1522 insertions(+), 1628 deletions(-) rename examples-configs/rl/atari/{ => purgatory}/atari_a2c_lstm.yaml (100%) rename examples-configs/rl/atari/{ => purgatory}/atari_a2c_tf_rmsprop.yaml (100%) rename examples-configs/rl/atari/{ => purgatory}/atari_acer.yaml (100%) rename examples-configs/rl/atari/{ => purgatory}/atari_acer_trust_region.yaml (100%) rename examples-configs/rl/atari/{ => purgatory}/atari_ppo_gru.yaml (100%) delete mode 100644 examples-configs/rl/mujoco/a2c/reacher_a2c.yaml create mode 100644 examples-configs/rl/mujoco/mujoco_a2c.yaml create mode 100644 examples-configs/rl/mujoco/mujoco_ppo.yaml create mode 100644 
examples-configs/rl/mujoco/mujoco_trpo.yaml delete mode 100644 examples-configs/rl/mujoco/ppo/half_cheetah_ppo.yaml delete mode 100644 examples-configs/rl/mujoco/ppo/hopper_ppo.yaml delete mode 100644 examples-configs/rl/mujoco/ppo/reacher_ppo.yaml delete mode 100644 examples-configs/rl/mujoco/ppo/walker_ppo.yaml delete mode 100644 examples-configs/rl/mujoco/trpo/half_cheetah_trpo.yaml delete mode 100644 examples-configs/rl/mujoco/trpo/hopper_trpo.yaml delete mode 100644 examples-configs/rl/mujoco/trpo/reacher_trpo.yaml create mode 100644 vel/openai/baselines/common/wrappers.py create mode 100644 vel/rl/algo/a2c.py delete mode 100644 vel/rl/algo/policy_gradient/trpo.py create mode 100644 vel/rl/algo/ppo.py create mode 100644 vel/rl/algo/trpo.py delete mode 100644 vel/rl/api/algo_base.py delete mode 100644 vel/rl/api/evaluator.py delete mode 100644 vel/rl/api/policy.py create mode 100644 vel/rl/api/rl_model.py rename vel/rl/policy/purgatory/{stochastic_policy.py => old_stochastic_policy.py} (97%) rename vel/rl/policy/{stochastic_rnn_policy.py => purgatory/old_stochastic_rnn_policy.py} (76%) rename vel/rl/{algo/policy_gradient => policy/semipurgatory}/__init__.py (100%) rename vel/rl/{algo/policy_gradient/a2c.py => policy/semipurgatory/a2c_rnn.py} (92%) rename vel/rl/{algo/policy_gradient => policy/semipurgatory}/acer.py (100%) rename vel/rl/{algo/policy_gradient => policy/semipurgatory}/ddpg.py (100%) rename vel/rl/{algo => policy/semipurgatory}/dqn.py (100%) rename vel/rl/{algo/policy_gradient/ppo.py => policy/semipurgatory/ppo_rnn.py} (100%) rename vel/rl/policy/{purgatory/stochastic_policy_model_separate.py => stochastic_policy_separate.py} (67%) diff --git a/examples-configs/rl/atari/atari_a2c.yaml b/examples-configs/rl/atari/atari_a2c.yaml index 1d15f2dd..cdacb76c 100644 --- a/examples-configs/rl/atari/atari_a2c.yaml +++ b/examples-configs/rl/atari/atari_a2c.yaml @@ -12,34 +12,34 @@ vec_env: model: - name: vel.rl.policy.stochastic_policy + name: vel.rl.algo.a2c - input_block: - name: vel.module.input.image_to_tensor + entropy_coefficient: 0.01 + value_coefficient: 0.5 + discount_factor: 0.99 - backbone: - name: vel.rl.backbone.nature_cnn + policy: + name: vel.rl.policy.stochastic_policy + input_block: + name: vel.module.input.image_to_tensor - input_width: 84 - input_height: 84 - input_channels: 4 # The same as frame_history + backbone: + name: vel.rl.backbone.nature_cnn + + input_width: 84 + input_height: 84 + input_channels: 4 # The same as frame_history reinforcer: name: vel.rl.reinforcer.on_policy_iteration_reinforcer - algo: - name: vel.rl.algo.policy_gradient.a2c - entropy_coefficient: 0.01 - value_coefficient: 0.5 - max_grad_norm: 0.5 - discount_factor: 0.99 - env_roller: name: vel.rl.env_roller.step_env_roller number_of_steps: 5 # How many environment steps go into a single batch parallel_envs: 16 # How many environments to run in parallel + batch_size: 256 # How many samples can go into the model once optimizer: @@ -47,6 +47,7 @@ optimizer: lr: 7.0e-4 alpha: 0.99 epsilon: 1.0e-3 + max_grad_norm: 0.5 commands: diff --git a/examples-configs/rl/atari/atari_ppo.yaml b/examples-configs/rl/atari/atari_ppo.yaml index 013f8ef0..c96c518c 100644 --- a/examples-configs/rl/atari/atari_ppo.yaml +++ b/examples-configs/rl/atari/atari_ppo.yaml @@ -12,36 +12,34 @@ vec_env: model: - name: vel.rl.policy.stochastic_policy + name: vel.rl.algo.ppo - input_block: - name: vel.module.input.image_to_tensor + cliprange: + name: vel.function.linear + initial_value: 0.1 + final_value: 0.0 - backbone: - 
name: vel.rl.backbone.nature_cnn - input_width: 84 - input_height: 84 - input_channels: 4 # The same as frame_history + entropy_coefficient: 0.01 + value_coefficient: 0.5 + discount_factor: 0.99 # Discount factor for the rewards + gae_lambda: 0.95 # Generalized Advantage Estimator Lambda parameter -reinforcer: - name: vel.rl.reinforcer.on_policy_iteration_reinforcer - - algo: - name: vel.rl.algo.policy_gradient.ppo + policy: + name: vel.rl.policy.stochastic_policy - entropy_coefficient: 0.01 - value_coefficient: 0.5 + input_block: + name: vel.module.input.image_to_tensor - discount_factor: 0.99 # Discount factor for the rewards - gae_lambda: 0.95 # Generalized Advantage Estimator Lambda parameter + backbone: + name: vel.rl.backbone.nature_cnn + input_width: 84 + input_height: 84 + input_channels: 4 # The same as frame_history - max_grad_norm: 0.5 # Gradient clipping parameter - cliprange: - name: vel.function.linear - initial_value: 0.1 - final_value: 0.0 +reinforcer: + name: vel.rl.reinforcer.on_policy_iteration_reinforcer env_roller: name: vel.rl.env_roller.step_env_roller @@ -56,6 +54,7 @@ optimizer: name: vel.optimizer.adam lr: 2.5e-4 epsilon: 1.0e-5 + max_grad_norm: 0.5 # Gradient clipping parameter scheduler: diff --git a/examples-configs/rl/atari/atari_trpo.yaml b/examples-configs/rl/atari/atari_trpo.yaml index 6b363274..df2446ab 100644 --- a/examples-configs/rl/atari/atari_trpo.yaml +++ b/examples-configs/rl/atari/atari_trpo.yaml @@ -12,40 +12,37 @@ vec_env: model: - name: vel.rl.models.stochastic_policy_model_separate + name: vel.rl.algo.trpo + + max_kl: 0.001 + cg_iters: 10 + line_search_iters: 10 + improvement_acceptance_ratio: 0.1 + cg_damping: 0.001 + vf_iters: 3 + entropy_coefficient: 0.1 + discount_factor: 0.99 + + gae_lambda: 1.00 # Generalized Advantage Estimator Lambda parameter input_block: - name: vel.modules.input.image_to_tensor + name: vel.module.input.image_to_tensor policy_backbone: - name: vel.rl.models.backbone.nature_cnn_small + name: vel.rl.backbone.nature_cnn_small input_width: 84 input_height: 84 input_channels: 4 # The same as frame_history value_backbone: - name: vel.rl.models.backbone.nature_cnn_small + name: vel.rl.backbone.nature_cnn_small input_width: 84 input_height: 84 input_channels: 4 # The same as frame_history reinforcer: - name: vel.rl.reinforcers.on_policy_iteration_reinforcer - - algo: - name: vel.rl.algo.policy_gradient.trpo - max_kl: 0.001 - cg_iters: 10 - line_search_iters: 10 - improvement_acceptance_ratio: 0.1 - cg_damping: 0.001 - vf_iters: 3 - entropy_coef: 0.1 - discount_factor: 0.99 - -# max_grad_norm: 0.5 - gae_lambda: 1.00 # Generalized Advantage Estimator Lambda parameter + name: vel.rl.reinforcer.on_policy_iteration_reinforcer env_roller: name: vel.rl.env_roller.step_env_roller @@ -58,25 +55,20 @@ reinforcer: optimizer: -# name: vel.optimizers.rmsprop -# lr: 7.0e-4 -# alpha: 0.99 -## epsilon: 1.0e-5 -# epsilon: 1.0e-3 - - name: vel.optimizers.adam + name: vel.optimizer.adam lr: 1.0e-4 epsilon: 1.0e-3 +# max_grad_norm: 0.5 commands: train: - name: vel.rl.commands.rl_train_command + name: vel.rl.command.rl_train_command total_frames: 1.1e7 batches_per_epoch: 16 record: - name: vel.rl.commands.record_movie_command + name: vel.rl.command.record_movie_command takes: 10 videoname: 'atari_trpo_vid_{:04}.avi' frame_history: 4 @@ -84,7 +76,7 @@ commands: argmax_sampling: true evaluate: - name: vel.rl.commands.evaluate_env_command + name: vel.rl.command.evaluate_env_command takes: 100 frame_history: 4 sample_args: diff --git 
a/examples-configs/rl/atari/atari_a2c_lstm.yaml b/examples-configs/rl/atari/purgatory/atari_a2c_lstm.yaml similarity index 100% rename from examples-configs/rl/atari/atari_a2c_lstm.yaml rename to examples-configs/rl/atari/purgatory/atari_a2c_lstm.yaml diff --git a/examples-configs/rl/atari/atari_a2c_tf_rmsprop.yaml b/examples-configs/rl/atari/purgatory/atari_a2c_tf_rmsprop.yaml similarity index 100% rename from examples-configs/rl/atari/atari_a2c_tf_rmsprop.yaml rename to examples-configs/rl/atari/purgatory/atari_a2c_tf_rmsprop.yaml diff --git a/examples-configs/rl/atari/atari_acer.yaml b/examples-configs/rl/atari/purgatory/atari_acer.yaml similarity index 100% rename from examples-configs/rl/atari/atari_acer.yaml rename to examples-configs/rl/atari/purgatory/atari_acer.yaml diff --git a/examples-configs/rl/atari/atari_acer_trust_region.yaml b/examples-configs/rl/atari/purgatory/atari_acer_trust_region.yaml similarity index 100% rename from examples-configs/rl/atari/atari_acer_trust_region.yaml rename to examples-configs/rl/atari/purgatory/atari_acer_trust_region.yaml diff --git a/examples-configs/rl/atari/atari_ppo_gru.yaml b/examples-configs/rl/atari/purgatory/atari_ppo_gru.yaml similarity index 100% rename from examples-configs/rl/atari/atari_ppo_gru.yaml rename to examples-configs/rl/atari/purgatory/atari_ppo_gru.yaml diff --git a/examples-configs/rl/mujoco/a2c/reacher_a2c.yaml b/examples-configs/rl/mujoco/a2c/reacher_a2c.yaml deleted file mode 100644 index f3a313ef..00000000 --- a/examples-configs/rl/mujoco/a2c/reacher_a2c.yaml +++ /dev/null @@ -1,67 +0,0 @@ -name: 'reacher_a2c' - - -env: - name: vel.rl.env.mujoco - game: 'Reacher-v2' - normalize_returns: true - - -vec_env: - name: vel.rl.vecenv.dummy - - -model: - name: vel.rl.models.stochastic_policy_model - - input_block: - name: vel.modules.input.normalize_observations - input_shape: 11 - - backbone: - name: vel.rl.models.backbone.mlp - input_length: 11 - hidden_layers: [64, 64] - activation: 'tanh' - - -reinforcer: - name: vel.rl.reinforcers.on_policy_iteration_reinforcer - - algo: - name: vel.rl.algo.policy_gradient.a2c - - entropy_coefficient: 0.0 - value_coefficient: 0.5 - max_grad_norm: 0.5 - - gae_lambda: 0.95 # Generalized Advantage Estimator Lambda parameter - discount_factor: 0.99 # Discount factor for the rewards - - env_roller: - name: vel.rl.env_roller.step_env_roller - - parallel_envs: 1 # How many environments to run in parallel - number_of_steps: 2048 # How many environment steps go into a single batch - batch_size: 2048 # How many samples can go into the model once - - -optimizer: - name: vel.optimizers.adam - lr: 3.0e-4 - epsilon: 1.0e-5 - - - -commands: - train: - name: vel.rl.commands.rl_train_command - total_frames: 1.0e6 - batches_per_epoch: 1 - - record: - name: vel.rl.commands.record_movie_command - takes: 10 - videoname: 'reacher_vid_{:04}.avi' - sample_args: - argmax_sampling: true diff --git a/examples-configs/rl/mujoco/mujoco_a2c.yaml b/examples-configs/rl/mujoco/mujoco_a2c.yaml new file mode 100644 index 00000000..266f7353 --- /dev/null +++ b/examples-configs/rl/mujoco/mujoco_a2c.yaml @@ -0,0 +1,65 @@ +name: 'mujoco_a2c' + + +env: + name: vel.rl.env.mujoco + game: !param game = 'Reacher-v2' + normalize_returns: true + + +vec_env: + name: vel.rl.vecenv.dummy + + +model: + name: vel.rl.algo.a2c + + entropy_coefficient: 0.0 + value_coefficient: 0.5 + gae_lambda: 0.95 # Generalized Advantage Estimator Lambda parameter + discount_factor: 0.99 # Discount factor for the rewards + + policy: + name: 
vel.rl.policy.stochastic_policy + + input_block: + name: vel.module.input.normalize_observations + input_shape: 11 + + backbone: + name: vel.rl.backbone.mlp + input_length: 11 + hidden_layers: [64, 64] + activation: 'tanh' + + +reinforcer: + name: vel.rl.reinforcer.on_policy_iteration_reinforcer + + env_roller: + name: vel.rl.env_roller.step_env_roller + + parallel_envs: 1 # How many environments to run in parallel + number_of_steps: 2048 # How many environment steps go into a single batch + batch_size: 2048 # How many samples can go into the model once + + +optimizer: + name: vel.optimizer.adam + lr: 3.0e-4 + epsilon: 1.0e-5 + max_grad_norm: 0.5 + + +commands: + train: + name: vel.rl.command.rl_train_command + total_frames: 1.0e6 + batches_per_epoch: 1 + + record: + name: vel.rl.command.record_movie_command + takes: 10 + videoname: 'reacher_vid_{:04}.avi' + sample_args: + argmax_sampling: true diff --git a/examples-configs/rl/mujoco/mujoco_ppo.yaml b/examples-configs/rl/mujoco/mujoco_ppo.yaml new file mode 100644 index 00000000..a1cc2113 --- /dev/null +++ b/examples-configs/rl/mujoco/mujoco_ppo.yaml @@ -0,0 +1,77 @@ +name: 'mujoco_ppo' + + +env: + name: vel.rl.env.mujoco + game: !param game = 'Reacher-v2' + normalize_returns: true + + +vec_env: + name: vel.rl.vecenv.dummy + + +model: + name: vel.rl.algo.ppo + + entropy_coefficient: 0.0 + value_coefficient: 0.5 + + cliprange: 0.2 + + discount_factor: 0.99 # Discount factor for the rewards + gae_lambda: 0.95 # Generalized Advantage Estimator Lambda parameter + + policy: + name: vel.rl.policy.stochastic_policy_separate + + input_block: + name: vel.module.input.normalize_observations + input_shape: 11 + + policy_backbone: + name: vel.rl.backbone.mlp + input_length: 11 + hidden_layers: [64, 64] + activation: 'tanh' + + value_backbone: + name: vel.rl.backbone.mlp + input_length: 11 + hidden_layers: [64, 64] + activation: 'tanh' + + +reinforcer: + name: vel.rl.reinforcer.on_policy_iteration_reinforcer + + env_roller: + name: vel.rl.env_roller.step_env_roller + + parallel_envs: 1 # How many environments to run in parallel + batch_size: 64 # How many samples can go into the model in one batch + number_of_steps: 2048 # How many environment steps go into a single batch + experience_replay: 10 # How many times to replay the experience + + +optimizer: + name: vel.optimizer.adam + lr: 3.0e-4 + epsilon: 1.0e-5 + max_grad_norm: 0.5 # Gradient clipping parameter + + +scheduler: + name: vel.scheduler.linear_batch_scaler + + +commands: + train: + name: vel.rl.command.rl_train_command + total_frames: 1.0e6 + batches_per_epoch: 1 + + record: + name: vel.rl.command.record_movie_command + takes: 10 + videoname: 'half_cheetah_vid_{:04}.avi' diff --git a/examples-configs/rl/mujoco/mujoco_trpo.yaml b/examples-configs/rl/mujoco/mujoco_trpo.yaml new file mode 100644 index 00000000..47356e0d --- /dev/null +++ b/examples-configs/rl/mujoco/mujoco_trpo.yaml @@ -0,0 +1,74 @@ +name: 'mujoco_trpo' + +env: + name: vel.rl.env.mujoco + game: !param game = 'Reacher-v2' + normalize_returns: true + + +vec_env: + name: vel.rl.vecenv.dummy + + +model: + name: vel.rl.algo.trpo + + discount_factor: 0.99 # Discount factor for the rewards + gae_lambda: 0.98 # Generalized Advantage Estimator Lambda parameter + + max_kl: 0.01 + cg_iters: 10 + line_search_iters: 10 + improvement_acceptance_ratio: 0.1 + cg_damping: 0.1 + vf_iters: 5 + entropy_coefficient: 0.0 + + input_block: + name: vel.module.input.normalize_observations + input_shape: 11 + + policy_backbone: + name: vel.rl.backbone.mlp 
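+    # input_length must match the observation size of the configured game (11 for the default Reacher-v2)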
+ input_length: 11 + hidden_layers: [32, 32] + activation: 'tanh' + + value_backbone: + name: vel.rl.backbone.mlp + input_length: 11 + hidden_layers: [32, 32] + activation: 'tanh' + + +reinforcer: + name: vel.rl.reinforcer.on_policy_iteration_reinforcer + + env_roller: + name: vel.rl.env_roller.step_env_roller + + parallel_envs: 1 # How many environments to run in parallel + number_of_steps: 1024 # How many environment steps go into a single batch + batch_size: 1024 # How many samples can go into the model once + + + +optimizer: + name: vel.optimizer.adam + lr: 1.0e-3 + epsilon: 1.0e-8 + # max_grad_norm: 0.5 + + +commands: + train: + name: vel.rl.command.rl_train_command + total_frames: 1.0e6 + batches_per_epoch: 4 + + record: + name: vel.rl.command.record_movie_command + takes: 10 + videoname: 'reacher_vid_{:04}.avi' + sample_args: + argmax_sampling: true diff --git a/examples-configs/rl/mujoco/ppo/half_cheetah_ppo.yaml b/examples-configs/rl/mujoco/ppo/half_cheetah_ppo.yaml deleted file mode 100644 index 202e0d4f..00000000 --- a/examples-configs/rl/mujoco/ppo/half_cheetah_ppo.yaml +++ /dev/null @@ -1,77 +0,0 @@ -name: 'half_cheetah_ppo' - - -env: - name: vel.rl.env.mujoco - game: 'HalfCheetah-v2' - normalize_returns: true - - -vec_env: - name: vel.rl.vecenv.dummy - - -model: - name: vel.rl.models.stochastic_policy_model_separate - - input_block: - name: vel.modules.input.normalize_observations - input_shape: 17 - - policy_backbone: - name: vel.rl.models.backbone.mlp - input_length: 17 - hidden_layers: [64, 64] - activation: 'tanh' - - value_backbone: - name: vel.rl.models.backbone.mlp - input_length: 17 - hidden_layers: [64, 64] - activation: 'tanh' - - -reinforcer: - name: vel.rl.reinforcers.on_policy_iteration_reinforcer - - algo: - name: vel.rl.algo.policy_gradient.ppo - - entropy_coefficient: 0.0 - value_coefficient: 0.5 - - cliprange: 0.2 - - max_grad_norm: 0.5 # Gradient clipping parameter - discount_factor: 0.99 # Discount factor for the rewards - gae_lambda: 0.95 # Generalized Advantage Estimator Lambda parameter - - env_roller: - name: vel.rl.env_roller.step_env_roller - - parallel_envs: 1 # How many environments to run in parallel - batch_size: 64 # How many samples can go into the model in one batch - number_of_steps: 2048 # How many environment steps go into a single batch - experience_replay: 10 # How many times to replay the experience - - -optimizer: - name: vel.optimizers.adam - lr: 3.0e-4 - epsilon: 1.0e-5 - - -scheduler: - name: vel.scheduler.linear_batch_scaler - - -commands: - train: - name: vel.rl.commands.rl_train_command - total_frames: 1.0e6 - batches_per_epoch: 1 - - record: - name: vel.rl.commands.record_movie_command - takes: 10 - videoname: 'half_cheetah_vid_{:04}.avi' diff --git a/examples-configs/rl/mujoco/ppo/hopper_ppo.yaml b/examples-configs/rl/mujoco/ppo/hopper_ppo.yaml deleted file mode 100644 index 2001d0c9..00000000 --- a/examples-configs/rl/mujoco/ppo/hopper_ppo.yaml +++ /dev/null @@ -1,77 +0,0 @@ -name: 'hopper_ppo' - - -env: - name: vel.rl.env.mujoco - game: 'Hopper-v2' - normalize_returns: true - - -vec_env: - name: vel.rl.vecenv.dummy - - -model: - name: vel.rl.models.stochastic_policy_model_separate - - input_block: - name: vel.modules.input.normalize_observations - input_shape: 11 - - policy_backbone: - name: vel.rl.models.backbone.mlp - input_length: 11 - hidden_layers: [64, 64] - activation: 'tanh' - - value_backbone: - name: vel.rl.models.backbone.mlp - input_length: 11 - hidden_layers: [64, 64] - activation: 'tanh' - - -reinforcer: - name: 
vel.rl.reinforcers.on_policy_iteration_reinforcer - - algo: - name: vel.rl.algo.policy_gradient.ppo - - entropy_coefficient: 0.0 - value_coefficient: 0.5 - - cliprange: 0.2 - - max_grad_norm: 0.5 # Gradient clipping parameter - discount_factor: 0.99 # Discount factor for the rewards - gae_lambda: 0.95 # Generalized Advantage Estimator Lambda parameter - - env_roller: - name: vel.rl.env_roller.step_env_roller - - parallel_envs: 1 # How many environments to run in parallel - batch_size: 64 # How many samples can go into the model once - number_of_steps: 2048 # How many environment steps go into a single batch - experience_replay: 10 # How many times to replay the experience - - -optimizer: - name: vel.optimizers.adam - lr: 3.0e-4 - epsilon: 1.0e-5 - - -scheduler: - name: vel.scheduler.linear_batch_scaler - - -commands: - train: - name: vel.rl.commands.rl_train_command - total_frames: 1.0e6 - batches_per_epoch: 1 - - record: - name: vel.rl.commands.record_movie_command - takes: 10 - videoname: 'hopper_vid_{:04}.avi' diff --git a/examples-configs/rl/mujoco/ppo/reacher_ppo.yaml b/examples-configs/rl/mujoco/ppo/reacher_ppo.yaml deleted file mode 100644 index f0da8742..00000000 --- a/examples-configs/rl/mujoco/ppo/reacher_ppo.yaml +++ /dev/null @@ -1,77 +0,0 @@ -name: 'reacher_ppo' - - -env: - name: vel.rl.env.mujoco - game: 'Reacher-v2' - normalize_returns: true - - -vec_env: - name: vel.rl.vecenv.dummy - - -model: - name: vel.rl.models.stochastic_policy_model_separate - - input_block: - name: vel.modules.input.normalize_observations - input_shape: 11 - - policy_backbone: - name: vel.rl.models.backbone.mlp - input_length: 11 - hidden_layers: [64, 64] - activation: 'tanh' - - value_backbone: - name: vel.rl.models.backbone.mlp - input_length: 11 - hidden_layers: [64, 64] - activation: 'tanh' - - -reinforcer: - name: vel.rl.reinforcers.on_policy_iteration_reinforcer - - algo: - name: vel.rl.algo.policy_gradient.ppo - - entropy_coefficient: 0.0 - value_coefficient: 0.5 - - cliprange: 0.2 - - max_grad_norm: 0.5 # Gradient clipping parameter - discount_factor: 0.99 # Discount factor for the rewards - gae_lambda: 0.95 # Generalized Advantage Estimator Lambda parameter - - env_roller: - name: vel.rl.env_roller.step_env_roller - - parallel_envs: 1 # How many environments to run in parallel - batch_size: 64 # How many samples can go into the model once - number_of_steps: 2048 # How many environment steps go into a single batch - experience_replay: 10 # How many times to replay the experience - - -optimizer: - name: vel.optimizers.adam - lr: 3.0e-4 - epsilon: 1.0e-5 - - -scheduler: - name: vel.scheduler.linear_batch_scaler - - -commands: - train: - name: vel.rl.commands.rl_train_command - total_frames: 1.0e6 - batches_per_epoch: 1 - - record: - name: vel.rl.commands.record_movie_command - takes: 10 - videoname: 'reacher_vid_{:04}.avi' diff --git a/examples-configs/rl/mujoco/ppo/walker_ppo.yaml b/examples-configs/rl/mujoco/ppo/walker_ppo.yaml deleted file mode 100644 index a07777f0..00000000 --- a/examples-configs/rl/mujoco/ppo/walker_ppo.yaml +++ /dev/null @@ -1,77 +0,0 @@ -name: 'walker_ppo' - - -env: - name: vel.rl.env.mujoco - game: 'Walker2d-v2' - normalize_returns: true - - -vec_env: - name: vel.rl.vecenv.dummy - - -model: - name: vel.rl.models.stochastic_policy_model_separate - - input_block: - name: vel.modules.input.normalize_observations - input_shape: 17 - - policy_backbone: - name: vel.rl.models.backbone.mlp - input_length: 17 - hidden_layers: [64, 64] - activation: 'tanh' - - value_backbone: - 
name: vel.rl.models.backbone.mlp - input_length: 17 - hidden_layers: [64, 64] - activation: 'tanh' - - -reinforcer: - name: vel.rl.reinforcers.on_policy_iteration_reinforcer - - algo: - name: vel.rl.algo.policy_gradient.ppo - - entropy_coefficient: 0.0 - value_coefficient: 0.5 - - cliprange: 0.2 - - max_grad_norm: 0.5 # Gradient clipping parameter - discount_factor: 0.99 # Discount factor for the rewards - gae_lambda: 0.95 # Generalized Advantage Estimator Lambda parameter - - env_roller: - name: vel.rl.env_roller.step_env_roller - - parallel_envs: 1 # How many environments to run in parallel - batch_size: 64 # How many samples can go into the model once - number_of_steps: 2048 # How many environment steps go into a single batch - experience_replay: 10 # How many times to replay the experience - - -optimizer: - name: vel.optimizers.adam - lr: 3.0e-4 - epsilon: 1.0e-5 - - -scheduler: - name: vel.scheduler.linear_batch_scaler - - -commands: - train: - name: vel.rl.commands.rl_train_command - total_frames: 1.0e6 - batches_per_epoch: 1 - - record: - name: vel.rl.commands.record_movie_command - takes: 10 - videoname: 'walker_vid_{:04}.avi' diff --git a/examples-configs/rl/mujoco/trpo/half_cheetah_trpo.yaml b/examples-configs/rl/mujoco/trpo/half_cheetah_trpo.yaml deleted file mode 100644 index 131bb6e3..00000000 --- a/examples-configs/rl/mujoco/trpo/half_cheetah_trpo.yaml +++ /dev/null @@ -1,81 +0,0 @@ -name: 'half_cheetah_trpo' - -env: - name: vel.rl.env.mujoco - game: 'HalfCheetah-v2' - normalize_returns: true - - -vec_env: - name: vel.rl.vecenv.dummy - - -model: - name: vel.rl.models.stochastic_policy_model_separate - - input_block: - name: vel.modules.input.normalize_observations - input_shape: 17 - - policy_backbone: - name: vel.rl.models.backbone.mlp - input_length: 17 - hidden_layers: [32, 32] - activation: 'tanh' - - value_backbone: - name: vel.rl.models.backbone.mlp - input_length: 17 - hidden_layers: [32, 32] - activation: 'tanh' - - -reinforcer: - name: vel.rl.reinforcers.on_policy_iteration_reinforcer - - algo: - name: vel.rl.algo.policy_gradient.trpo - discount_factor: 0.99 # Discount factor for the rewards - gae_lambda: 0.98 # Generalized Advantage Estimator Lambda parameter - - max_kl: 0.01 - cg_iters: 10 - line_search_iters: 10 - improvement_acceptance_ratio: 0.1 - cg_damping: 0.1 - vf_iters: 5 - entropy_coef: 0.0 -# max_grad_norm: 0.5 - - env_roller: - name: vel.rl.env_roller.step_env_roller - - parallel_envs: 1 # How many environments to run in parallel - number_of_steps: 1024 # How many environment steps go into a single batch - batch_size: 1024 # How many samples can go into the model once -# experience_replay: 10 # How many times to replay the experience - - - -optimizer: - name: vel.optimizers.adam - lr: 1.0e-3 - epsilon: 1.0e-8 - - -#scheduler: -# name: vel.scheduler.linear_batch_scaler - - -commands: - train: - name: vel.rl.commands.rl_train_command - total_frames: 1.0e6 - batches_per_epoch: 4 - - record: - name: vel.rl.commands.record_movie_command - takes: 10 - videoname: 'reacher_vid_{:04}.avi' - sample_args: - argmax_sampling: true diff --git a/examples-configs/rl/mujoco/trpo/hopper_trpo.yaml b/examples-configs/rl/mujoco/trpo/hopper_trpo.yaml deleted file mode 100644 index 41444d79..00000000 --- a/examples-configs/rl/mujoco/trpo/hopper_trpo.yaml +++ /dev/null @@ -1,82 +0,0 @@ -name: 'hopper_trpo' - -env: - name: vel.rl.env.mujoco - game: 'Hopper-v2' - normalize_returns: true - - -vec_env: - name: vel.rl.vecenv.dummy - - -model: - name: 
vel.rl.models.policy_gradient_model_separate - - input_block: - name: vel.modules.input.normalize_observations - input_shape: 17 - - policy_backbone: - name: vel.rl.models.backbone.mlp - input_length: 11 - hidden_layers: [32, 32] - activation: 'tanh' - - value_backbone: - name: vel.rl.models.backbone.mlp - input_length: 11 - hidden_layers: [32, 32] - activation: 'tanh' - - -reinforcer: - name: vel.rl.reinforcers.on_policy_iteration_reinforcer - - algo: - name: vel.rl.algo.policy_gradient.trpo - max_kl: 0.01 - cg_iters: 10 - line_search_iters: 10 - improvement_acceptance_ratio: 0.1 - cg_damping: 0.1 - vf_iters: 5 - entropy_coef: 0.0 -# max_grad_norm: 0.5 - - env_roller: - name: vel.rl.env_roller.vec.step_env_roller - gae_lambda: 0.98 # Generalized Advantage Estimator Lambda parameter - - - parallel_envs: 1 # How many environments to run in parallel - batch_size: 1024 # How many samples can go into the model once - number_of_steps: 1024 # How many environment steps go into a single batch -# experience_replay: 10 # How many times to replay the experience - - discount_factor: 0.99 # Discount factor for the rewards - - -optimizer: - name: vel.optimizers.adam - lr: 0.001 - epsilon: 1.0e-8 - - -#scheduler: -# name: vel.scheduler.linear_batch_scaler - - -commands: - train: - name: vel.rl.commands.rl_train_command - total_frames: 1.0e6 - batches_per_epoch: 2 - openai_logging: true - - record: - name: vel.rl.commands.record_movie_command - takes: 10 - videoname: 'reacher_vid_{:04}.avi' - sample_args: - argmax_sampling: true diff --git a/examples-configs/rl/mujoco/trpo/reacher_trpo.yaml b/examples-configs/rl/mujoco/trpo/reacher_trpo.yaml deleted file mode 100644 index 65aa7bc0..00000000 --- a/examples-configs/rl/mujoco/trpo/reacher_trpo.yaml +++ /dev/null @@ -1,81 +0,0 @@ -name: 'reacher_trpo' - -env: - name: vel.rl.env.mujoco - game: 'Reacher-v2' - normalize_returns: true - - -vec_env: - name: vel.rl.vecenv.dummy - - -model: - name: vel.rl.models.policy_gradient_model_separate - - input_block: - name: vel.modules.input.normalize_observations - input_shape: 17 - - policy_backbone: - name: vel.rl.models.backbone.mlp - input_length: 11 - hidden_layers: [32, 32] - activation: 'tanh' - - value_backbone: - name: vel.rl.models.backbone.mlp - input_length: 11 - hidden_layers: [32, 32] - activation: 'tanh' - - -reinforcer: - name: vel.rl.reinforcers.on_policy_iteration_reinforcer - - algo: - name: vel.rl.algo.policy_gradient.trpo - max_kl: 0.01 - cg_iters: 10 - line_search_iters: 10 - improvement_acceptance_ratio: 0.1 - cg_damping: 0.1 - vf_iters: 5 - entropy_coef: 0.0 -# max_grad_norm: 0.5 - - env_roller: - name: vel.rl.env_roller.vec.step_env_roller - gae_lambda: 0.98 # Generalized Advantage Estimator Lambda parameter - - parallel_envs: 1 # How many environments to run in parallel - batch_size: 1024 # How many samples can go into the model once - number_of_steps: 1024 # How many environment steps go into a single batch -# experience_replay: 10 # How many times to replay the experience - - discount_factor: 0.99 # Discount factor for the rewards - - -optimizer: - name: vel.optimizers.adam - lr: 0.001 - epsilon: 1.0e-8 - - -#scheduler: -# name: vel.scheduler.linear_batch_scaler - - -commands: - train: - name: vel.rl.commands.rl_train_command - total_frames: 1.0e6 - batches_per_epoch: 2 - openai_logging: true - - record: - name: vel.rl.commands.record_movie_command - takes: 10 - videoname: 'reacher_vid_{:04}.avi' - sample_args: - argmax_sampling: true diff --git 
a/vel/model/rnn/multilayer_rnn_sequence_classification.py b/vel/model/rnn/multilayer_rnn_sequence_classification.py index 4c724c76..d19f40f3 100644 --- a/vel/model/rnn/multilayer_rnn_sequence_classification.py +++ b/vel/model/rnn/multilayer_rnn_sequence_classification.py @@ -13,7 +13,7 @@ class MultilayerRnnSequenceClassification(LossFunctionModel): - """ Multilayer GRU network for sequence modeling (n:1) """ + """ Multilayer RNN network for sequence modeling (n:1) """ def __init__(self, input_block: LinearBackboneModel, rnn_type: str, output_dim: int, rnn_layers: typing.List[int], rnn_dropout: float = 0.0, bidirectional: bool = False, diff --git a/vel/model/rnn/multilayer_rnn_sequence_model.py b/vel/model/rnn/multilayer_rnn_sequence_model.py index 2e90c2d3..959741a5 100644 --- a/vel/model/rnn/multilayer_rnn_sequence_model.py +++ b/vel/model/rnn/multilayer_rnn_sequence_model.py @@ -9,7 +9,7 @@ class MultilayerRnnSequenceModel(LossFunctionModel): - """ Multilayer GRU network for sequence modeling (n:n) """ + """ Multilayer RNN network for sequence modeling (n:n) """ def __init__(self, input_block: LinearBackboneModel, rnn_type: str, hidden_layers: typing.List[int], output_dim: int, dropout: float = 0.0): diff --git a/vel/openai/baselines/common/atari_wrappers.py b/vel/openai/baselines/common/atari_wrappers.py index a5a7fa9a..3b3540b7 100644 --- a/vel/openai/baselines/common/atari_wrappers.py +++ b/vel/openai/baselines/common/atari_wrappers.py @@ -4,6 +4,8 @@ from gym import spaces import cv2 cv2.ocl.setUseOpenCL(False) +from .wrappers import TimeLimit + class NoopResetEnv(gym.Wrapper): def __init__(self, env, noop_max=30): @@ -174,27 +176,60 @@ def reward(self, reward): """Bin reward to {+1, 0, -1} by its sign.""" return np.sign(reward) + class WarpFrame(gym.ObservationWrapper): - def __init__(self, env, width=84, height=84, grayscale=True): - """Warp frames to 84x84 as done in the Nature paper and later work.""" - gym.ObservationWrapper.__init__(self, env) - self.width = width - self.height = height - self.grayscale = grayscale - if self.grayscale: - self.observation_space = spaces.Box(low=0, high=255, - shape=(self.height, self.width, 1), dtype=np.uint8) + def __init__(self, env, width=84, height=84, grayscale=True, dict_space_key=None): + """ + Warp frames to 84x84 as done in the Nature paper and later work. + + If the environment uses dictionary observations, `dict_space_key` can be specified which indicates which + observation should be warped. 
+ """ + super().__init__(env) + self._width = width + self._height = height + self._grayscale = grayscale + self._key = dict_space_key + if self._grayscale: + num_colors = 1 + else: + num_colors = 3 + + new_space = gym.spaces.Box( + low=0, + high=255, + shape=(self._height, self._width, num_colors), + dtype=np.uint8, + ) + if self._key is None: + original_space = self.observation_space + self.observation_space = new_space else: - self.observation_space = spaces.Box(low=0, high=255, - shape=(self.height, self.width, 3), dtype=np.uint8) + original_space = self.observation_space.spaces[self._key] + self.observation_space.spaces[self._key] = new_space + assert original_space.dtype == np.uint8 and len(original_space.shape) == 3 - def observation(self, frame): - if self.grayscale: + def observation(self, obs): + if self._key is None: + frame = obs + else: + frame = obs[self._key] + + if self._grayscale: frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY) - frame = cv2.resize(frame, (self.width, self.height), interpolation=cv2.INTER_AREA) - if self.grayscale: + frame = cv2.resize( + frame, (self._width, self._height), interpolation=cv2.INTER_AREA + ) + if self._grayscale: frame = np.expand_dims(frame, -1) - return frame + + if self._key is None: + obs = frame + else: + obs = obs.copy() + obs[self._key] = frame + return obs + class FrameStack(gym.Wrapper): def __init__(self, env, k): @@ -265,16 +300,15 @@ def __len__(self): return len(self._force()) def __getitem__(self, i): - return self._force()[i] + return self._force()[..., i] -def make_atari(env_id, timelimit=True): - # XXX(john): remove timelimit argument after gym is upgraded to allow double wrapping +def make_atari(env_id, max_episode_steps=None): env = gym.make(env_id) - if not timelimit: - env = env.env assert 'NoFrameskip' in env.spec.id env = NoopResetEnv(env, noop_max=30) env = MaxAndSkipEnv(env, skip=4) + if max_episode_steps is not None: + env = TimeLimit(env, max_episode_steps=max_episode_steps) return env def wrap_deepmind(env, episode_life=True, clip_rewards=True, frame_stack=False, scale=False): diff --git a/vel/openai/baselines/common/retro_wrappers.py b/vel/openai/baselines/common/retro_wrappers.py index 6e8fe912..badbbdd6 100644 --- a/vel/openai/baselines/common/retro_wrappers.py +++ b/vel/openai/baselines/common/retro_wrappers.py @@ -1,28 +1,12 @@ - # flake8: noqa F403, F405 -from .atari_wrappers import * +from collections import deque +import cv2 +cv2.ocl.setUseOpenCL(False) +from .atari_wrappers import WarpFrame, ClipRewardEnv, FrameStack, ScaledFloatFrame +from .wrappers import TimeLimit import numpy as np import gym -class TimeLimit(gym.Wrapper): - def __init__(self, env, max_episode_steps=None): - super(TimeLimit, self).__init__(env) - self._max_episode_steps = max_episode_steps - self._elapsed_steps = 0 - - def step(self, ac): - observation, reward, done, info = self.env.step(ac) - self._elapsed_steps += 1 - if self._elapsed_steps >= self._max_episode_steps: - done = True - info['TimeLimit.truncated'] = True - return observation, reward, done, info - - def reset(self, **kwargs): - self._elapsed_steps = 0 - return self.env.reset(**kwargs) - - class StochasticFrameSkip(gym.Wrapper): def __init__(self, env, n, stickprob): gym.Wrapper.__init__(self, env) @@ -61,7 +45,6 @@ def step(self, ac): def seed(self, s): self.rng.seed(s) - class PartialFrameStack(gym.Wrapper): def __init__(self, env, k, channel=1): """ @@ -71,8 +54,8 @@ def __init__(self, env, k, channel=1): shp = env.observation_space.shape self.channel = channel 
self.observation_space = gym.spaces.Box(low=0, high=255, - shape=(shp[0], shp[1], shp[2] + k - 1), - dtype=env.observation_space.dtype) + shape=(shp[0], shp[1], shp[2] + k - 1), + dtype=env.observation_space.dtype) self.k = k self.frames = deque([], maxlen=k) shp = env.observation_space.shape @@ -92,8 +75,7 @@ def step(self, ac): def _get_ob(self): assert len(self.frames) == self.k return np.concatenate([frame if i==self.k-1 else frame[:,:,self.channel:self.channel+1] - for (i, frame) in enumerate(self.frames)], axis=2) - + for (i, frame) in enumerate(self.frames)], axis=2) class Downsample(gym.ObservationWrapper): def __init__(self, env, ratio): @@ -103,10 +85,8 @@ def __init__(self, env, ratio): gym.ObservationWrapper.__init__(self, env) (oldh, oldw, oldc) = env.observation_space.shape newshape = (oldh//ratio, oldw//ratio, oldc) - self.observation_space = spaces.Box( - low=0, high=255, - shape=newshape, dtype=np.uint8 - ) + self.observation_space = gym.spaces.Box(low=0, high=255, + shape=newshape, dtype=np.uint8) def observation(self, frame): height, width, _ = self.observation_space.shape @@ -115,7 +95,6 @@ def observation(self, frame): frame = frame[:,:,None] return frame - class Rgb2gray(gym.ObservationWrapper): def __init__(self, env): """ @@ -123,10 +102,8 @@ def __init__(self, env): """ gym.ObservationWrapper.__init__(self, env) (oldh, oldw, _oldc) = env.observation_space.shape - self.observation_space = spaces.Box( - low=0, high=255, - shape=(oldh, oldw, 1), dtype=np.uint8 - ) + self.observation_space = gym.spaces.Box(low=0, high=255, + shape=(oldh, oldw, 1), dtype=np.uint8) def observation(self, frame): frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY) @@ -148,7 +125,6 @@ def reset(self): self.epcount += 1 return self.env.reset() - class AppendTimeout(gym.Wrapper): def __init__(self, env): gym.Wrapper.__init__(self, env) @@ -165,7 +141,7 @@ def __init__(self, env): self.observation_space = gym.spaces.Dict({ 'original': self.original_os, 'value_estimation_timeout': self.timeout_space - }) + }) self.dict_mode = False self.ac_count = None while 1: @@ -191,7 +167,6 @@ def _process(self, ob): else: return { 'original': ob, 'value_estimation_timeout': fracmissing } - class StartDoingRandomActionsWrapper(gym.Wrapper): """ Warning: can eat info dicts, not good if you depend on them @@ -224,28 +199,28 @@ def step(self, a): self.some_random_steps() return self.last_obs, rew, done, info - -def make_retro(*, game, state, max_episode_steps, **kwargs): +def make_retro(*, game, state=None, max_episode_steps=4500, **kwargs): import retro + if state is None: + state = retro.State.DEFAULT env = retro.make(game, state, **kwargs) env = StochasticFrameSkip(env, n=4, stickprob=0.25) if max_episode_steps is not None: env = TimeLimit(env, max_episode_steps=max_episode_steps) return env - def wrap_deepmind_retro(env, scale=True, frame_stack=4): """ Configure environment for retro games, using config similar to DeepMind-style Atari in wrap_deepmind """ env = WarpFrame(env) env = ClipRewardEnv(env) - env = FrameStack(env, frame_stack) + if frame_stack > 1: + env = FrameStack(env, frame_stack) if scale: env = ScaledFloatFrame(env) return env - class SonicDiscretizer(gym.ActionWrapper): """ Wrap a gym-retro environment and make it use discrete @@ -267,7 +242,6 @@ def __init__(self, env): def action(self, a): # pylint: disable=W0221 return self._actions[a].copy() - class RewardScaler(gym.RewardWrapper): """ Bring rewards to a reasonable scale for PPO. 
@@ -281,7 +255,6 @@ def __init__(self, env, scale=0.01): def reward(self, reward): return reward * self.scale - class AllowBacktracking(gym.Wrapper): """ Use deltas in max(X) as the reward, rather than deltas diff --git a/vel/openai/baselines/common/running_mean_std.py b/vel/openai/baselines/common/running_mean_std.py index a8daae81..fb891589 100644 --- a/vel/openai/baselines/common/running_mean_std.py +++ b/vel/openai/baselines/common/running_mean_std.py @@ -15,18 +15,20 @@ def update(self, x): self.update_from_moments(batch_mean, batch_var, batch_count) def update_from_moments(self, batch_mean, batch_var, batch_count): - delta = batch_mean - self.mean - tot_count = self.count + batch_count + self.mean, self.var, self.count = update_mean_var_count_from_moments( + self.mean, self.var, self.count, batch_mean, batch_var, batch_count) - new_mean = self.mean + delta * batch_count / tot_count - m_a = self.var * self.count - m_b = batch_var * batch_count - M2 = m_a + m_b + np.square(delta) * self.count * batch_count / (self.count + batch_count) - new_var = M2 / (self.count + batch_count) - new_count = batch_count + self.count +def update_mean_var_count_from_moments(mean, var, count, batch_mean, batch_var, batch_count): + delta = batch_mean - mean + tot_count = count + batch_count - self.mean = new_mean - self.var = new_var - self.count = new_count + new_mean = mean + delta * batch_count / tot_count + m_a = var * count + m_b = batch_var * batch_count + M2 = m_a + m_b + np.square(delta) * count * batch_count / tot_count + new_var = M2 / tot_count + new_count = tot_count + + return new_mean, new_var, new_count diff --git a/vel/openai/baselines/common/vec_env/__init__.py b/vel/openai/baselines/common/vec_env/__init__.py index 0a124cb9..a0d5a348 100644 --- a/vel/openai/baselines/common/vec_env/__init__.py +++ b/vel/openai/baselines/common/vec_env/__init__.py @@ -1,4 +1,7 @@ +import contextlib +import os from abc import ABC, abstractmethod + from vel.openai.baselines.common.tile_images import tile_images @@ -135,7 +138,6 @@ def get_viewer(self): self.viewer = rendering.SimpleImageViewer() return self.viewer - class VecEnvWrapper(VecEnv): """ An environment wrapper that applies to an entire batch @@ -144,8 +146,7 @@ class VecEnvWrapper(VecEnv): def __init__(self, venv, observation_space=None, action_space=None): self.venv = venv - VecEnv.__init__(self, - num_envs=venv.num_envs, + super().__init__(num_envs=venv.num_envs, observation_space=observation_space or venv.observation_space, action_space=action_space or venv.action_space) @@ -169,6 +170,25 @@ def render(self, mode='human'): def get_images(self): return self.venv.get_images() + def __getattr__(self, name): + if name.startswith('_'): + raise AttributeError("attempted to get missing private attribute '{}'".format(name)) + return getattr(self.venv, name) + + +class VecEnvObservationWrapper(VecEnvWrapper): + @abstractmethod + def process(self, obs): + pass + + def reset(self): + obs = self.venv.reset() + return self.process(obs) + + def step_wait(self): + obs, rews, dones, infos = self.venv.step_wait() + return self.process(obs), rews, dones, infos + class CloudpickleWrapper(object): """ @@ -185,3 +205,22 @@ def __getstate__(self): def __setstate__(self, ob): import pickle self.x = pickle.loads(ob) + + +@contextlib.contextmanager +def clear_mpi_env_vars(): + """ + from mpi4py import MPI will call MPI_Init by default. 
If the child process has MPI environment variables, MPI will think that the child process is an MPI process just like the parent and do bad things such as hang. + This context manager is a hacky way to clear those environment variables temporarily such as when we are starting multiprocessing + Processes. + """ + removed_environment = {} + for k, v in list(os.environ.items()): + for prefix in ['OMPI_', 'PMI_']: + if k.startswith(prefix): + removed_environment[k] = v + del os.environ[k] + try: + yield + finally: + os.environ.update(removed_environment) diff --git a/vel/openai/baselines/common/vec_env/dummy_vec_env.py b/vel/openai/baselines/common/vec_env/dummy_vec_env.py index 4f5c106f..ea21f130 100644 --- a/vel/openai/baselines/common/vec_env/dummy_vec_env.py +++ b/vel/openai/baselines/common/vec_env/dummy_vec_env.py @@ -1,8 +1,8 @@ import numpy as np -from gym import spaces from . import VecEnv from .util import copy_obs_dict, dict_to_obs, obs_space_info + class DummyVecEnv(VecEnv): """ VecEnv that does runs multiple environments sequentially, that is, @@ -13,6 +13,7 @@ class DummyVecEnv(VecEnv): def __init__(self, env_fns): """ Arguments: + env_fns: iterable of callables functions that build environments """ self.envs = [fn() for fn in env_fns] @@ -26,7 +27,7 @@ def __init__(self, env_fns): self.buf_rews = np.zeros((self.num_envs,), dtype=np.float32) self.buf_infos = [{} for _ in range(self.num_envs)] self.actions = None - self.specs = [e.spec for e in self.envs] + self.spec = self.envs[0].spec def step_async(self, actions): listify = True @@ -45,8 +46,8 @@ def step_async(self, actions): def step_wait(self): for e in range(self.num_envs): action = self.actions[e] - if isinstance(self.envs[e].action_space, spaces.Discrete): - action = int(action) + # if isinstance(self.envs[e].action_space, spaces.Discrete): + # action = int(action) obs, self.buf_rews[e], self.buf_dones[e], self.buf_infos[e] = self.envs[e].step(action) if self.buf_dones[e]: diff --git a/vel/openai/baselines/common/vec_env/shmem_vec_env.py b/vel/openai/baselines/common/vec_env/shmem_vec_env.py index 4d941043..fcee5ad5 100644 --- a/vel/openai/baselines/common/vec_env/shmem_vec_env.py +++ b/vel/openai/baselines/common/vec_env/shmem_vec_env.py @@ -2,12 +2,12 @@ An interface for asynchronous vectorized environments. """ -from multiprocessing import Pipe, Array, Process - +import multiprocessing as mp import numpy as np from . import VecEnv, CloudpickleWrapper import ctypes from vel.openai.baselines import logger +from . import clear_mpi_env_vars from .util import dict_to_obs, obs_space_info, obs_to_dict @@ -23,11 +23,12 @@ class ShmemVecEnv(VecEnv): Optimized version of SubprocVecEnv that uses shared variables to communicate observations. """ - def __init__(self, env_fns, spaces=None): + def __init__(self, env_fns, spaces=None, context='spawn'): """ If you don't specify observation_space, we'll have to create a dummy environment to get it. 
""" + ctx = mp.get_context(context) if spaces: observation_space, action_space = spaces else: @@ -40,22 +41,24 @@ def __init__(self, env_fns, spaces=None): VecEnv.__init__(self, len(env_fns), observation_space, action_space) self.obs_keys, self.obs_shapes, self.obs_dtypes = obs_space_info(observation_space) self.obs_bufs = [ - {k: Array(_NP_TO_CT[self.obs_dtypes[k].type], int(np.prod(self.obs_shapes[k]))) for k in self.obs_keys} + {k: ctx.Array(_NP_TO_CT[self.obs_dtypes[k].type], int(np.prod(self.obs_shapes[k]))) for k in + self.obs_keys} for _ in env_fns] self.parent_pipes = [] self.procs = [] - for env_fn, obs_buf in zip(env_fns, self.obs_bufs): - wrapped_fn = CloudpickleWrapper(env_fn) - parent_pipe, child_pipe = Pipe() - proc = Process(target=_subproc_worker, - args=(child_pipe, parent_pipe, wrapped_fn, obs_buf, self.obs_shapes, self.obs_dtypes, self.obs_keys)) - proc.daemon = True - self.procs.append(proc) - self.parent_pipes.append(parent_pipe) - proc.start() - child_pipe.close() + with clear_mpi_env_vars(): + for env_fn, obs_buf in zip(env_fns, self.obs_bufs): + wrapped_fn = CloudpickleWrapper(env_fn) + parent_pipe, child_pipe = ctx.Pipe() + proc = ctx.Process(target=_subproc_worker, + args=(child_pipe, parent_pipe, wrapped_fn, obs_buf, self.obs_shapes, + self.obs_dtypes, self.obs_keys)) + proc.daemon = True + self.procs.append(proc) + self.parent_pipes.append(parent_pipe) + proc.start() + child_pipe.close() self.waiting_step = False - self.specs = [f().spec for f in env_fns] self.viewer = None def reset(self): @@ -70,9 +73,11 @@ def step_async(self, actions): assert len(actions) == len(self.parent_pipes) for pipe, act in zip(self.parent_pipes, actions): pipe.send(('step', act)) + self.waiting_step = True def step_wait(self): outs = [pipe.recv() for pipe in self.parent_pipes] + self.waiting_step = False obs, rews, dones, infos = zip(*outs) return self._decode_obses(obs), np.array(rews), np.array(dones), infos @@ -95,18 +100,17 @@ def get_images(self, mode='human'): def _decode_obses(self, obs): result = {} for k in self.obs_keys: - bufs = [b[k] for b in self.obs_bufs] o = [np.frombuffer(b.get_obj(), dtype=self.obs_dtypes[k]).reshape(self.obs_shapes[k]) for b in bufs] result[k] = np.array(o) return dict_to_obs(result) - def _subproc_worker(pipe, parent_pipe, env_fn_wrapper, obs_bufs, obs_shapes, obs_dtypes, keys): """ Control a single environment instance using IPC and shared memory. """ + def _write_obs(maybe_dict_obs): flatdict = obs_to_dict(maybe_dict_obs) for k in keys: diff --git a/vel/openai/baselines/common/vec_env/subproc_vec_env.py b/vel/openai/baselines/common/vec_env/subproc_vec_env.py index fe46ff06..a5b72ace 100644 --- a/vel/openai/baselines/common/vec_env/subproc_vec_env.py +++ b/vel/openai/baselines/common/vec_env/subproc_vec_env.py @@ -1,6 +1,7 @@ +import multiprocessing as mp + import numpy as np -from multiprocessing import Process, Pipe -from . import VecEnv, CloudpickleWrapper +from . 
import VecEnv, CloudpickleWrapper, clear_mpi_env_vars def worker(remote, parent_remote, env_fn_wrapper): @@ -22,8 +23,8 @@ def worker(remote, parent_remote, env_fn_wrapper): elif cmd == 'close': remote.close() break - elif cmd == 'get_spaces': - remote.send((env.observation_space, env.action_space)) + elif cmd == 'get_spaces_spec': + remote.send((env.observation_space, env.action_space, env.spec)) else: raise NotImplementedError except KeyboardInterrupt: @@ -37,7 +38,7 @@ class SubprocVecEnv(VecEnv): VecEnv that runs multiple environments in parallel in subproceses and communicates with them via pipes. Recommended to use when num_envs > 1 and step() can be a bottleneck. """ - def __init__(self, env_fns, spaces=None): + def __init__(self, env_fns, spaces=None, context='spawn'): """ Arguments: @@ -46,19 +47,20 @@ def __init__(self, env_fns, spaces=None): self.waiting = False self.closed = False nenvs = len(env_fns) - self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)]) - self.ps = [Process(target=worker, args=(work_remote, remote, CloudpickleWrapper(env_fn))) + ctx = mp.get_context(context) + self.remotes, self.work_remotes = zip(*[ctx.Pipe() for _ in range(nenvs)]) + self.ps = [ctx.Process(target=worker, args=(work_remote, remote, CloudpickleWrapper(env_fn))) for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)] for p in self.ps: p.daemon = True # if the main process crashes, we should not cause things to hang - p.start() + with clear_mpi_env_vars(): + p.start() for remote in self.work_remotes: remote.close() - self.remotes[0].send(('get_spaces', None)) - observation_space, action_space = self.remotes[0].recv() + self.remotes[0].send(('get_spaces_spec', None)) + observation_space, action_space, self.spec = self.remotes[0].recv() self.viewer = None - self.specs = [f().spec for f in env_fns] VecEnv.__init__(self, len(env_fns), observation_space, action_space) def step_async(self, actions): @@ -100,16 +102,16 @@ def get_images(self): def _assert_not_closed(self): assert not self.closed, "Trying to operate on a SubprocVecEnv after calling close()" + def __del__(self): + if not self.closed: + self.close() def _flatten_obs(obs): - assert isinstance(obs, list) or isinstance(obs, tuple) + assert isinstance(obs, (list, tuple)) assert len(obs) > 0 if isinstance(obs[0], dict): - import collections - assert isinstance(obs, collections.OrderedDict) keys = obs[0].keys() return {k: np.stack([o[k] for o in obs]) for k in keys} else: return np.stack(obs) - diff --git a/vel/openai/baselines/common/wrappers.py b/vel/openai/baselines/common/wrappers.py new file mode 100644 index 00000000..95919264 --- /dev/null +++ b/vel/openai/baselines/common/wrappers.py @@ -0,0 +1,31 @@ +import gym + + +class TimeLimit(gym.Wrapper): + def __init__(self, env, max_episode_steps=None): + super(TimeLimit, self).__init__(env) + self._max_episode_steps = max_episode_steps + self._elapsed_steps = 0 + + def step(self, ac): + observation, reward, done, info = self.env.step(ac) + self._elapsed_steps += 1 + if self._elapsed_steps >= self._max_episode_steps: + done = True + info['TimeLimit.truncated'] = True + return observation, reward, done, info + + def reset(self, **kwargs): + self._elapsed_steps = 0 + return self.env.reset(**kwargs) + + +class ClipActionsWrapper(gym.Wrapper): + def step(self, action): + import numpy as np + action = np.nan_to_num(action) + action = np.clip(action, self.action_space.low, self.action_space.high) + return self.env.step(action) + + def reset(self, 
**kwargs): + return self.env.reset(**kwargs) diff --git a/vel/openai/baselines/logger.py b/vel/openai/baselines/logger.py index 05eab9ac..e92776ca 100644 --- a/vel/openai/baselines/logger.py +++ b/vel/openai/baselines/logger.py @@ -7,6 +7,7 @@ import datetime import tempfile from collections import defaultdict +from contextlib import contextmanager DEBUG = 10 INFO = 20 @@ -37,8 +38,8 @@ def writekvs(self, kvs): # Create strings for printing key2str = {} for (key, val) in sorted(kvs.items()): - if isinstance(val, float): - valstr = '%-8.3g' % (val,) + if hasattr(val, '__float__'): + valstr = '%-8.3g' % val else: valstr = str(val) key2str[self._truncate(key)] = self._truncate(valstr) @@ -68,7 +69,8 @@ def writekvs(self, kvs): self.file.flush() def _truncate(self, s): - return s[:20] + '...' if len(s) > 23 else s + maxlen = 30 + return s[:maxlen-3] + '...' if len(s) > maxlen else s def writeseq(self, seq): seq = list(seq) @@ -90,7 +92,6 @@ def __init__(self, filename): def writekvs(self, kvs): for k, v in sorted(kvs.items()): if hasattr(v, 'dtype'): - v = v.tolist() kvs[k] = float(v) self.file.write(json.dumps(kvs) + '\n') self.file.flush() @@ -195,13 +196,13 @@ def logkv(key, val): Call this once for each diagnostic quantity, each iteration If called many times, last value will be used. """ - Logger.CURRENT.logkv(key, val) + get_current().logkv(key, val) def logkv_mean(key, val): """ The same as logkv(), but if called many times, values averaged. """ - Logger.CURRENT.logkv_mean(key, val) + get_current().logkv_mean(key, val) def logkvs(d): """ @@ -213,21 +214,18 @@ def logkvs(d): def dumpkvs(): """ Write all of the diagnostics from the current iteration - - level: int. (see logger.py docs) If the global logger level is higher than - the level argument here, don't print to stdout. """ - Logger.CURRENT.dumpkvs() + return get_current().dumpkvs() def getkvs(): - return Logger.CURRENT.name2val + return get_current().name2val def log(*args, level=INFO): """ Write the sequence of args, with no separators, to the console and output files (if you've configured an output file). """ - Logger.CURRENT.log(*args, level=level) + get_current().log(*args, level=level) def debug(*args): log(*args, level=DEBUG) @@ -246,30 +244,29 @@ def set_level(level): """ Set logging threshold on current logger. """ - Logger.CURRENT.set_level(level) + get_current().set_level(level) + +def set_comm(comm): + get_current().set_comm(comm) def get_dir(): """ Get directory that log files are being written to. 
will be None if there is no output directory (i.e., if you didn't call start) """ - return Logger.CURRENT.get_dir() + return get_current().get_dir() record_tabular = logkv dump_tabular = dumpkvs -class ProfileKV: - """ - Usage: - with logger.ProfileKV("interesting_scope"): - code - """ - def __init__(self, n): - self.n = "wait_" + n - def __enter__(self): - self.t1 = time.time() - def __exit__(self ,type, value, traceback): - Logger.CURRENT.name2val[self.n] += time.time() - self.t1 +@contextmanager +def profile_kv(scopename): + logkey = 'wait_' + scopename + tstart = time.time() + try: + yield + finally: + get_current().name2val[logkey] += time.time() - tstart def profile(n): """ @@ -279,7 +276,7 @@ def my_func(): code """ def decorator_with_name(func): def func_wrapper(*args, **kwargs): - with ProfileKV(n): + with profile_kv(n): return func(*args, **kwargs) return func_wrapper return decorator_with_name @@ -289,17 +286,25 @@ def func_wrapper(*args, **kwargs): # Backend # ================================================================ +def get_current(): + if Logger.CURRENT is None: + _configure_default_logger() + + return Logger.CURRENT + + class Logger(object): DEFAULT = None # A logger with no output files. (See right below class definition) # So that you can still log to the terminal without setting up any output files CURRENT = None # Current logger being used by the free functions above - def __init__(self, dir, output_formats): + def __init__(self, dir, output_formats, comm=None): self.name2val = defaultdict(float) # values this iteration self.name2cnt = defaultdict(int) self.level = INFO self.dir = dir self.output_formats = output_formats + self.comm = comm # Logging API, forwarded # ---------------------------------------- @@ -307,20 +312,19 @@ def logkv(self, key, val): self.name2val[key] = val def logkv_mean(self, key, val): - if val is None: - self.name2val[key] = None - return oldval, cnt = self.name2val[key], self.name2cnt[key] self.name2val[key] = oldval*cnt/(cnt+1) + val/(cnt+1) self.name2cnt[key] = cnt + 1 def dumpkvs(self): - if self.level == DISABLED: return + d = self.name2val + out = d.copy() # Return the dict for unit testing purposes for fmt in self.output_formats: if isinstance(fmt, KVWriter): - fmt.writekvs(self.name2val) + fmt.writekvs(d) self.name2val.clear() self.name2cnt.clear() + return out def log(self, *args, level=INFO): if self.level <= level: @@ -331,6 +335,9 @@ def log(self, *args, level=INFO): def set_level(self, level): self.level = level + def set_comm(self, comm): + self.comm = comm + def get_dir(self): return self.dir @@ -345,7 +352,19 @@ def _do_log(self, args): if isinstance(fmt, SeqWriter): fmt.writeseq(map(str, args)) -def configure(dir=None, format_strs=None): +def get_rank_without_mpi_import(): + # check environment variables here instead of importing mpi4py + # to avoid calling MPI_Init() when this module is imported + for varname in ['PMI_RANK', 'OMPI_COMM_WORLD_RANK']: + if varname in os.environ: + return int(os.environ[varname]) + return 0 + + +def configure(dir=None, format_strs=None, comm=None, log_suffix=''): + """ + If comm is provided, average all numerical stats across that comm + """ if dir is None: dir = os.getenv('OPENAI_LOGDIR') if dir is None: @@ -354,15 +373,9 @@ def configure(dir=None, format_strs=None): assert isinstance(dir, str) os.makedirs(dir, exist_ok=True) - log_suffix = '' - rank = 0 - # check environment variables here instead of importing mpi4py - # to avoid calling MPI_Init() when this module is imported - for 
varname in ['PMI_RANK', 'OMPI_COMM_WORLD_RANK']: - if varname in os.environ: - rank = int(os.environ[varname]) + rank = get_rank_without_mpi_import() if rank > 0: - log_suffix = "-rank%03i" % rank + log_suffix = log_suffix + "-rank%03i" % rank if format_strs is None: if rank == 0: @@ -372,15 +385,11 @@ def configure(dir=None, format_strs=None): format_strs = filter(None, format_strs) output_formats = [make_output_format(f, dir, log_suffix) for f in format_strs] - Logger.CURRENT = Logger(dir=dir, output_formats=output_formats) - # log('Logging to %s'%dir) + Logger.CURRENT = Logger(dir=dir, output_formats=output_formats, comm=comm) + log('Logging to %s'%dir) def _configure_default_logger(): - format_strs = None - # keep the old default of only writing to stdout - if 'OPENAI_LOG_FORMAT' not in os.environ: - format_strs = ['stdout'] - configure(format_strs=format_strs) + configure() Logger.DEFAULT = Logger.CURRENT def reset(): @@ -389,17 +398,15 @@ def reset(): Logger.CURRENT = Logger.DEFAULT log('Reset logger') -class scoped_configure(object): - def __init__(self, dir=None, format_strs=None): - self.dir = dir - self.format_strs = format_strs - self.prevlogger = None - def __enter__(self): - self.prevlogger = Logger.CURRENT - configure(dir=self.dir, format_strs=self.format_strs) - def __exit__(self, *args): +@contextmanager +def scoped_configure(dir=None, format_strs=None, comm=None): + prevlogger = Logger.CURRENT + configure(dir=dir, format_strs=format_strs, comm=comm) + try: + yield + finally: Logger.CURRENT.close() - Logger.CURRENT = self.prevlogger + Logger.CURRENT = prevlogger # ================================================================ @@ -423,7 +430,7 @@ def _demo(): logkv_mean("b", -44.4) logkv("a", 5.5) dumpkvs() - info("^^^ should see b = 33.3") + info("^^^ should see b = -33.3") logkv("b", -2.5) dumpkvs() @@ -456,7 +463,6 @@ def read_tb(path): import pandas import numpy as np from glob import glob - from collections import defaultdict import tensorflow as tf if osp.isdir(path): fnames = glob(osp.join(path, "events.*")) @@ -482,8 +488,5 @@ def read_tb(path): data[step-1, colidx] = value return pandas.DataFrame(data, columns=tags) -# configure the default logger on import -_configure_default_logger() - if __name__ == "__main__": _demo() diff --git a/vel/rl/algo/a2c.py b/vel/rl/algo/a2c.py new file mode 100644 index 00000000..69b7926d --- /dev/null +++ b/vel/rl/algo/a2c.py @@ -0,0 +1,142 @@ +import torch +import torch.nn.functional as F + +from vel.metric.base import AveragingNamedMetric +from vel.calc.function import explained_variance +from vel.api import BackboneModel, ModelFactory, BatchInfo + +from vel.rl.api import RlPolicy, Rollout, Trajectories +from vel.rl.discount_bootstrap import discount_bootstrap_gae + + +class A2C(RlPolicy): + """ Simplest policy gradient - calculate loss as an advantage of an actor versus value function """ + def __init__(self, policy: BackboneModel, entropy_coefficient, value_coefficient, discount_factor: float, + gae_lambda=1.0): + super().__init__(discount_factor) + + self.entropy_coefficient = entropy_coefficient + self.value_coefficient = value_coefficient + self.gae_lambda = gae_lambda + + self.policy = policy + + def reset_weights(self): + """ Initialize properly model weights """ + self.policy.reset_weights() + + def forward(self, observation): + """ Calculate model outputs """ + return self.policy(observation) + + def act(self, observation, state=None, deterministic=False): + """ Select actions based on model's output """ + 
action_pd_params, value_output = self(observation) + actions = self.policy.action_head.sample(action_pd_params, deterministic=deterministic) + + # log likelihood of selected action + logprobs = self.policy.action_head.logprob(actions, action_pd_params) + + return { + 'actions': actions, + 'values': value_output, + 'action:logprobs': logprobs + } + + def process_rollout(self, rollout: Rollout) -> Rollout: + """ Process rollout for optimization before any chunking/shuffling """ + assert isinstance(rollout, Trajectories), "A2C requires trajectory rollouts" + + advantages = discount_bootstrap_gae( + rewards_buffer=rollout.transition_tensors['rewards'], + dones_buffer=rollout.transition_tensors['dones'], + values_buffer=rollout.transition_tensors['values'], + final_values=rollout.rollout_tensors['final_values'], + discount_factor=self.discount_factor, + gae_lambda=self.gae_lambda, + number_of_steps=rollout.num_steps + ) + + returns = advantages + rollout.transition_tensors['values'] + + rollout.transition_tensors['advantages'] = advantages + rollout.transition_tensors['returns'] = returns + + return rollout + + def calculate_gradient(self, batch_info: BatchInfo, rollout: Rollout) -> dict: + """ Calculate loss of the supplied rollout """ + observations = rollout.batch_tensor('observations') + + actions = rollout.batch_tensor('actions') + advantages = rollout.batch_tensor('advantages') + returns = rollout.batch_tensor('returns') + rollout_values = rollout.batch_tensor('values') + + pd_params, model_values = self(observations) + + log_probs = self.policy.action_head.logprob(actions, pd_params) + entropy = self.policy.action_head.entropy(pd_params) + + # Actual calculations. Pretty trivial + policy_loss = -torch.mean(advantages * log_probs) + value_loss = 0.5 * F.mse_loss(model_values, returns) + policy_entropy = torch.mean(entropy) + + loss_value = ( + policy_loss - self.entropy_coefficient * policy_entropy + self.value_coefficient * value_loss + ) + + loss_value.backward() + + return { + 'policy_loss': policy_loss.item(), + 'value_loss': value_loss.item(), + 'policy_entropy': policy_entropy.item(), + 'advantage_norm': torch.norm(advantages).item(), + 'explained_variance': explained_variance(returns, rollout_values) + } + + def metrics(self) -> list: + """ List of metrics to track for this learning process """ + return [ + AveragingNamedMetric("value_loss", scope="model"), + AveragingNamedMetric("policy_entropy", scope="model"), + AveragingNamedMetric("policy_loss", scope="model"), + AveragingNamedMetric("advantage_norm", scope="model"), + AveragingNamedMetric("explained_variance", scope="model") + ] + + +class A2CFactory(ModelFactory): + """ Factory class for policy gradient models """ + def __init__(self, policy, entropy_coefficient, value_coefficient, discount_factor, gae_lambda=1.0): + self.policy = policy + self.entropy_coefficient = entropy_coefficient + self.value_coefficient = value_coefficient + self.discount_factor = discount_factor + self.gae_lambda = gae_lambda + + def instantiate(self, **extra_args): + """ Instantiate the model """ + # action_space = extra_args.pop('action_space') + policy = self.policy.instantiate(**extra_args) + + return A2C( + policy=policy, + entropy_coefficient=self.entropy_coefficient, + value_coefficient=self.value_coefficient, + discount_factor=self.discount_factor, + gae_lambda=self.gae_lambda + ) + + +def create(policy: BackboneModel, entropy_coefficient, value_coefficient, discount_factor, gae_lambda=1.0): + """ Vel factory function """ + return A2CFactory( 
+ policy=policy, + entropy_coefficient=entropy_coefficient, + value_coefficient=value_coefficient, + discount_factor=discount_factor, + gae_lambda=gae_lambda + ) diff --git a/vel/rl/algo/policy_gradient/trpo.py b/vel/rl/algo/policy_gradient/trpo.py deleted file mode 100644 index f4fa4206..00000000 --- a/vel/rl/algo/policy_gradient/trpo.py +++ /dev/null @@ -1,274 +0,0 @@ -import numpy as np -import torch -import torch.autograd as autograd -import torch.nn.functional as F -import torch.nn.utils - -from vel.calc.function import explained_variance -from vel.metric.base import AveragingNamedMetric -from vel.rl.api import AlgoBase, Rollout, Trajectories -from vel.rl.discount_bootstrap import discount_bootstrap_gae - - -def p2v(params): - """ Parameters to vector - shorthand utility version """ - return torch.nn.utils.parameters_to_vector(params) - - -def v2p(vector, params): - """ Vector to parameters - shorthand utility version """ - return torch.nn.utils.vector_to_parameters(vector, params) - - -def conjugate_gradient_method(matrix_vector_operator, loss_gradient, nsteps, rdotr_tol=1e-10): - """ Conjugate gradient algorithm """ - x = torch.zeros_like(loss_gradient) - - r = loss_gradient.clone() - p = loss_gradient.clone() - - rdotr = torch.dot(r, r) - - for i in range(nsteps): - avp = matrix_vector_operator(p) - alpha = rdotr / torch.dot(p, avp) - - x += alpha * p - r -= alpha * avp - - new_rdotr = torch.dot(r, r) - betta = new_rdotr / rdotr - p = r + betta * p - rdotr = new_rdotr - - if rdotr < rdotr_tol: - break - - return x - - -class TrpoPolicyGradient(AlgoBase): - """ Trust Region Policy Optimization - https://arxiv.org/abs/1502.05477 """ - - def __init__(self, max_kl, cg_iters, line_search_iters, cg_damping, entropy_coef, vf_iters, - discount_factor, gae_lambda, improvement_acceptance_ratio, max_grad_norm): - self.mak_kl = max_kl - self.cg_iters = cg_iters - self.line_search_iters = line_search_iters - self.cg_damping = cg_damping - self.entropy_coef = entropy_coef - self.vf_iters = vf_iters - self.discount_factor = discount_factor - self.gae_lambda = gae_lambda - self.improvement_acceptance_ratio = improvement_acceptance_ratio - self.max_grad_norm = max_grad_norm - - def process_rollout(self, batch_info, rollout: Rollout): - """ Process rollout for ALGO before any chunking/shuffling """ - assert isinstance(rollout, Trajectories), "TRPO requires trajectory rollouts" - - advantages = discount_bootstrap_gae( - rewards_buffer=rollout.transition_tensors['rewards'], - dones_buffer=rollout.transition_tensors['dones'], - values_buffer=rollout.transition_tensors['values'], - final_values=rollout.rollout_tensors['final_values'], - discount_factor=self.discount_factor, - gae_lambda=self.gae_lambda, - number_of_steps=rollout.num_steps - ) - - returns = advantages + rollout.transition_tensors['values'] - - rollout.transition_tensors['advantages'] = advantages - rollout.transition_tensors['returns'] = returns - - return rollout - - def optimize(self, batch_info, device, model, rollout): - """ Single optimization step for a model """ - rollout = rollout.to_transitions() - - # This algorithm makes quote strong assumptions about how does the model look - # so it does not make that much sense to switch to the evaluator interface - # As it would be more of a problem than actual benefit - - observations = rollout.batch_tensor('observations') - returns = rollout.batch_tensor('returns') - - # Evaluate model on the observations - policy_params = model.policy(observations) - policy_entropy = 
torch.mean(model.entropy(policy_params)) - - policy_loss = self.calc_policy_loss(model, policy_params, policy_entropy, rollout) - policy_grad = p2v(autograd.grad(policy_loss, model.policy_parameters(), retain_graph=True)).detach() - - # Calculate gradient of KL divergence of model with fixed version of itself - # Value of kl_divergence will be 0, but what we need is the gradient, actually the 2nd derivarive - kl_divergence = torch.mean(model.kl_divergence(policy_params.detach(), policy_params)) - kl_divergence_gradient = p2v(torch.autograd.grad(kl_divergence, model.policy_parameters(), create_graph=True)) - - step_direction = conjugate_gradient_method( - matrix_vector_operator=lambda x: self.fisher_vector_product(x, kl_divergence_gradient, model), - # Because we want to decrease the loss, we want to go into the direction of -gradient - loss_gradient=-policy_grad, - nsteps=self.cg_iters - ) - - shs = 0.5 * step_direction @ self.fisher_vector_product(step_direction, kl_divergence_gradient, model) - lm = torch.sqrt(shs / self.mak_kl) - full_step = step_direction / lm - - # Because we want to decrease the loss, we want to go into the direction of -gradient - expected_improvement = (-policy_grad) @ full_step - original_parameter_vec = p2v(model.policy_parameters()).detach_() - - (policy_optimization_success, ratio, policy_loss_improvement, new_policy_loss, kl_divergence_step) = ( - self.line_search( - model, rollout, policy_loss, policy_params, original_parameter_vec, full_step, expected_improvement - ) - ) - - gradient_norms = [] - - for i in range(self.vf_iters): - batch_info.optimizer.zero_grad() - value_loss = self.value_loss(model, observations, returns) - - value_loss.backward() - - # Gradient clipping - if self.max_grad_norm is not None: - grad_norm = torch.nn.utils.clip_grad_norm_( - filter(lambda p: p.requires_grad, model.parameters()), - max_norm=self.max_grad_norm - ) - - gradient_norms.append(grad_norm) - - batch_info.optimizer.step(closure=None) - - if gradient_norms: - gradient_norm = np.mean(gradient_norms) - else: - gradient_norm = 0.0 - - # noinspection PyUnboundLocalVariable - return { - 'new_policy_loss': new_policy_loss.item(), - 'policy_entropy': policy_entropy.item(), - 'value_loss': value_loss.item(), - 'policy_optimization_success': float(policy_optimization_success), - 'policy_improvement_ratio': ratio.item(), - 'kl_divergence_step': kl_divergence_step.item(), - 'policy_loss_improvement': policy_loss_improvement.item(), - 'grad_norm': gradient_norm, - 'advantage_norm': torch.norm(rollout.batch_tensor('advantages')).item(), - 'explained_variance': explained_variance(returns, rollout.batch_tensor('values')) - } - - def line_search(self, model, rollout, original_policy_loss, original_policy_params, original_parameter_vec, - full_step, expected_improvement_full): - """ Find the right stepsize to make sure policy improves """ - current_parameter_vec = original_parameter_vec.clone() - - for idx in range(self.line_search_iters): - stepsize = 0.5 ** idx - - new_parameter_vec = current_parameter_vec + stepsize * full_step - - # Update model parameters - v2p(new_parameter_vec, model.policy_parameters()) - - # Calculate new loss - with torch.no_grad(): - policy_params = model.policy(rollout.batch_tensor('observations')) - policy_entropy = torch.mean(model.entropy(policy_params)) - kl_divergence = torch.mean(model.kl_divergence(original_policy_params, policy_params)) - - new_loss = self.calc_policy_loss(model, policy_params, policy_entropy, rollout) - - actual_improvement = 
original_policy_loss - new_loss - expected_improvement = expected_improvement_full * stepsize - - ratio = actual_improvement / expected_improvement - - if kl_divergence.item() > self.mak_kl * 1.5: - # KL divergence bound exceeded - continue - elif ratio < expected_improvement: - # Not enough loss improvement - continue - else: - # Optimization successful - return True, ratio, actual_improvement, new_loss, kl_divergence - - # Optimization failed, revert to initial parameters - v2p(original_parameter_vec, model.policy_parameters()) - return False, torch.tensor(0.0), torch.tensor(0.0), torch.tensor(0.0), torch.tensor(0.0) - - def fisher_vector_product(self, vector, kl_divergence_gradient, model): - """ Calculate product Hessian @ vector """ - assert not vector.requires_grad, "Vector must not propagate gradient" - dot_product = vector @ kl_divergence_gradient - - # at least one dimension spans across two contiguous subspaces - double_gradient = torch.autograd.grad(dot_product, model.policy_parameters(), retain_graph=True) - fvp = p2v(x.contiguous() for x in double_gradient) - - return fvp + vector * self.cg_damping - - def value_loss(self, model, observations, discounted_rewards): - """ Loss of value estimator """ - value_outputs = model.value(observations) - value_loss = 0.5 * F.mse_loss(value_outputs, discounted_rewards) - return value_loss - - def calc_policy_loss(self, model, policy_params, policy_entropy, rollout): - """ - Policy gradient loss - calculate from probability distribution - - Calculate surrogate loss - advantage * policy_probability / fixed_initial_policy_probability - - Because we operate with logarithm of -probability (neglogp) we do - - advantage * exp(fixed_neglogps - model_neglogps) - """ - actions = rollout.batch_tensor('actions') - advantages = rollout.batch_tensor('advantages') - fixed_logprobs = rollout.batch_tensor('action:logprobs') - - model_logprobs = model.logprob(actions, policy_params) - - # Normalize advantages - advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-8) - - # We put - in front because we want to maximize the surrogate objective - policy_loss = -advantages * torch.exp(model_logprobs - fixed_logprobs) - - return policy_loss.mean() - policy_entropy * self.entropy_coef - - def metrics(self) -> list: - """ List of metrics to track for this learning process """ - return [ - AveragingNamedMetric("new_policy_loss"), - AveragingNamedMetric("policy_entropy"), - AveragingNamedMetric("value_loss"), - AveragingNamedMetric("policy_optimization_success"), - AveragingNamedMetric("policy_improvement_ratio"), - AveragingNamedMetric("kl_divergence_step"), - AveragingNamedMetric("policy_loss_improvement"), - AveragingNamedMetric("grad_norm"), - AveragingNamedMetric("advantage_norm"), - AveragingNamedMetric("explained_variance") - ] - - -def create(max_kl, cg_iters, line_search_iters, cg_damping, entropy_coef, vf_iters, discount_factor, - gae_lambda=1.0, improvement_acceptance_ratio=0.1, max_grad_norm=0.5): - """ Vel factory function """ - return TrpoPolicyGradient( - max_kl, int(cg_iters), int(line_search_iters), cg_damping, entropy_coef, vf_iters, - discount_factor=discount_factor, - gae_lambda=gae_lambda, - improvement_acceptance_ratio=improvement_acceptance_ratio, - max_grad_norm=max_grad_norm - ) diff --git a/vel/rl/algo/ppo.py b/vel/rl/algo/ppo.py new file mode 100644 index 00000000..483b4830 --- /dev/null +++ b/vel/rl/algo/ppo.py @@ -0,0 +1,193 @@ +import torch + +import numbers + +from vel.api import BackboneModel, BatchInfo, ModelFactory 
+from vel.calc.function import explained_variance +from vel.function.constant import ConstantSchedule +from vel.metric.base import AveragingNamedMetric + +from vel.rl.api import RlPolicy, Rollout, Trajectories +from vel.rl.discount_bootstrap import discount_bootstrap_gae + + +class PPO(RlPolicy): + """ Proximal Policy Optimization - https://arxiv.org/abs/1707.06347 """ + def __init__(self, policy: BackboneModel, + entropy_coefficient, value_coefficient, cliprange, discount_factor: float, + normalize_advantage: bool = True, gae_lambda: float = 1.0): + super().__init__(discount_factor) + + self.entropy_coefficient = entropy_coefficient + self.value_coefficient = value_coefficient + self.normalize_advantage = normalize_advantage + self.gae_lambda = gae_lambda + + if isinstance(cliprange, numbers.Number): + self.cliprange = ConstantSchedule(cliprange) + else: + self.cliprange = cliprange + + self.policy = policy + + def reset_weights(self): + """ Initialize properly model weights """ + self.policy.reset_weights() + + def forward(self, observation): + """ Calculate model outputs """ + return self.policy.forward(observation) + + def act(self, observation, state=None, deterministic=False): + """ Select actions based on model's output """ + action_pd_params, value_output = self(observation) + actions = self.policy.action_head.sample(action_pd_params, deterministic=deterministic) + + # log likelihood of selected action + logprobs = self.policy.action_head.logprob(actions, action_pd_params) + + return { + 'actions': actions, + 'values': value_output, + 'action:logprobs': logprobs + } + + def process_rollout(self, rollout: Rollout): + """ Process rollout for optimization before any chunking/shuffling """ + assert isinstance(rollout, Trajectories), "PPO requires trajectory rollouts" + + advantages = discount_bootstrap_gae( + rewards_buffer=rollout.transition_tensors['rewards'], + dones_buffer=rollout.transition_tensors['dones'], + values_buffer=rollout.transition_tensors['values'], + final_values=rollout.rollout_tensors['final_values'], + discount_factor=self.discount_factor, + gae_lambda=self.gae_lambda, + number_of_steps=rollout.num_steps + ) + + returns = advantages + rollout.transition_tensors['values'] + + rollout.transition_tensors['advantages'] = advantages + rollout.transition_tensors['returns'] = returns + + return rollout + + def calculate_gradient(self, batch_info: BatchInfo, rollout: Rollout) -> dict: + """ Calculate loss of the supplied rollout """ + observations = rollout.batch_tensor('observations') + + # Part 0.0 - Rollout values + actions = rollout.batch_tensor('actions') + advantages = rollout.batch_tensor('advantages') + returns = rollout.batch_tensor('returns') + rollout_values = rollout.batch_tensor('values') + + rollout_action_logprobs = rollout.batch_tensor('action:logprobs') + + # PART 0.1 - Model evaluation + pd_params, model_values = self(observations) + + model_action_logprobs = self.policy.action_head.logprob(actions, pd_params) + entropy = self.policy.action_head.entropy(pd_params) + + # Select the cliprange + current_cliprange = self.cliprange.value(batch_info['progress']) + + # Normalize the advantages? 
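# Illustrative aside (not part of this diff): normalizing advantages per batch
# rescales them to roughly zero mean and unit variance, so the magnitude of the
# policy-gradient term stays comparable across batches and reward scales.
# A minimal sketch of what the next lines do, assuming `advantages` is a 1-D torch.Tensor:
#
#   advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-8)
#
# The 1e-8 term only guards against division by zero when the advantages are nearly constant.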
+ if self.normalize_advantage: + advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-8) + + # PART 1 - policy entropy + policy_entropy = torch.mean(entropy) + + # PART 2 - value function + value_output_clipped = rollout_values + torch.clamp( + model_values - rollout_values, -current_cliprange, current_cliprange + ) + value_loss_part1 = (model_values - returns).pow(2) + value_loss_part2 = (value_output_clipped - returns).pow(2) + value_loss = 0.5 * torch.mean(torch.max(value_loss_part1, value_loss_part2)) + + # PART 3 - policy gradient loss + ratio = torch.exp(model_action_logprobs - rollout_action_logprobs) + + pg_loss_part1 = -advantages * ratio + pg_loss_part2 = -advantages * torch.clamp(ratio, 1.0 - current_cliprange, 1.0 + current_cliprange) + policy_loss = torch.mean(torch.max(pg_loss_part1, pg_loss_part2)) + + loss_value = ( + policy_loss - self.entropy_coefficient * policy_entropy + self.value_coefficient * value_loss + ) + + loss_value.backward() + + with torch.no_grad(): + approx_kl_divergence = 0.5 * torch.mean((model_action_logprobs - rollout_action_logprobs).pow(2)) + clip_fraction = torch.mean((torch.abs(ratio - 1.0) > current_cliprange).to(dtype=torch.float)) + + return { + 'policy_loss': policy_loss.item(), + 'value_loss': value_loss.item(), + 'policy_entropy': policy_entropy.item(), + 'approx_kl_divergence': approx_kl_divergence.item(), + 'clip_fraction': clip_fraction.item(), + 'advantage_norm': torch.norm(advantages).item(), + 'explained_variance': explained_variance(returns, rollout_values) + } + + def metrics(self) -> list: + """ List of metrics to track for this learning process """ + return [ + AveragingNamedMetric("policy_loss", scope="model"), + AveragingNamedMetric("value_loss", scope="model"), + AveragingNamedMetric("policy_entropy", scope="model"), + AveragingNamedMetric("approx_kl_divergence", scope="model"), + AveragingNamedMetric("clip_fraction", scope="model"), + AveragingNamedMetric("advantage_norm", scope="model"), + AveragingNamedMetric("explained_variance", scope="model") + ] + + +class PPOFactory(ModelFactory): + """ Factory class for policy gradient models """ + def __init__(self, policy: BackboneModel, + entropy_coefficient, value_coefficient, cliprange, discount_factor: float, + normalize_advantage: bool = True, gae_lambda: float = 1.0): + self.policy = policy + self.entropy_coefficient = entropy_coefficient + self.value_coefficient = value_coefficient + self.cliprange = cliprange + self.discount_factor = discount_factor + self.normalize_advantage = normalize_advantage + self.gae_lambda = gae_lambda + + def instantiate(self, **extra_args): + """ Instantiate the model """ + policy = self.policy.instantiate(**extra_args) + + return PPO( + policy=policy, + entropy_coefficient=self.entropy_coefficient, + value_coefficient=self.value_coefficient, + cliprange=self.cliprange, + discount_factor=self.discount_factor, + normalize_advantage=self.normalize_advantage, + gae_lambda=self.gae_lambda, + ) + + +def create(policy: BackboneModel, + entropy_coefficient, value_coefficient, cliprange, discount_factor: float, + normalize_advantage: bool = True, gae_lambda: float = 1.0): + """ Vel factory function """ + return PPOFactory( + policy=policy, + entropy_coefficient=entropy_coefficient, + value_coefficient=value_coefficient, + cliprange=cliprange, + discount_factor=discount_factor, + normalize_advantage=normalize_advantage, + gae_lambda=gae_lambda + ) + diff --git a/vel/rl/algo/trpo.py b/vel/rl/algo/trpo.py new file mode 100644 index 
00000000..6c92d9dc --- /dev/null +++ b/vel/rl/algo/trpo.py @@ -0,0 +1,413 @@ +import gym +import numpy as np +import itertools as it + +import torch +import torch.autograd as autograd +import torch.nn.functional as F +import torch.nn.utils +import typing + +from vel.api import BatchInfo, VelOptimizer, BackboneModel, LinearBackboneModel, OptimizerFactory, ModelFactory +from vel.calc.function import explained_variance +from vel.metric.base import AveragingNamedMetric +from vel.module.input.identity import IdentityFactory + +from vel.rl.api import Rollout, Trajectories, RlPolicy +from vel.rl.discount_bootstrap import discount_bootstrap_gae +from vel.rl.module.stochastic_action_head import StochasticActionHead +from vel.rl.module.value_head import ValueHead + + +def p2v(params): + """ Parameters to vector - shorthand utility version """ + return torch.nn.utils.parameters_to_vector(params) + + +def v2p(vector, params): + """ Vector to parameters - shorthand utility version """ + return torch.nn.utils.vector_to_parameters(vector, params) + + +def conjugate_gradient_method(matrix_vector_operator, loss_gradient, nsteps, rdotr_tol=1e-10): + """ Conjugate gradient algorithm """ + x = torch.zeros_like(loss_gradient) + + r = loss_gradient.clone() + p = loss_gradient.clone() + + rdotr = torch.dot(r, r) + + for i in range(nsteps): + avp = matrix_vector_operator(p) + alpha = rdotr / torch.dot(p, avp) + + x += alpha * p + r -= alpha * avp + + new_rdotr = torch.dot(r, r) + betta = new_rdotr / rdotr + p = r + betta * p + rdotr = new_rdotr + + if rdotr < rdotr_tol: + break + + return x + + +class TRPO(RlPolicy): + """ Trust Region Policy Optimization - https://arxiv.org/abs/1502.05477 """ + + def __init__(self, + input_block: BackboneModel, + policy_backbone: LinearBackboneModel, value_backbone: LinearBackboneModel, + action_space: gym.Space, + max_kl, cg_iters, line_search_iters, cg_damping, entropy_coefficient, vf_iters, + discount_factor, gae_lambda, improvement_acceptance_ratio): + super().__init__(discount_factor) + + self.input_block = input_block + self.policy_backbone = policy_backbone + self.value_backbone = value_backbone + + self.action_head = StochasticActionHead( + action_space=action_space, + input_dim=self.policy_backbone.output_dim + ) + + self.value_head = ValueHead(input_dim=self.value_backbone.output_dim) + + self.mak_kl = max_kl + self.cg_iters = cg_iters + self.line_search_iters = line_search_iters + self.cg_damping = cg_damping + self.entropy_coefficient = entropy_coefficient + self.vf_iters = vf_iters + self.gae_lambda = gae_lambda + self.improvement_acceptance_ratio = improvement_acceptance_ratio + + def reset_weights(self): + """ Initialize properly model weights """ + self.input_block.reset_weights() + + self.policy_backbone.reset_weights() + self.value_backbone.reset_weights() + + self.action_head.reset_weights() + self.value_head.reset_weights() + + def forward(self, observations): + """ Calculate model outputs """ + input_data = self.input_block(observations) + + policy_base_output = self.policy_backbone(input_data) + value_base_output = self.value_backbone(input_data) + + action_output = self.action_head(policy_base_output) + value_output = self.value_head(value_base_output) + + return action_output, value_output + + def value(self, observations, state=None): + """ Calculate only value head for given state """ + input_data = self.input_block(observations) + base_output = self.value_backbone(input_data) + value_output = self.value_head(base_output) + return value_output + + def 
policy(self, observations): + """ Calculate only action head for given state """ + input_data = self.input_block(observations) + policy_base_output = self.policy_backbone(input_data) + policy_params = self.action_head(policy_base_output) + return policy_params + + def act(self, observation, state=None, deterministic=False): + """ Select actions based on model's output """ + action_pd_params, value_output = self(observation) + actions = self.action_head.sample(action_pd_params, deterministic=deterministic) + + # log likelihood of selected action + logprobs = self.action_head.logprob(actions, action_pd_params) + + return { + 'actions': actions, + 'values': value_output, + 'action:logprobs': logprobs + } + + def create_optimizer(self, optimizer_factory: OptimizerFactory) -> VelOptimizer: + """ Create optimizer for the purpose of optimizing this model """ + parameters = filter(lambda p: p.requires_grad, self.value_parameters()) + return optimizer_factory.instantiate(parameters) + + def policy_parameters(self): + """ Parameters of policy """ + return it.chain( + self.input_block.parameters(), + self.policy_backbone.parameters(), + self.action_head.parameters() + ) + + def value_parameters(self): + """ Parameters of value function """ + return it.chain( + self.input_block.parameters(), + self.value_backbone.parameters(), + self.value_head.parameters() + ) + + def process_rollout(self, rollout: Rollout): + """ Process rollout for optimization before any chunking/shuffling """ + assert isinstance(rollout, Trajectories), "PPO requires trajectory rollouts" + + advantages = discount_bootstrap_gae( + rewards_buffer=rollout.transition_tensors['rewards'], + dones_buffer=rollout.transition_tensors['dones'], + values_buffer=rollout.transition_tensors['values'], + final_values=rollout.rollout_tensors['final_values'], + discount_factor=self.discount_factor, + gae_lambda=self.gae_lambda, + number_of_steps=rollout.num_steps + ) + + returns = advantages + rollout.transition_tensors['values'] + + rollout.transition_tensors['advantages'] = advantages + rollout.transition_tensors['returns'] = returns + + return rollout + + def optimize(self, batch_info: BatchInfo, rollout: Rollout) -> dict: + """ Single optimization step for a model """ + rollout = rollout.to_transitions() + + observations = rollout.batch_tensor('observations') + returns = rollout.batch_tensor('returns') + + # Evaluate model on the observations + action_pd_params = self.policy(observations) + policy_entropy = torch.mean(self.action_head.entropy(action_pd_params)) + + policy_loss = self.calc_policy_loss(action_pd_params, policy_entropy, rollout) + policy_grad = p2v(autograd.grad(policy_loss, self.policy_parameters(), retain_graph=True)).detach() + + # Calculate gradient of KL divergence of model with fixed version of itself + # Value of kl_divergence will be 0, but what we need is the gradient, actually the 2nd derivarive + kl_divergence = torch.mean(self.action_head.kl_divergence(action_pd_params.detach(), action_pd_params)) + kl_divergence_gradient = p2v(torch.autograd.grad(kl_divergence, self.policy_parameters(), create_graph=True)) + + step_direction = conjugate_gradient_method( + matrix_vector_operator=lambda x: self.fisher_vector_product(x, kl_divergence_gradient), + # Because we want to decrease the loss, we want to go into the direction of -gradient + loss_gradient=-policy_grad, + nsteps=self.cg_iters + ) + + shs = 0.5 * step_direction @ self.fisher_vector_product(step_direction, kl_divergence_gradient) + lm = torch.sqrt(shs / self.mak_kl) 
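# Illustrative aside (not part of this diff): with s = step_direction and H the
# Hessian of the KL divergence (the Fisher matrix), shs above equals 0.5 * s^T H s.
# Near the current parameters KL(theta, theta + d) ~= 0.5 * d^T H d, so dividing s
# by lm = sqrt(shs / max_kl) yields a step whose quadratic KL estimate sits exactly
# on the trust-region boundary:
#
#   0.5 * (s / lm)^T H (s / lm) = shs / lm**2 = shs / (shs / max_kl) = max_kl
#
# The line search below then backs this full step off by powers of 0.5 until the
# surrogate loss actually improves and the measured KL stays within 1.5 * max_kl.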
+ full_step = step_direction / lm + + # Because we want to decrease the loss, we want to go into the direction of -gradient + expected_improvement = (-policy_grad) @ full_step + original_parameter_vec = p2v(self.policy_parameters()).detach_() + + (policy_optimization_success, ratio, policy_loss_improvement, new_policy_loss, kl_divergence_step) = ( + self.line_search( + rollout, policy_loss, action_pd_params, original_parameter_vec, full_step, expected_improvement + ) + ) + + gradient_norms = [] + + for i in range(self.vf_iters): + batch_info.optimizer.zero_grad() + value_loss = self.value_loss(observations, returns) + + value_loss.backward() + + batch_info.optimizer.step(closure=None) + + if gradient_norms: + gradient_norm = np.mean(gradient_norms) + else: + gradient_norm = 0.0 + + # noinspection PyUnboundLocalVariable + return { + 'new_policy_loss': new_policy_loss.item(), + 'policy_entropy': policy_entropy.item(), + 'value_loss': value_loss.item(), + 'policy_optimization_success': float(policy_optimization_success), + 'policy_improvement_ratio': ratio.item(), + 'kl_divergence_step': kl_divergence_step.item(), + 'policy_loss_improvement': policy_loss_improvement.item(), + 'grad_norm': gradient_norm, + 'advantage_norm': torch.norm(rollout.batch_tensor('advantages')).item(), + 'explained_variance': explained_variance(returns, rollout.batch_tensor('values')) + } + + def line_search(self, rollout, original_policy_loss, original_policy_params, original_parameter_vec, + full_step, expected_improvement_full): + """ Find the right stepsize to make sure policy improves """ + current_parameter_vec = original_parameter_vec.clone() + + for idx in range(self.line_search_iters): + stepsize = 0.5 ** idx + + new_parameter_vec = current_parameter_vec + stepsize * full_step + + # Update model parameters + v2p(new_parameter_vec, self.policy_parameters()) + + # Calculate new loss + with torch.no_grad(): + policy_params = self.policy(rollout.batch_tensor('observations')) + policy_entropy = torch.mean(self.action_head.entropy(policy_params)) + kl_divergence = torch.mean(self.action_head.kl_divergence(original_policy_params, policy_params)) + + new_loss = self.calc_policy_loss(policy_params, policy_entropy, rollout) + + actual_improvement = original_policy_loss - new_loss + expected_improvement = expected_improvement_full * stepsize + + ratio = actual_improvement / expected_improvement + + if kl_divergence.item() > self.mak_kl * 1.5: + # KL divergence bound exceeded + continue + elif ratio < expected_improvement: + # Not enough loss improvement + continue + else: + # Optimization successful + return True, ratio, actual_improvement, new_loss, kl_divergence + + # Optimization failed, revert to initial parameters + v2p(original_parameter_vec, self.policy_parameters()) + return False, torch.tensor(0.0), torch.tensor(0.0), torch.tensor(0.0), torch.tensor(0.0) + + def fisher_vector_product(self, vector, kl_divergence_gradient): + """ Calculate product Hessian @ vector """ + assert not vector.requires_grad, "Vector must not propagate gradient" + dot_product = vector @ kl_divergence_gradient + + # at least one dimension spans across two contiguous subspaces + double_gradient = torch.autograd.grad(dot_product, self.policy_parameters(), retain_graph=True) + fvp = p2v(x.contiguous() for x in double_gradient) + + return fvp + vector * self.cg_damping + + def value_loss(self, observations, returns): + """ Loss of value function head """ + value_outputs = self.value(observations) + value_loss = 0.5 * 
F.mse_loss(value_outputs, returns) + return value_loss + + def calc_policy_loss(self, policy_params, policy_entropy, rollout): + """ + Policy gradient loss - calculate from probability distribution + + Calculate surrogate loss - advantage * policy_probability / fixed_initial_policy_probability + + Because we operate with logarithm of -probability (neglogp) we do + - advantage * exp(fixed_neglogps - model_neglogps) + """ + actions = rollout.batch_tensor('actions') + advantages = rollout.batch_tensor('advantages') + fixed_logprobs = rollout.batch_tensor('action:logprobs') + + model_logprobs = self.action_head.logprob(actions, policy_params) + + # Normalize advantages + advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-8) + + # We put - in front because we want to maximize the surrogate objective + policy_loss = -advantages * torch.exp(model_logprobs - fixed_logprobs) + + return policy_loss.mean() - policy_entropy * self.entropy_coefficient + + def metrics(self) -> list: + """ List of metrics to track for this learning process """ + return [ + AveragingNamedMetric("new_policy_loss", scope="model"), + AveragingNamedMetric("policy_entropy", scope="model"), + AveragingNamedMetric("value_loss", scope="model"), + AveragingNamedMetric("policy_optimization_success", scope="model"), + AveragingNamedMetric("policy_improvement_ratio", scope="model"), + AveragingNamedMetric("kl_divergence_step", scope="model"), + AveragingNamedMetric("policy_loss_improvement", scope="model"), + AveragingNamedMetric("advantage_norm", scope="model"), + AveragingNamedMetric("explained_variance", scope="model") + ] + + +class TRPOFactory(ModelFactory): + """ Factory class for policy gradient models """ + def __init__(self, input_block, policy_backbone: ModelFactory, value_backbone: ModelFactory, + max_kl, cg_iters, line_search_iters, cg_damping, entropy_coefficient, vf_iters, + discount_factor, gae_lambda, improvement_acceptance_ratio): + self.policy_backbone = policy_backbone + self.value_backbone = value_backbone + self.input_block = input_block + self.entropy_coefficient = entropy_coefficient + + self.mak_kl = max_kl + self.cg_iters = cg_iters + self.line_search_iters = line_search_iters + self.cg_damping = cg_damping + self.vf_iters = vf_iters + self.discount_factor = discount_factor + self.gae_lambda = gae_lambda + self.improvement_acceptance_ratio = improvement_acceptance_ratio + + def instantiate(self, **extra_args): + """ Instantiate the model """ + action_space = extra_args.pop('action_space') + + input_block = self.input_block.instantiate() + + policy_backbone = self.policy_backbone.instantiate(**extra_args) + value_backbone = self.value_backbone.instantiate(**extra_args) + + return TRPO( + input_block=input_block, + policy_backbone=policy_backbone, + value_backbone=value_backbone, + action_space=action_space, + max_kl=self.mak_kl, + cg_iters=self.cg_iters, + line_search_iters=self.line_search_iters, + cg_damping=self.cg_damping, + entropy_coefficient=self.entropy_coefficient, + vf_iters=self.vf_iters, + discount_factor=self.discount_factor, + gae_lambda=self.gae_lambda, + improvement_acceptance_ratio=self.improvement_acceptance_ratio + ) + + +def create(policy_backbone: ModelFactory, value_backbone: ModelFactory, + max_kl, cg_iters, line_search_iters, cg_damping, entropy_coefficient, vf_iters, + discount_factor, gae_lambda, improvement_acceptance_ratio, + input_block: typing.Optional[ModelFactory] = None): + """ Vel factory function """ + if input_block is None: + input_block = 
IdentityFactory() + + return TRPOFactory( + input_block=input_block, + policy_backbone=policy_backbone, + value_backbone=value_backbone, + max_kl=max_kl, + cg_iters=cg_iters, + line_search_iters=line_search_iters, + cg_damping=cg_damping, + entropy_coefficient=entropy_coefficient, + vf_iters=vf_iters, + discount_factor=discount_factor, + gae_lambda=gae_lambda, + improvement_acceptance_ratio=improvement_acceptance_ratio, + ) + diff --git a/vel/rl/api/__init__.py b/vel/rl/api/__init__.py index 4e80755c..699a8bdb 100644 --- a/vel/rl/api/__init__.py +++ b/vel/rl/api/__init__.py @@ -1,8 +1,6 @@ -from .algo_base import AlgoBase, OptimizerAlgoBase from .env_base import EnvFactory, VecEnvFactory from .env_roller import EnvRollerBase, ReplayEnvRollerBase, EnvRollerFactoryBase, ReplayEnvRollerFactoryBase -from .evaluator import Evaluator -from .policy import Policy -from .reinforcer_base import ReinforcerBase, ReinforcerFactory -from .replay_buffer import ReplayBuffer, ReplayBufferFactory from .rollout import Rollout, Trajectories, Transitions +from .rl_model import RlPolicy +from .reinforcer_base import Reinforcer, ReinforcerFactory +from .replay_buffer import ReplayBuffer, ReplayBufferFactory diff --git a/vel/rl/api/algo_base.py b/vel/rl/api/algo_base.py deleted file mode 100644 index b25374b1..00000000 --- a/vel/rl/api/algo_base.py +++ /dev/null @@ -1,47 +0,0 @@ - -class AlgoBase: - """ Base class for algo reinforcement calculations """ - - def initialize(self, training_info, model, environment, device): - """ Initialize algo from reinforcer settings """ - pass - - def process_rollout(self, batch_info, rollout): - """ Process rollout for ALGO before any chunking/shuffling """ - return rollout - - def optimize(self, batch_info, device, model, rollout): - """ Single optimization step for a model """ - raise NotImplementedError - - def metrics(self) -> list: - """ List of metrics to track for this learning process """ - return [] - - -class OptimizerAlgoBase(AlgoBase): - """ RL algo that does a simple optimizer update """ - - def calculate_gradient(self, batch_info, device, model, rollout): - """ Calculate loss of the supplied rollout """ - raise NotImplementedError - - def post_optimization_step(self, batch_info, device, model, rollout): - """ Steps to take after optimization has been done""" - pass - - def optimize(self, batch_info, device, model, rollout): - """ Single optimization step for a model """ - batch_info.optimizer.zero_grad() - - batch_result = self.calculate_gradient(batch_info=batch_info, device=device, model=model, rollout=rollout) - - batch_info.optimizer.step(closure=None) - - self.post_optimization_step(batch_info, device, model, rollout) - - return batch_result - - def metrics(self) -> list: - """ List of metrics to track for this learning process """ - return [] diff --git a/vel/rl/api/evaluator.py b/vel/rl/api/evaluator.py deleted file mode 100644 index e0c15d1c..00000000 --- a/vel/rl/api/evaluator.py +++ /dev/null @@ -1,154 +0,0 @@ -class EvaluatorMeta(type): - """ Metaclass for Evaluator - gathers all provider methods in a class attribute """ - def __new__(mcs, name, bases, attributes): - providers = {} - use_cache = {} - - for name, attr in attributes.items(): - if callable(attr): - proper_name = getattr(attr, '_vel_evaluator_provides', None) - if proper_name is not None: - providers[proper_name] = attr - - cache = getattr(attr, '_vel_use_cache', None) - if cache is not None: - use_cache[proper_name] = cache - - attributes['_use_cache'] = use_cache - 
attributes['_providers'] = providers - - return super().__new__(mcs, name, bases, attributes) - - -class Evaluator(metaclass=EvaluatorMeta): - """ - Different models may have different outputs and approach evaluating environment differently. - - Evaluator is an object that abstracts over that, providing unified interface between algorithms - which just need certain outputs from models and models that may provide them in different ways. - - I'll try to maintain here a dictionary of possible common values that can be requested from the evaluator. - Rollouts should communicate using the same names - - - rollout:estimated_returns - - Bootstrapped return (sum of discounted future rewards) estimated using returns and value estimates - - rollout:values - - Value estimates from the model that was used to generate the rollout - - rollout:estimated_advantages - - Advantage of a rollout (state, action) pair by the model that was used to generate the rollout - - rollout:actions - - Actions performed in a rollout - - rollout:logprobs - - Logarithm of probability for **all** actions of a policy used to perform rollout - (defined only for finite action spaces) - - rollout:action:logprobs - - Logarithm of probability only for selected actions - - rollout:dones - - Whether given observation is last in a trajectory - - rollout:dones - - Raw rewards received from the environment in this learning process - - rollout:final_values - - Value estimates for observation after final observation in the rollout - - rollout:observations - - Observations of the rollout - - rollout:observations_next - - Next observations in the rollout - - rollout:weights - - Error weights of rollout samples - - rollout:q - - Action-values for each action in current space - (defined only for finite action spaces) - - - model:logprobs - - Logarithm of probability of **all** actions in an environment as in current model policy - (defined only for finite action spaces) - - model:q - - Action-value for **all** actions - (defined only for finite action spaces) - - model:q_dist - - Action-value histogram for **all** actions - (defined only for finite action spaces) - - model:q_dist_next - - Action-value histogram for **all** actions from the 'next' state in the rollout - (defined only for finite action spaces) - - model:q_next - - Action-value for **all** actions from the 'next' state in the rollout - (defined only for finite action spaces) - - model:entropy - - Policy entropy for selected states - - model:action:q - - Action-value for actions selected in the rollout - - model:model_action:q - - Action-value for actions that model would perform (Deterministic policy only) - - model:actions - - Actions that model would perform (Deterministic policy only) - - model:action:logprobs - - Logarithm of probability for performed actions - - model:policy_params - - Parametrizations of policy for each state - - model:values - - Value estimates for each state, estimated by the current model - - model:values_next - - Value estimates for 'next' state of each transition - """ - - @staticmethod - def provides(name, cache=True): - """ Function decorator - value provided by the evaluator """ - def decorator(func): - func._vel_evaluator_provides = name - func._vel_use_cache = cache - - return func - - return decorator - - def __init__(self, rollout): - self._storage = {} - self.rollout = rollout - - def is_provided(self, name): - """ Capability check if evaluator provides given value """ - if name in self._storage: - return True - elif name in self._providers: 
- return True - elif name.startswith('rollout:'): - rollout_name = name[8:] - return self.is_provided(rollout_name) - else: - return False - - def get(self, name, cache=True): - """ - Return a value from this evaluator. - - Because tensor calculated is cached, it may lead to suble bugs if the same value is used multiple times - with and without no_grad() context. - - It is advised in such cases to not use no_grad and stick to .detach() - - If you want to disable the cache you can pass 'cache=False' to the decorator to disable it - for the attribute or to the get() function to disable it just for that call - """ - if name in self._use_cache and not self._use_cache[name]: - cache = False - - if name in self._storage and cache: - value = self._storage[name] - elif name in self._providers: - value = self._providers[name](self) - elif name.startswith('rollout:'): - rollout_name = name[8:] - value = self.rollout.batch_tensor(rollout_name) - else: - raise RuntimeError(f"Key {name} is not provided by this evaluator") - - if cache: - self._storage[name] = value - - return value - - def provide(self, name, value): - """ Provide given value under specified name """ - self._storage[name] = value diff --git a/vel/rl/api/policy.py b/vel/rl/api/policy.py deleted file mode 100644 index 34082364..00000000 --- a/vel/rl/api/policy.py +++ /dev/null @@ -1,22 +0,0 @@ -import torch -from vel.api import Model - - -class Policy(Model): - """ Base class for reinforcement learning policies """ - - def act(self, observation, state=None, deterministic=False) -> dict: - """ Make an action based on the observation from the environment. """ - raise NotImplementedError - - def value(self, observation, state=None) -> torch.tensor: - """ Return the expected reward from current state """ - return self.act(observation=observation, state=state)['value'] - - def reset_state(self, state, dones): - """ Reset the state after the episode has been terminated """ - raise NotImplementedError - - def evaluate(self, rollout) -> object: - """ Return an evaluator object evaluating given rollout that may be used for gradient computations etc. """ - raise NotImplementedError diff --git a/vel/rl/api/reinforcer_base.py b/vel/rl/api/reinforcer_base.py index 1db2ed34..3f9e7c66 100644 --- a/vel/rl/api/reinforcer_base.py +++ b/vel/rl/api/reinforcer_base.py @@ -1,9 +1,10 @@ import torch -from vel.api import TrainingInfo, EpochInfo, BatchInfo, Model +from vel.api import TrainingInfo, EpochInfo, BatchInfo +from vel.rl.api import RlPolicy -class ReinforcerBase: +class Reinforcer: """ Manages training process of a single model. Learner version for reinforcement-learning problems. 
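(Illustrative aside, not part of this diff.) Together with the new vel/rl/api/rl_model.py below, this change moves the optimization step out of a separate AlgoBase and into the policy itself: the reinforcer now exposes a `policy` (an `RlPolicy`) rather than a `model`, and a single training step reduces roughly to the sketch below, assuming a `rollout` produced by an env roller and a `batch_info` whose optimizer came from `policy.create_optimizer(optimizer_factory)`:

    rollout = policy.process_rollout(rollout)         # e.g. attach GAE advantages and returns
    metrics = policy.optimize(batch_info, rollout)    # zero_grad -> calculate_gradient -> optimizer.step()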
@@ -26,13 +27,13 @@ def metrics(self) -> list: raise NotImplementedError @property - def model(self) -> Model: + def policy(self) -> RlPolicy: """ Model trained by this reinforcer """ raise NotImplementedError class ReinforcerFactory: """ A reinforcer factory """ - def instantiate(self, device: torch.device) -> ReinforcerBase: + def instantiate(self, device: torch.device) -> Reinforcer: """ Create new reinforcer instance """ raise NotImplementedError diff --git a/vel/rl/api/rl_model.py b/vel/rl/api/rl_model.py new file mode 100644 index 00000000..0e0eb609 --- /dev/null +++ b/vel/rl/api/rl_model.py @@ -0,0 +1,65 @@ +from vel.api import Model, VelOptimizer, OptimizerFactory, BatchInfo +from vel.rl.api import Rollout + + +class RlPolicy(Model): + """ Base class for reinforcement learning policies """ + + def __init__(self, discount_factor: float): + super().__init__() + + self.discount_factor = discount_factor + + def process_rollout(self, rollout: Rollout) -> Rollout: + """ Process rollout for optimization before any chunking/shuffling """ + raise NotImplementedError + + def act(self, observation, state=None, deterministic=False) -> dict: + """ + Make an action based on the observation from the environment. + Returned dictionary must have 'actions' key that contains an action per + each env in the observations + """ + raise NotImplementedError + + def create_optimizer(self, optimizer_factory: OptimizerFactory) -> VelOptimizer: + """ Create optimizer for the purpose of optimizing this model """ + parameters = filter(lambda p: p.requires_grad, self.parameters()) + return optimizer_factory.instantiate(parameters) + + def optimize(self, batch_info: BatchInfo, rollout: Rollout) -> dict: + """ + Perform one step of optimization of the policy based on provided rollout data + :returns a dictionary of metrics + """ + batch_info.optimizer.zero_grad() + + metrics = self.calculate_gradient(batch_info, rollout) + + opt_metrics = batch_info.optimizer.step() + + for key, value in opt_metrics.items(): + metrics[key] = value + + return metrics + + def calculate_gradient(self, batch_info: BatchInfo, rollout: Rollout) -> dict: + """ + Calculate gradient for given batch of training data. 
+ :returns a dictionary of metrics + """ + raise NotImplementedError + + def reset_state(self, state, dones): + """ Reset the state after the episode has been terminated """ + raise NotImplementedError + + #################################################################################################################### + # Utility Methods - that provide default implementations but may be short circuited by some implementations + def value(self, observation, state=None): + """ Return value for given observation """ + return self.act(observation, state=state)['values'] + + def action(self, observation, state=None, deterministic=False): + """ Return policy action for given observation """ + return self.act(observation, state=state, deterministic=deterministic)['actions'] diff --git a/vel/rl/command/rl_train_command.py b/vel/rl/command/rl_train_command.py index 539cf5d3..a879a0f0 100644 --- a/vel/rl/command/rl_train_command.py +++ b/vel/rl/command/rl_train_command.py @@ -1,8 +1,9 @@ import typing -from vel.api import ModelConfig, EpochInfo, TrainingInfo, BatchInfo, OptimizerFactory, Storage, Callback -from vel.rl.api import ReinforcerFactory +from vel.api import ModelConfig, EpochInfo, TrainingInfo, BatchInfo, OptimizerFactory, Storage, Callback, VelOptimizer from vel.callback.time_tracker import TimeTracker +from vel.metric.samples_per_sec import SamplesPerSec +from vel.rl.api import ReinforcerFactory, Reinforcer import vel.openai.baselines.logger as openai_logger @@ -65,21 +66,13 @@ def run(self): # Reinforcer is the learner for the reinforcement learning model reinforcer = self.reinforcer.instantiate(device) - optimizer = self.optimizer_factory.instantiate(reinforcer.model) + optimizer = reinforcer.policy.create_optimizer(self.optimizer_factory) - # All callbacks used for learning - callbacks = self.gather_callbacks(optimizer) - # Metrics to track through this training - metrics = reinforcer.metrics() - - training_info = self.resume_training(reinforcer, callbacks, metrics) + training_info = self.start_training(reinforcer, optimizer) reinforcer.initialize_training(training_info) training_info.on_train_begin() - if training_info.optimizer_initial_state: - optimizer.load_state_dict(training_info.optimizer_initial_state) - global_epoch_idx = training_info.start_epoch_idx + 1 while training_info['frames'] < self.total_frames: @@ -95,7 +88,7 @@ def run(self): if self.openai_logging: self._openai_logging(epoch_info.result) - self.storage.checkpoint(epoch_info, reinforcer.model) + self.storage.checkpoint(epoch_info, reinforcer.policy) global_epoch_idx += 1 @@ -103,39 +96,47 @@ def run(self): return training_info - def gather_callbacks(self, optimizer) -> list: - """ Gather all the callbacks to be used in this training run """ + def start_training(self, reinforcer: Reinforcer, optimizer: VelOptimizer) -> TrainingInfo: + """ Possibly resume training from a saved state from the storage """ + + if self.model_config.resume_training: + start_epoch = self.storage.last_epoch_idx() + else: + start_epoch = 0 + callbacks = [FrameTracker(self.total_frames), TimeTracker()] if self.scheduler_factory is not None: - callbacks.append(self.scheduler_factory.instantiate(optimizer)) + callbacks.extend( + optimizer.create_scheduler(scheduler_factory=self.scheduler_factory, last_epoch=start_epoch-1) + ) callbacks.extend(self.callbacks) callbacks.extend(self.storage.streaming_callbacks()) - return callbacks - - def resume_training(self, reinforcer, callbacks, metrics) -> TrainingInfo: - """ Possibly resume training 
from a saved state from the storage """ - if self.model_config.resume_training: - start_epoch = self.storage.last_epoch_idx() - else: - start_epoch = 0 + # Metrics to track through this training + metrics = reinforcer.metrics() + optimizer.metrics() training_info = TrainingInfo( start_epoch_idx=start_epoch, - run_name=self.model_config.run_name, - metrics=metrics, callbacks=callbacks + metrics=metrics, + callbacks=callbacks ) if start_epoch == 0: + self.model_config.write_meta() self.storage.reset(self.model_config.render_configuration()) training_info.initialize() reinforcer.initialize_training(training_info) else: model_state, hidden_state = self.storage.load(training_info) + + training_info.restore(hidden_state) reinforcer.initialize_training(training_info, model_state, hidden_state) + if 'optimizer' in hidden_state: + optimizer.load_state_dict(hidden_state['optimizer']) + return training_info def _openai_logging(self, epoch_result): diff --git a/vel/rl/env_roller/step_env_roller.py b/vel/rl/env_roller/step_env_roller.py index c4ec4700..f749a895 100644 --- a/vel/rl/env_roller/step_env_roller.py +++ b/vel/rl/env_roller/step_env_roller.py @@ -3,7 +3,7 @@ from vel.api import BatchInfo from vel.openai.baselines.common.vec_env import VecEnv -from vel.rl.api import Trajectories, Rollout, EnvRollerBase, EnvRollerFactoryBase, Policy +from vel.rl.api import Trajectories, Rollout, EnvRollerBase, EnvRollerFactoryBase, RlPolicy from vel.rl.util.actor import PolicyActor from vel.util.tensor_accumulator import TensorAccumulator @@ -13,7 +13,7 @@ class StepEnvRoller(EnvRollerBase): Class calculating env rollouts. """ - def __init__(self, environment: VecEnv, policy: Policy, device: torch.device): + def __init__(self, environment: VecEnv, policy: RlPolicy, device: torch.device): self._environment = environment self.device = device diff --git a/vel/rl/env_roller/trajectory_replay_env_roller.py b/vel/rl/env_roller/trajectory_replay_env_roller.py index e0407a05..1f413f4f 100644 --- a/vel/rl/env_roller/trajectory_replay_env_roller.py +++ b/vel/rl/env_roller/trajectory_replay_env_roller.py @@ -4,7 +4,7 @@ from vel.api import BatchInfo from vel.openai.baselines.common.vec_env import VecEnv from vel.rl.api import ( - Trajectories, Rollout, ReplayEnvRollerBase, ReplayEnvRollerFactoryBase, ReplayBuffer, ReplayBufferFactory, Policy + Trajectories, Rollout, ReplayEnvRollerBase, ReplayEnvRollerFactoryBase, ReplayBuffer, ReplayBufferFactory, RlPolicy ) from vel.rl.util.actor import PolicyActor from vel.util.tensor_accumulator import TensorAccumulator @@ -17,7 +17,7 @@ class TrajectoryReplayEnvRoller(ReplayEnvRollerBase): Samples trajectories from the replay buffer (consecutive series of frames) """ - def __init__(self, environment: VecEnv, policy: Policy, device: torch.device, replay_buffer: ReplayBuffer): + def __init__(self, environment: VecEnv, policy: RlPolicy, device: torch.device, replay_buffer: ReplayBuffer): self._environment = environment self.device = device self.replay_buffer = replay_buffer diff --git a/vel/rl/env_roller/transition_replay_env_roller.py b/vel/rl/env_roller/transition_replay_env_roller.py index 25c1541a..14cb282e 100644 --- a/vel/rl/env_roller/transition_replay_env_roller.py +++ b/vel/rl/env_roller/transition_replay_env_roller.py @@ -7,7 +7,7 @@ from vel.openai.baselines.common.vec_env import VecEnv from vel.openai.baselines.common.running_mean_std import RunningMeanStd from vel.rl.api import ( - Trajectories, Rollout, ReplayEnvRollerBase, ReplayEnvRollerFactoryBase, ReplayBuffer, 
ReplayBufferFactory, Policy + Trajectories, Rollout, ReplayEnvRollerBase, ReplayEnvRollerFactoryBase, ReplayBuffer, ReplayBufferFactory, RlPolicy ) from vel.rl.util.actor import PolicyActor from vel.util.tensor_accumulator import TensorAccumulator @@ -20,7 +20,7 @@ class TransitionReplayEnvRoller(ReplayEnvRollerBase): Samples transitions from the replay buffer (individual frame transitions) """ - def __init__(self, environment: VecEnv, policy: Policy, device: torch.device, replay_buffer: ReplayBuffer, + def __init__(self, environment: VecEnv, policy: RlPolicy, device: torch.device, replay_buffer: ReplayBuffer, discount_factor: typing.Optional[float] = None, normalize_returns: bool = False, forward_steps: int = 1, action_noise: typing.Optional[nn.Module] = None): self._environment = environment diff --git a/vel/rl/policy/purgatory/deterministic_policy.py b/vel/rl/policy/purgatory/deterministic_policy.py index da7b31d0..58d908fe 100644 --- a/vel/rl/policy/purgatory/deterministic_policy.py +++ b/vel/rl/policy/purgatory/deterministic_policy.py @@ -5,7 +5,7 @@ from vel.api import LinearBackboneModel, ModelFactory, BackboneModel from vel.module.input.identity import IdentityFactory -from vel.rl.api import Rollout, Evaluator, RlModel +from vel.rl.api import Rollout, Evaluator, RlPolicy from vel.rl.module.deterministic_action_head import DeterministicActionHead from vel.rl.module.deterministic_critic_head import DeterministicCriticHead @@ -45,7 +45,7 @@ def model_action_q(self): return self.model.value(observations, rollout_actions) -class DeterministicPolicyModel(RlModel): +class DeterministicPolicyModel(RlPolicy): """ Deterministic Policy Gradient - model """ def __init__(self, input_block: BackboneModel, policy_backbone: LinearBackboneModel, diff --git a/vel/rl/policy/purgatory/stochastic_policy.py b/vel/rl/policy/purgatory/old_stochastic_policy.py similarity index 97% rename from vel/rl/policy/purgatory/stochastic_policy.py rename to vel/rl/policy/purgatory/old_stochastic_policy.py index 1788ffc6..4fc5a16b 100644 --- a/vel/rl/policy/purgatory/stochastic_policy.py +++ b/vel/rl/policy/purgatory/old_stochastic_policy.py @@ -3,7 +3,7 @@ from vel.api import LinearBackboneModel, ModelFactory, BackboneModel from vel.module.input.identity import IdentityFactory -from vel.rl.api import Rollout, Evaluator, RlModel +from vel.rl.api import Rollout, Evaluator, RlPolicy from vel.rl.module.action_head import StochasticActionHead from vel.rl.module.value_head import ValueHead @@ -33,7 +33,7 @@ def model_entropy(self): return self.model.entropy(policy_params) -class StochasticPolicyModel(RlModel): +class StochasticPolicyModel(RlPolicy): """ Most generic policy gradient model class with a set of common actor-critic heads that share a single backbone """ diff --git a/vel/rl/policy/stochastic_rnn_policy.py b/vel/rl/policy/purgatory/old_stochastic_rnn_policy.py similarity index 76% rename from vel/rl/policy/stochastic_rnn_policy.py rename to vel/rl/policy/purgatory/old_stochastic_rnn_policy.py index a1a87f2e..25551144 100644 --- a/vel/rl/policy/stochastic_rnn_policy.py +++ b/vel/rl/policy/purgatory/old_stochastic_rnn_policy.py @@ -4,22 +4,22 @@ from vel.api import LinearBackboneModel, ModelFactory, BackboneModel from vel.module.input.identity import IdentityFactory -from vel.rl.api import Rollout, Trajectories, Evaluator, Policy -from vel.rl.module.stochastic_action_head import StochasticActionHead +from vel.rl.api import Rollout, Trajectories, Evaluator, RlRnnModel +from vel.rl.module.action_head import 
StochasticActionHead from vel.rl.module.value_head import ValueHead class StochasticPolicyRnnEvaluator(Evaluator): """ Evaluate recurrent model from initial state """ - def __init__(self, model: 'StochasticRnnPolicy', rollout: Rollout): + def __init__(self, model: 'StochasticPolicyRnnModel', rollout: Rollout): assert isinstance(rollout, Trajectories), "For an RNN model, we must evaluate trajectories" super().__init__(rollout) self.model = model observation_trajectories = rollout.transition_tensors['observations'] - hidden_state = rollout.transition_tensors['state'][0] # Initial hidden state + hidden_state = rollout.rollout_tensors['initial_hidden_state'] action_accumulator = [] value_accumulator = [] @@ -45,10 +45,10 @@ def model_action_logprobs(self): @Evaluator.provides('model:entropy') def model_entropy(self): policy_params = self.get('model:policy_params') - return self.model.action_head.entropy(policy_params) + return self.model.entropy(policy_params) -class StochasticRnnPolicy(Policy): +class StochasticPolicyRnnModel(RlRnnModel): """ Most generic policy gradient model class with a set of common actor-critic heads that share a single backbone RNN version @@ -61,8 +61,6 @@ def __init__(self, input_block: BackboneModel, backbone: LinearBackboneModel, self.input_block = input_block self.backbone = backbone - assert self.backbone.is_stateful, "Must have a stateful backbone" - self.action_head = StochasticActionHead( action_space=action_space, input_dim=self.backbone.output_dim @@ -72,9 +70,9 @@ def __init__(self, input_block: BackboneModel, backbone: LinearBackboneModel, assert self.backbone.is_stateful, "Backbone must be a recurrent model" @property - def is_stateful(self) -> bool: - """ If the model has a state that needs to be fed between individual observations """ - return True + def state_dim(self) -> int: + """ Dimension of model state """ + return self.backbone.state_dim def reset_weights(self): """ Initialize properly model weights """ @@ -93,9 +91,9 @@ def forward(self, observations, state): return action_output, value_output, new_state - def act(self, observation, state=None, deterministic=False) -> dict: + def step(self, observations, state, deterministic=False): """ Select actions based on model's output """ - action_pd_params, value_output, new_state = self(observation, state) + action_pd_params, value_output, new_state = self(observations, state) actions = self.action_head.sample(action_pd_params, deterministic=deterministic) # log likelihood of selected action @@ -112,26 +110,25 @@ def evaluate(self, rollout: Rollout) -> Evaluator: """ Evaluate model on a rollout """ return StochasticPolicyRnnEvaluator(self, rollout) - def value(self, observation, state=None): + def logprob(self, action_sample, policy_params): + """ Calculate - log(prob) of selected actions """ + return self.action_head.logprob(action_sample, policy_params) + + def value(self, observations, state): """ Calculate only value head for given state """ - input_data = self.input_block(observation) + input_data = self.input_block(observations) base_output, new_state = self.backbone(input_data, state) value_output = self.value_head(base_output) return value_output - def reset_state(self, state, dones): - """ Reset the state after the episode has been terminated """ - if (dones > 0).any().item(): - zero_state = self.backbone.zero_state(dones.shape[0]).to(state.device) - dones_expanded = dones.unsqueeze(-1) - return state * (1 - dones_expanded) + zero_state * dones_expanded - else: - return state + def entropy(self, 
action_pd_params): + """ Entropy of a probability distribution """ + return self.action_head.entropy(action_pd_params) -class StochasticRnnPolicyFactory(ModelFactory): +class PolicyGradientRnnModelFactory(ModelFactory): """ Factory class for policy gradient models """ def __init__(self, input_block: ModelFactory, backbone: ModelFactory): self.input_block = input_block @@ -142,7 +139,7 @@ def instantiate(self, **extra_args): input_block = self.input_block.instantiate() backbone = self.backbone.instantiate(**extra_args) - return StochasticRnnPolicy(input_block, backbone, extra_args['action_space']) + return StochasticPolicyRnnModel(input_block, backbone, extra_args['action_space']) def create(backbone: ModelFactory, input_block: typing.Optional[ModelFactory] = None): @@ -150,7 +147,7 @@ def create(backbone: ModelFactory, input_block: typing.Optional[ModelFactory] = if input_block is None: input_block = IdentityFactory() - return StochasticRnnPolicyFactory( + return PolicyGradientRnnModelFactory( input_block=input_block, backbone=backbone ) diff --git a/vel/rl/policy/purgatory/q_distributional_policy.py b/vel/rl/policy/purgatory/q_distributional_policy.py index 209b002b..4dde37cf 100644 --- a/vel/rl/policy/purgatory/q_distributional_policy.py +++ b/vel/rl/policy/purgatory/q_distributional_policy.py @@ -3,7 +3,7 @@ from vel.api import LinearBackboneModel, ModelFactory, BackboneModel from vel.module.input.identity import IdentityFactory -from vel.rl.api import Rollout, RlModel, Evaluator +from vel.rl.api import Rollout, RlPolicy, Evaluator from vel.rl.module.q_distributional_head import QDistributionalHead @@ -53,7 +53,7 @@ def model_q_dist_next(self): return self.model(observations) -class QDistributionalModel(RlModel): +class QDistributionalModel(RlPolicy): """ A deterministic greedy action-value model that learns a value function distribution rather than just an expectation. diff --git a/vel/rl/policy/purgatory/q_model.py b/vel/rl/policy/purgatory/q_model.py index 7472e0bb..d162a4de 100644 --- a/vel/rl/policy/purgatory/q_model.py +++ b/vel/rl/policy/purgatory/q_model.py @@ -3,7 +3,7 @@ from vel.api import LinearBackboneModel, ModelFactory, BackboneModel from vel.module.input.identity import IdentityFactory -from vel.rl.api import Rollout, RlModel, Evaluator +from vel.rl.api import Rollout, RlPolicy, Evaluator from vel.rl.module.q_head import QHead @@ -33,7 +33,7 @@ def model_q_next(self): return self.model(observations) -class QModel(RlModel): +class QModel(RlPolicy): """ Simple deterministic greedy action-value model. Supports only discrete action spaces (ones that can be enumerated) diff --git a/vel/rl/policy/purgatory/q_noisy_model.py b/vel/rl/policy/purgatory/q_noisy_model.py index b2d747bb..2ef6aab3 100644 --- a/vel/rl/policy/purgatory/q_noisy_model.py +++ b/vel/rl/policy/purgatory/q_noisy_model.py @@ -3,12 +3,12 @@ from vel.api import LinearBackboneModel, ModelFactory, BackboneModel from vel.module.input.identity import IdentityFactory -from vel.rl.api import Rollout, RlModel, Evaluator +from vel.rl.api import Rollout, RlPolicy, Evaluator from vel.rl.model.q_model import QModelEvaluator from vel.rl.module.q_noisy_head import QNoisyHead -class NoisyQModel(RlModel): +class NoisyQModel(RlPolicy): """ NoisyNets action-value model. 
Supports only discrete action spaces (ones that can be enumerated) diff --git a/vel/rl/policy/purgatory/stochastic_rnn_policy.py b/vel/rl/policy/purgatory/stochastic_rnn_policy.py index 25551144..256346da 100644 --- a/vel/rl/policy/purgatory/stochastic_rnn_policy.py +++ b/vel/rl/policy/purgatory/stochastic_rnn_policy.py @@ -4,22 +4,22 @@ from vel.api import LinearBackboneModel, ModelFactory, BackboneModel from vel.module.input.identity import IdentityFactory -from vel.rl.api import Rollout, Trajectories, Evaluator, RlRnnModel -from vel.rl.module.action_head import StochasticActionHead +from vel.rl.api import Rollout, Trajectories, Evaluator, RlPolicy +from vel.rl.module.stochastic_action_head import StochasticActionHead from vel.rl.module.value_head import ValueHead class StochasticPolicyRnnEvaluator(Evaluator): """ Evaluate recurrent model from initial state """ - def __init__(self, model: 'StochasticPolicyRnnModel', rollout: Rollout): + def __init__(self, model: 'StochasticRnnPolicy', rollout: Rollout): assert isinstance(rollout, Trajectories), "For an RNN model, we must evaluate trajectories" super().__init__(rollout) self.model = model observation_trajectories = rollout.transition_tensors['observations'] - hidden_state = rollout.rollout_tensors['initial_hidden_state'] + hidden_state = rollout.transition_tensors['state'][0] # Initial hidden state action_accumulator = [] value_accumulator = [] @@ -45,10 +45,10 @@ def model_action_logprobs(self): @Evaluator.provides('model:entropy') def model_entropy(self): policy_params = self.get('model:policy_params') - return self.model.entropy(policy_params) + return self.model.action_head.entropy(policy_params) -class StochasticPolicyRnnModel(RlRnnModel): +class StochasticRnnPolicy(RlPolicy): """ Most generic policy gradient model class with a set of common actor-critic heads that share a single backbone RNN version @@ -61,6 +61,8 @@ def __init__(self, input_block: BackboneModel, backbone: LinearBackboneModel, self.input_block = input_block self.backbone = backbone + assert self.backbone.is_stateful, "Must have a stateful backbone" + self.action_head = StochasticActionHead( action_space=action_space, input_dim=self.backbone.output_dim @@ -70,9 +72,9 @@ def __init__(self, input_block: BackboneModel, backbone: LinearBackboneModel, assert self.backbone.is_stateful, "Backbone must be a recurrent model" @property - def state_dim(self) -> int: - """ Dimension of model state """ - return self.backbone.state_dim + def is_stateful(self) -> bool: + """ If the model has a state that needs to be fed between individual observations """ + return True def reset_weights(self): """ Initialize properly model weights """ @@ -91,9 +93,9 @@ def forward(self, observations, state): return action_output, value_output, new_state - def step(self, observations, state, deterministic=False): + def act(self, observation, state=None, deterministic=False) -> dict: """ Select actions based on model's output """ - action_pd_params, value_output, new_state = self(observations, state) + action_pd_params, value_output, new_state = self(observation, state) actions = self.action_head.sample(action_pd_params, deterministic=deterministic) # log likelihood of selected action @@ -110,25 +112,26 @@ def evaluate(self, rollout: Rollout) -> Evaluator: """ Evaluate model on a rollout """ return StochasticPolicyRnnEvaluator(self, rollout) - def logprob(self, action_sample, policy_params): - """ Calculate - log(prob) of selected actions """ - return self.action_head.logprob(action_sample, 
policy_params) - - def value(self, observations, state): + def value(self, observation, state=None): """ Calculate only value head for given state """ - input_data = self.input_block(observations) + input_data = self.input_block(observation) base_output, new_state = self.backbone(input_data, state) value_output = self.value_head(base_output) return value_output - def entropy(self, action_pd_params): - """ Entropy of a probability distribution """ - return self.action_head.entropy(action_pd_params) + def reset_state(self, state, dones): + """ Reset the state after the episode has been terminated """ + if (dones > 0).any().item(): + zero_state = self.backbone.zero_state(dones.shape[0]).to(state.device) + dones_expanded = dones.unsqueeze(-1) + return state * (1 - dones_expanded) + zero_state * dones_expanded + else: + return state -class PolicyGradientRnnModelFactory(ModelFactory): +class StochasticRnnPolicyFactory(ModelFactory): """ Factory class for policy gradient models """ def __init__(self, input_block: ModelFactory, backbone: ModelFactory): self.input_block = input_block @@ -139,7 +142,7 @@ def instantiate(self, **extra_args): input_block = self.input_block.instantiate() backbone = self.backbone.instantiate(**extra_args) - return StochasticPolicyRnnModel(input_block, backbone, extra_args['action_space']) + return StochasticRnnPolicy(input_block, backbone, extra_args['action_space']) def create(backbone: ModelFactory, input_block: typing.Optional[ModelFactory] = None): @@ -147,7 +150,7 @@ def create(backbone: ModelFactory, input_block: typing.Optional[ModelFactory] = if input_block is None: input_block = IdentityFactory() - return PolicyGradientRnnModelFactory( + return StochasticRnnPolicyFactory( input_block=input_block, backbone=backbone ) diff --git a/vel/rl/algo/policy_gradient/__init__.py b/vel/rl/policy/semipurgatory/__init__.py similarity index 100% rename from vel/rl/algo/policy_gradient/__init__.py rename to vel/rl/policy/semipurgatory/__init__.py diff --git a/vel/rl/algo/policy_gradient/a2c.py b/vel/rl/policy/semipurgatory/a2c_rnn.py similarity index 92% rename from vel/rl/algo/policy_gradient/a2c.py rename to vel/rl/policy/semipurgatory/a2c_rnn.py index fdbbbb61..fc38671a 100644 --- a/vel/rl/algo/policy_gradient/a2c.py +++ b/vel/rl/policy/semipurgatory/a2c_rnn.py @@ -9,8 +9,8 @@ class A2CPolicyGradient(OptimizerAlgoBase): """ Simplest policy gradient - calculate loss as an advantage of an actor versus value function """ - def __init__(self, entropy_coefficient, value_coefficient, max_grad_norm, discount_factor: float, gae_lambda=1.0): - super().__init__(max_grad_norm) + def __init__(self, entropy_coefficient, value_coefficient, discount_factor: float, gae_lambda=1.0): + super().__init__() self.entropy_coefficient = entropy_coefficient self.value_coefficient = value_coefficient @@ -82,12 +82,11 @@ def metrics(self) -> list: ] -def create(entropy_coefficient, value_coefficient, max_grad_norm, discount_factor, gae_lambda=1.0): +def create(entropy_coefficient, value_coefficient, discount_factor, gae_lambda=1.0): """ Vel factory function """ return A2CPolicyGradient( entropy_coefficient, value_coefficient, - max_grad_norm, discount_factor, gae_lambda ) diff --git a/vel/rl/algo/policy_gradient/acer.py b/vel/rl/policy/semipurgatory/acer.py similarity index 100% rename from vel/rl/algo/policy_gradient/acer.py rename to vel/rl/policy/semipurgatory/acer.py diff --git a/vel/rl/algo/policy_gradient/ddpg.py b/vel/rl/policy/semipurgatory/ddpg.py similarity index 100% rename from 
vel/rl/algo/policy_gradient/ddpg.py rename to vel/rl/policy/semipurgatory/ddpg.py diff --git a/vel/rl/algo/dqn.py b/vel/rl/policy/semipurgatory/dqn.py similarity index 100% rename from vel/rl/algo/dqn.py rename to vel/rl/policy/semipurgatory/dqn.py diff --git a/vel/rl/algo/policy_gradient/ppo.py b/vel/rl/policy/semipurgatory/ppo_rnn.py similarity index 100% rename from vel/rl/algo/policy_gradient/ppo.py rename to vel/rl/policy/semipurgatory/ppo_rnn.py diff --git a/vel/rl/policy/stochastic_policy.py b/vel/rl/policy/stochastic_policy.py index 6ecabc12..23db9d13 100644 --- a/vel/rl/policy/stochastic_policy.py +++ b/vel/rl/policy/stochastic_policy.py @@ -1,40 +1,13 @@ import gym -import torch import typing from vel.api import LinearBackboneModel, ModelFactory, BackboneModel from vel.module.input.identity import IdentityFactory -from vel.rl.api import Rollout, Evaluator, Policy from vel.rl.module.stochastic_action_head import StochasticActionHead from vel.rl.module.value_head import ValueHead -class StochasticPolicyEvaluator(Evaluator): - """ Evaluator for a policy gradient model """ - - def __init__(self, model: 'StochasticPolicy', rollout: Rollout): - super().__init__(rollout) - - self.model = model - - pd_params, estimated_values = model(self.rollout.batch_tensor('observations')) - - self.provide('model:pd_params', pd_params) - self.provide('model:values', estimated_values) - - @Evaluator.provides('model:action:logprobs') - def model_action_logprobs(self): - actions = self.get('rollout:actions') - pd_params = self.get('model:pd_params') - return self.model.action_head.logprob(actions, pd_params) - - @Evaluator.provides('model:entropy') - def model_entropy(self): - pd_params = self.get('model:pd_params') - return self.model.action_head.entropy(pd_params) - - -class StochasticPolicy(Policy): +class StochasticPolicy(BackboneModel): """ Most generic policy gradient model class with a set of common actor-critic heads that share a single backbone """ @@ -74,30 +47,6 @@ def forward(self, observation): return action_output, value_output - def act(self, observation, state=None, deterministic=False): - """ Select actions based on model's output """ - action_pd_params, value_output = self(observation) - actions = self.action_head.sample(action_pd_params, deterministic=deterministic) - - # log likelihood of selected action - logprobs = self.action_head.logprob(actions, action_pd_params) - - return { - 'actions': actions, - 'values': value_output, - 'action:logprobs': logprobs - } - - def value(self, observation, state=None) -> torch.tensor: - """ Calculate value only - small optimization """ - input_data = self.input_block(observation) - base_output = self.backbone(input_data) - return self.value_head(base_output) - - def evaluate(self, rollout: Rollout) -> Evaluator: - """ Evaluate model on a rollout """ - return StochasticPolicyEvaluator(self, rollout) - class StochasticPolicyFactory(ModelFactory): """ Factory class for policy gradient models """ diff --git a/vel/rl/policy/purgatory/stochastic_policy_model_separate.py b/vel/rl/policy/stochastic_policy_separate.py similarity index 67% rename from vel/rl/policy/purgatory/stochastic_policy_model_separate.py rename to vel/rl/policy/stochastic_policy_separate.py index 3044459e..afced37c 100644 --- a/vel/rl/policy/purgatory/stochastic_policy_model_separate.py +++ b/vel/rl/policy/stochastic_policy_separate.py @@ -1,16 +1,13 @@ import gym -import itertools as it import typing from vel.api import LinearBackboneModel, ModelFactory, BackboneModel from 
vel.module.input.identity import IdentityFactory -from vel.rl.api import Rollout, RlModel, Evaluator -from vel.rl.module.action_head import StochasticActionHead +from vel.rl.module.stochastic_action_head import StochasticActionHead from vel.rl.module.value_head import ValueHead -from vel.rl.model.stochastic_policy_model import StochasticPolicyEvaluator -class StochasticPolicyModelSeparate(RlModel): +class StochasticPolicyModelSeparate(BackboneModel): """ Policy gradient model class with an actor and critic heads that don't share a backbone """ @@ -53,29 +50,7 @@ def forward(self, observations): return action_output, value_output - def step(self, observation, deterministic=False): - """ Select actions based on model's output """ - policy_params, values = self(observation) - actions = self.action_head.sample(policy_params, deterministic=deterministic) - - # log likelihood of selected action - logprobs = self.action_head.logprob(actions, policy_params) - - return { - 'actions': actions, - 'values': values, - 'action:logprobs': logprobs - } - - def policy_parameters(self): - """ Parameters of policy """ - return it.chain(self.policy_backbone.parameters(), self.action_head.parameters()) - - def logprob(self, action_sample, policy_params): - """ Calculate - log(prob) of selected actions """ - return self.action_head.logprob(action_sample, policy_params) - - def value(self, observations): + def value(self, observations, state=None): """ Calculate only value head for given state """ input_data = self.input_block(observations) base_output = self.value_backbone(input_data) @@ -89,18 +64,6 @@ def policy(self, observations): policy_params = self.action_head(policy_base_output) return policy_params - def evaluate(self, rollout: Rollout) -> Evaluator: - """ Evaluate model on a rollout """ - return StochasticPolicyEvaluator(self, rollout) - - def entropy(self, policy_params): - """ Entropy of a probability distribution """ - return self.action_head.entropy(policy_params) - - def kl_divergence(self, pd_q, pd_p): - """ Calculate KL-divergence between two probability distributions """ - return self.action_head.kl_divergence(pd_q, pd_p) - class StochasticPolicyModelSeparateFactory(ModelFactory): """ Factory class for policy gradient models """ diff --git a/vel/rl/reinforcer/buffered_mixed_policy_iteration_reinforcer.py b/vel/rl/reinforcer/buffered_mixed_policy_iteration_reinforcer.py index 4faa513f..f80694dc 100644 --- a/vel/rl/reinforcer/buffered_mixed_policy_iteration_reinforcer.py +++ b/vel/rl/reinforcer/buffered_mixed_policy_iteration_reinforcer.py @@ -7,7 +7,7 @@ from vel.api import TrainingInfo, EpochInfo, BatchInfo, Model, ModelFactory from vel.openai.baselines.common.vec_env import VecEnv from vel.rl.api import ( - ReinforcerBase, ReinforcerFactory, VecEnvFactory, ReplayEnvRollerBase, AlgoBase, ReplayEnvRollerFactoryBase + Reinforcer, ReinforcerFactory, VecEnvFactory, ReplayEnvRollerBase, AlgoBase, ReplayEnvRollerFactoryBase ) from vel.rl.metrics import ( FPSMetric, EpisodeLengthMetric, EpisodeRewardMetricQuantile, EpisodeRewardMetric, FramesMetric @@ -22,7 +22,7 @@ class BufferedMixedPolicyIterationReinforcerSettings: stochastic_experience_replay: bool = True -class BufferedMixedPolicyIterationReinforcer(ReinforcerBase): +class BufferedMixedPolicyIterationReinforcer(Reinforcer): """ A 'mixed' reinforcer that does both, on-policy learning from environment rollouts and off-policy learning from a replay buffer. 
@@ -57,19 +57,19 @@ def metrics(self) -> list: return my_metrics + self.algo.metrics() + self.env_roller.metrics() @property - def model(self) -> Model: + def policy(self) -> Model: """ Model trained by this reinforcer """ return self._trained_model def initialize_training(self, training_info: TrainingInfo, model_state=None, hidden_state=None): """ Prepare models for training """ if model_state is not None: - self.model.load_state_dict(model_state) + self.policy.load_state_dict(model_state) else: - self.model.reset_weights() + self.policy.reset_weights() self.algo.initialize( - training_info=training_info, model=self.model, environment=self.environment, device=self.device + training_info=training_info, model=self.policy, environment=self.environment, device=self.device ) def train_epoch(self, epoch_info: EpochInfo, interactive=True): @@ -111,14 +111,14 @@ def train_batch(self, batch_info: BatchInfo): def on_policy_train_batch(self, batch_info: BatchInfo): """ Perform an 'on-policy' training step of evaluating an env and a single backpropagation step """ - self.model.train() + self.policy.train() - rollout = self.env_roller.rollout(batch_info, self.model, self.settings.number_of_steps).to_device(self.device) + rollout = self.env_roller.rollout(batch_info, self.policy, self.settings.number_of_steps).to_device(self.device) batch_result = self.algo.optimize( batch_info=batch_info, device=self.device, - model=self.model, + model=self.policy, rollout=rollout ) @@ -128,14 +128,14 @@ def on_policy_train_batch(self, batch_info: BatchInfo): def off_policy_train_batch(self, batch_info: BatchInfo): """ Perform an 'off-policy' training step of sampling the replay buffer and gradient descent """ - self.model.train() + self.policy.train() - rollout = self.env_roller.sample(batch_info, self.model, self.settings.number_of_steps).to_device(self.device) + rollout = self.env_roller.sample(batch_info, self.policy, self.settings.number_of_steps).to_device(self.device) batch_result = self.algo.optimize( batch_info=batch_info, device=self.device, - model=self.model, + model=self.policy, rollout=rollout ) @@ -155,7 +155,7 @@ def __init__(self, settings, env_factory: VecEnvFactory, model_factory: ModelFac self.algo = algo self.seed = seed - def instantiate(self, device: torch.device) -> ReinforcerBase: + def instantiate(self, device: torch.device) -> Reinforcer: env = self.env_factory.instantiate(parallel_envs=self.parallel_envs, seed=self.seed) model = self.model_factory.instantiate(action_space=env.action_space) env_roller = self.env_roller_factory.instantiate(env, device) diff --git a/vel/rl/reinforcer/buffered_off_policy_iteration_reinforcer.py b/vel/rl/reinforcer/buffered_off_policy_iteration_reinforcer.py index dbef9bd2..be04f2d7 100644 --- a/vel/rl/reinforcer/buffered_off_policy_iteration_reinforcer.py +++ b/vel/rl/reinforcer/buffered_off_policy_iteration_reinforcer.py @@ -7,7 +7,7 @@ from vel.api import TrainingInfo, EpochInfo, BatchInfo, Model, ModelFactory from vel.openai.baselines.common.vec_env import VecEnv from vel.rl.api import ( - ReinforcerBase, ReinforcerFactory, ReplayEnvRollerBase, AlgoBase, VecEnvFactory, ReplayEnvRollerFactoryBase + Reinforcer, ReinforcerFactory, ReplayEnvRollerBase, AlgoBase, VecEnvFactory, ReplayEnvRollerFactoryBase ) from vel.rl.metrics import ( FPSMetric, EpisodeLengthMetric, EpisodeRewardMetricQuantile, EpisodeRewardMetric, FramesMetric, @@ -26,7 +26,7 @@ class BufferedOffPolicyIterationReinforcerSettings: training_rounds: int = 1 -class 
BufferedOffPolicyIterationReinforcer(ReinforcerBase): +class BufferedOffPolicyIterationReinforcer(Reinforcer): """ An off-policy reinforcer that rolls out environment and stores transitions in a buffer. Afterwards, it samples experience batches from this buffer to train the policy. @@ -56,18 +56,18 @@ def metrics(self) -> list: return my_metrics + self.algo.metrics() + self.env_roller.metrics() @property - def model(self) -> Model: + def policy(self) -> Model: return self._trained_model def initialize_training(self, training_info: TrainingInfo, model_state=None, hidden_state=None): """ Prepare models for training """ if model_state is not None: - self.model.load_state_dict(model_state) + self.policy.load_state_dict(model_state) else: - self.model.reset_weights() + self.policy.reset_weights() self.algo.initialize( - training_info=training_info, model=self.model, environment=self.environment, device=self.device + training_info=training_info, model=self.policy, environment=self.environment, device=self.device ) def train_epoch(self, epoch_info: EpochInfo, interactive=True) -> None: @@ -108,10 +108,10 @@ def train_batch(self, batch_info: BatchInfo) -> None: def roll_out_and_store(self, batch_info): """ Roll out environment and store result in the replay buffer """ - self.model.train() + self.policy.train() if self.env_roller.is_ready_for_sampling(): - rollout = self.env_roller.rollout(batch_info, self.model, self.settings.rollout_steps) + rollout = self.env_roller.rollout(batch_info, self.policy, self.settings.rollout_steps) rollout = rollout.to_device(self.device) # Store some information about the rollout, no training phase @@ -123,7 +123,7 @@ def roll_out_and_store(self, batch_info): with tqdm.tqdm(desc="Populating memory", total=self.env_roller.initial_memory_size_hint()) as pbar: while not self.env_roller.is_ready_for_sampling(): - rollout = self.env_roller.rollout(batch_info, self.model, self.settings.rollout_steps) + rollout = self.env_roller.rollout(batch_info, self.policy, self.settings.rollout_steps) rollout = rollout.to_device(self.device) new_frames = rollout.frames() @@ -138,18 +138,18 @@ def roll_out_and_store(self, batch_info): def train_on_replay_memory(self, batch_info): """ Train agent on a memory gotten from replay buffer """ - self.model.train() + self.policy.train() # Algo will aggregate data into this list: batch_info['sub_batch_data'] = [] for i in range(self.settings.training_rounds): - sampled_rollout = self.env_roller.sample(batch_info, self.model, self.settings.training_steps) + sampled_rollout = self.env_roller.sample(batch_info, self.policy, self.settings.training_steps) batch_result = self.algo.optimize( batch_info=batch_info, device=self.device, - model=self.model, + model=self.policy, rollout=sampled_rollout.to_device(self.device) ) diff --git a/vel/rl/reinforcer/on_policy_iteration_reinforcer.py b/vel/rl/reinforcer/on_policy_iteration_reinforcer.py index d9ff7ab9..93096a6b 100644 --- a/vel/rl/reinforcer/on_policy_iteration_reinforcer.py +++ b/vel/rl/reinforcer/on_policy_iteration_reinforcer.py @@ -4,10 +4,10 @@ import torch import tqdm -from vel.api import Model, ModelFactory, TrainingInfo, EpochInfo, BatchInfo +from vel.api import ModelFactory, TrainingInfo, EpochInfo, BatchInfo from vel.rl.api import ( - ReinforcerBase, ReinforcerFactory, VecEnvFactory, EnvRollerFactoryBase, EnvRollerBase, AlgoBase, - Policy + Reinforcer, ReinforcerFactory, VecEnvFactory, EnvRollerFactoryBase, EnvRollerBase, + RlPolicy ) from vel.rl.metrics import ( FPSMetric, 
EpisodeLengthMetric, EpisodeRewardMetricQuantile, @@ -30,20 +30,18 @@ class OnPolicyIterationReinforcerSettings: shuffle_transitions: bool = True -class OnPolicyIterationReinforcer(ReinforcerBase): +class OnPolicyIterationReinforcer(Reinforcer): """ A reinforcer that calculates on-policy environment rollouts and uses them to train policy directly. May split the sample into multiple batches and may replay batches a few times. """ - def __init__(self, device: torch.device, settings: OnPolicyIterationReinforcerSettings, policy: Policy, - algo: AlgoBase, env_roller: EnvRollerBase) -> None: + def __init__(self, device: torch.device, settings: OnPolicyIterationReinforcerSettings, policy: RlPolicy, + env_roller: EnvRollerBase) -> None: self.device = device self.settings = settings - self.env_roller = env_roller - self.algo = algo - self._trained_model = policy.to(self.device) + self._model: RlPolicy = policy.to(self.device) def metrics(self) -> list: """ List of metrics to track for this learning process """ @@ -56,23 +54,19 @@ def metrics(self) -> list: EpisodeLengthMetric("episode_length"), ] - return my_metrics + self.algo.metrics() + self.env_roller.metrics() + self.model.metrics() + return my_metrics + self.env_roller.metrics() + self.policy.metrics() @property - def model(self) -> Model: + def policy(self) -> RlPolicy: """ Model trained by this reinforcer """ - return self._trained_model + return self._model def initialize_training(self, training_info: TrainingInfo, model_state=None, hidden_state=None): """ Prepare models for training """ if model_state is not None: - self.model.load_state_dict(model_state) + self.policy.load_state_dict(model_state) else: - self.model.reset_weights() - - self.algo.initialize( - training_info=training_info, model=self.model, environment=self.env_roller.environment, device=self.device - ) + self.policy.reset_weights() def train_epoch(self, epoch_info: EpochInfo, interactive=True) -> None: """ Train model on an epoch of a fixed number of batch updates """ @@ -97,18 +91,18 @@ def train_batch(self, batch_info: BatchInfo) -> None: """ Batch - the most atomic unit of learning. - For this reinforforcer, that involves: + For this reinforcer, that involves: 1. Roll out the environmnent using current policy 2. Use that rollout to train the policy """ # Calculate environment rollout on the evaluation version of the model - self.model.train() + self.policy.train() rollout = self.env_roller.rollout(batch_info, self.settings.number_of_steps) - # Process rollout by the 'algo' (e.g. 
perform the advantage estimation) - rollout = self.algo.process_rollout(batch_info, rollout) + # Preprocessing of the rollout for this algorithm + rollout = self.policy.process_rollout(rollout) # Perform the training step # Algo will aggregate data into this list: @@ -119,7 +113,7 @@ def train_batch(self, batch_info: BatchInfo) -> None: if self.settings.stochastic_experience_replay: # Always play experience at least once - experience_replay_count = max(np.random.poisson(self.settings.experience_replay), 1) + experience_replay_count = 1 + np.random.poisson(self.settings.experience_replay - 1) else: experience_replay_count = self.settings.experience_replay @@ -127,25 +121,22 @@ def train_batch(self, batch_info: BatchInfo) -> None: for i in range(experience_replay_count): # We may potentially need to split rollout into multiple batches if self.settings.batch_size >= rollout.frames(): - batch_result = self.algo.optimize( + metrics = self.policy.optimize( batch_info=batch_info, - device=self.device, - model=self.model, - rollout=rollout.to_device(self.device) + rollout=rollout.to_device(self.device), ) - batch_info['sub_batch_data'].append(batch_result) + batch_info['sub_batch_data'].append(metrics) else: # Rollout too big, need to split in batches for batch_rollout in rollout.shuffled_batches(self.settings.batch_size): - batch_result = self.algo.optimize( + + metrics = self.policy.optimize( batch_info=batch_info, - device=self.device, - model=self.model, - rollout=batch_rollout.to_device(self.device) + rollout=batch_rollout.to_device(self.device), ) - batch_info['sub_batch_data'].append(batch_result) + batch_info['sub_batch_data'].append(metrics) batch_info['frames'] = rollout.frames() batch_info['episode_infos'] = rollout.episode_information() @@ -157,24 +148,23 @@ def train_batch(self, batch_info: BatchInfo) -> None: class OnPolicyIterationReinforcerFactory(ReinforcerFactory): """ Vel factory class for the PolicyGradientReinforcer """ def __init__(self, settings, parallel_envs: int, env_factory: VecEnvFactory, model_factory: ModelFactory, - algo: AlgoBase, env_roller_factory: EnvRollerFactoryBase, seed: int): + env_roller_factory: EnvRollerFactoryBase, seed: int): self.settings = settings self.parallel_envs = parallel_envs self.env_factory = env_factory self.model_factory = model_factory - self.algo = algo self.env_roller_factory = env_roller_factory self.seed = seed - def instantiate(self, device: torch.device) -> ReinforcerBase: + def instantiate(self, device: torch.device) -> Reinforcer: env = self.env_factory.instantiate(parallel_envs=self.parallel_envs, seed=self.seed) policy = self.model_factory.instantiate(action_space=env.action_space) env_roller = self.env_roller_factory.instantiate(environment=env, policy=policy, device=device) - return OnPolicyIterationReinforcer(device, self.settings, policy, self.algo, env_roller) + return OnPolicyIterationReinforcer(device, self.settings, policy, env_roller) -def create(model_config, model, vec_env, algo, env_roller, parallel_envs, number_of_steps, +def create(model_config, model, vec_env, env_roller, parallel_envs, number_of_steps, batch_size=256, experience_replay=1, stochastic_experience_replay=False, shuffle_transitions=True): """ Vel factory function """ settings = OnPolicyIterationReinforcerSettings( @@ -190,7 +180,6 @@ def create(model_config, model, vec_env, algo, env_roller, parallel_envs, number parallel_envs=parallel_envs, env_factory=vec_env, model_factory=model, - algo=algo, env_roller_factory=env_roller, seed=model_config.seed ) 
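The reworked train_batch above delegates gradient computation to the policy itself. Below is a minimal sketch of the resulting control flow, using only the calls visible in the diff (process_rollout, optimize, frames, shuffled_batches, to_device); the function name train_batch_sketch and the bare arguments are illustrative stand-ins for the reinforcer's own attributes, not part of the library:

    import numpy as np

    def train_batch_sketch(policy, env_roller, batch_info, settings, device):
        policy.train()

        # 1. Roll out the environment with the current policy
        rollout = env_roller.rollout(batch_info, settings.number_of_steps)

        # 2. Policy-specific preprocessing (e.g. advantage estimation for A2C/PPO)
        rollout = policy.process_rollout(rollout)

        # 3. Decide how many times to replay the gathered experience
        if settings.stochastic_experience_replay:
            # 1 + Poisson(lambda - 1) has mean lambda and is always at least 1
            replay_count = 1 + np.random.poisson(settings.experience_replay - 1)
        else:
            replay_count = settings.experience_replay

        batch_info['sub_batch_data'] = []

        for _ in range(replay_count):
            if settings.batch_size >= rollout.frames():
                # Whole rollout fits in a single optimization batch
                metrics = policy.optimize(batch_info=batch_info,
                                          rollout=rollout.to_device(device))
                batch_info['sub_batch_data'].append(metrics)
            else:
                # Rollout too big for one batch - split it into shuffled sub-batches
                for sub_rollout in rollout.shuffled_batches(settings.batch_size):
                    metrics = policy.optimize(batch_info=batch_info,
                                              rollout=sub_rollout.to_device(device))
                    batch_info['sub_batch_data'].append(metrics)

Because RlPolicy.optimize zeroes the gradients, calls calculate_gradient and steps the optimizer internally, the reinforcer no longer needs a separate algo object or the device/model arguments at optimization time.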
diff --git a/vel/rl/test/test_integration.py b/vel/rl/test/test_integration.py index ed73a972..dfac3a61 100644 --- a/vel/rl/test/test_integration.py +++ b/vel/rl/test/test_integration.py @@ -100,7 +100,7 @@ def test_a2c_breakout(): ) # Model optimizer - optimizer = optim.RMSprop(reinforcer.model.parameters(), lr=7.0e-4, eps=1e-3) + optimizer = optim.RMSprop(reinforcer.policy.parameters(), lr=7.0e-4, eps=1e-3) # Overall information store for training information training_info = TrainingInfo( @@ -182,7 +182,7 @@ def test_ppo_breakout(): # Model optimizer # optimizer = optim.RMSprop(reinforcer.model.parameters(), lr=7.0e-4, eps=1e-3) - optimizer = optim.Adam(reinforcer.model.parameters(), lr=2.5e-4, eps=1e-5) + optimizer = optim.Adam(reinforcer.policy.parameters(), lr=2.5e-4, eps=1e-5) # Overall information store for training information training_info = TrainingInfo( diff --git a/vel/rl/util/actor.py b/vel/rl/util/actor.py index a858c4a7..76fa9d94 100644 --- a/vel/rl/util/actor.py +++ b/vel/rl/util/actor.py @@ -1,13 +1,13 @@ import torch -from vel.rl.api import Policy +from vel.rl.api import RlPolicy from vel.util.tensor_util import to_device class PolicyActor: """ Evaluates policy on a fixed set of environments. Additionally tracks the state """ - def __init__(self, num_envs: int, policy: Policy, device: torch.device): + def __init__(self, num_envs: int, policy: RlPolicy, device: torch.device): self.num_envs = num_envs self.policy = policy.to(device) self.device = device From b5a068ee059578a874ada072b5252ba32fdefa40 Mon Sep 17 00:00:00 2001 From: Million Integrals Date: Thu, 26 Sep 2019 23:04:51 -0700 Subject: [PATCH 099/162] PPO and A2C RNN policies. --- .../atari/{purgatory => }/atari_a2c_lstm.yaml | 45 ++-- .../atari/{purgatory => }/atari_ppo_gru.yaml | 47 ++-- vel/rl/algo/a2c.py | 9 +- vel/rl/algo/a2c_rnn.py | 173 ++++++++++++++ vel/rl/algo/ppo_rnn.py | 220 ++++++++++++++++++ .../semipurgatory}/distributional_dqn.py | 0 .../{purgatory => }/stochastic_rnn_policy.py | 65 +----- 7 files changed, 449 insertions(+), 110 deletions(-) rename examples-configs/rl/atari/{purgatory => }/atari_a2c_lstm.yaml (53%) rename examples-configs/rl/atari/{purgatory => }/atari_ppo_gru.yaml (62%) create mode 100644 vel/rl/algo/a2c_rnn.py create mode 100644 vel/rl/algo/ppo_rnn.py rename vel/rl/{algo => policy/semipurgatory}/distributional_dqn.py (100%) rename vel/rl/policy/{purgatory => }/stochastic_rnn_policy.py (57%) diff --git a/examples-configs/rl/atari/purgatory/atari_a2c_lstm.yaml b/examples-configs/rl/atari/atari_a2c_lstm.yaml similarity index 53% rename from examples-configs/rl/atari/purgatory/atari_a2c_lstm.yaml rename to examples-configs/rl/atari/atari_a2c_lstm.yaml index f83f5c50..4db60264 100644 --- a/examples-configs/rl/atari/purgatory/atari_a2c_lstm.yaml +++ b/examples-configs/rl/atari/atari_a2c_lstm.yaml @@ -12,28 +12,28 @@ vec_env: model: - name: vel.rl.models.stochastic_policy_rnn_model + name: vel.rl.algo.a2c_rnn - input_block: - name: vel.modules.input.image_to_tensor + entropy_coefficient: 0.01 + value_coefficient: 0.5 + discount_factor: 0.99 - backbone: - name: vel.rl.models.backbone.nature_cnn_rnn - input_width: 84 - input_height: 84 - input_channels: 1 # The same as frame_history - rnn_type: 'lstm' + policy: + name: vel.rl.policy.stochastic_rnn_policy + input_block: + name: vel.module.input.image_to_tensor + + backbone: + name: vel.rl.backbone.nature_cnn_rnn + input_width: 84 + input_height: 84 + input_channels: 1 # The same as frame_history + rnn_type: 'lstm' -reinforcer: - name: 
vel.rl.reinforcers.on_policy_iteration_reinforcer - algo: - name: vel.rl.algo.policy_gradient.a2c - entropy_coefficient: 0.01 - value_coefficient: 0.5 - max_grad_norm: 0.5 - discount_factor: 0.99 +reinforcer: + name: vel.rl.reinforcer.on_policy_iteration_reinforcer env_roller: name: vel.rl.env_roller.step_env_roller @@ -45,28 +45,29 @@ reinforcer: optimizer: - name: vel.optimizers.rmsprop + name: vel.optimizer.rmsprop lr: 7.0e-4 alpha: 0.99 epsilon: 1.0e-3 + max_grad_norm: 0.5 commands: train: - name: vel.rl.commands.rl_train_command + name: vel.rl.command.rl_train_command total_frames: 1.1e7 batches_per_epoch: 100 record: - name: vel.rl.commands.record_movie_command + name: vel.rl.command.record_movie_command takes: 10 videoname: 'atari_vid_{:04}.avi' evaluate: - name: vel.rl.commands.evaluate_env_command + name: vel.rl.command.evaluate_env_command parallel_envs: 16 # How many environments to run in parallel takes: 20 visdom: - name: vel.commands.vis_store_command + name: vel.command.vis_store_command diff --git a/examples-configs/rl/atari/purgatory/atari_ppo_gru.yaml b/examples-configs/rl/atari/atari_ppo_gru.yaml similarity index 62% rename from examples-configs/rl/atari/purgatory/atari_ppo_gru.yaml rename to examples-configs/rl/atari/atari_ppo_gru.yaml index 27303c92..afea6850 100644 --- a/examples-configs/rl/atari/purgatory/atari_ppo_gru.yaml +++ b/examples-configs/rl/atari/atari_ppo_gru.yaml @@ -11,39 +11,37 @@ vec_env: model: - name: vel.rl.policy.stochastic_rnn_policy + name: vel.rl.algo.ppo_rnn - input_block: - name: vel.module.input.image_to_tensor + entropy_coefficient: 0.01 + value_coefficient: 0.5 - backbone: - name: vel.rl.backbone.nature_cnn_rnn - rnn_type: 'gru' - hidden_units: 512 + discount_factor: 0.99 # Discount factor for the rewards + gae_lambda: 0.95 # Generalized Advantage Estimator Lambda parameter - input_width: 84 - input_height: 84 - input_channels: 1 # The same as frame_history + cliprange: + name: vel.function.linear + initial_value: 0.1 + final_value: 0.0 + policy: + name: vel.rl.policy.stochastic_rnn_policy -reinforcer: - name: vel.rl.reinforcer.on_policy_iteration_reinforcer - - algo: - name: vel.rl.algo.policy_gradient.ppo + input_block: + name: vel.module.input.image_to_tensor - entropy_coefficient: 0.01 - value_coefficient: 0.5 + backbone: + name: vel.rl.backbone.nature_cnn_rnn + rnn_type: 'gru' + hidden_units: 512 - discount_factor: 0.99 # Discount factor for the rewards - gae_lambda: 0.95 # Generalized Advantage Estimator Lambda parameter + input_width: 84 + input_height: 84 + input_channels: 1 # The same as frame_history - max_grad_norm: 0.5 # Gradient clipping parameter - cliprange: - name: vel.function.linear - initial_value: 0.1 - final_value: 0.0 +reinforcer: + name: vel.rl.reinforcer.on_policy_iteration_reinforcer env_roller: name: vel.rl.env_roller.step_env_roller @@ -60,6 +58,7 @@ optimizer: name: vel.optimizer.adam lr: 2.5e-4 epsilon: 1.0e-5 + max_grad_norm: 0.5 # Gradient clipping parameter scheduler: diff --git a/vel/rl/algo/a2c.py b/vel/rl/algo/a2c.py index 69b7926d..4fd529c7 100644 --- a/vel/rl/algo/a2c.py +++ b/vel/rl/algo/a2c.py @@ -21,17 +21,20 @@ def __init__(self, policy: BackboneModel, entropy_coefficient, value_coefficient self.policy = policy + assert not self.policy.is_stateful, "For stateful policies, try A2CRnn" + def reset_weights(self): """ Initialize properly model weights """ self.policy.reset_weights() - def forward(self, observation): + def forward(self, observation, state=None): """ Calculate model outputs """ - return 
self.policy(observation) + return self.policy(observation, state=state) def act(self, observation, state=None, deterministic=False): """ Select actions based on model's output """ - action_pd_params, value_output = self(observation) + action_pd_params, value_output = self(observation, state=state) + actions = self.policy.action_head.sample(action_pd_params, deterministic=deterministic) # log likelihood of selected action diff --git a/vel/rl/algo/a2c_rnn.py b/vel/rl/algo/a2c_rnn.py new file mode 100644 index 00000000..523b2f13 --- /dev/null +++ b/vel/rl/algo/a2c_rnn.py @@ -0,0 +1,173 @@ +import torch +import torch.nn.functional as F + +from vel.metric.base import AveragingNamedMetric +from vel.calc.function import explained_variance +from vel.api import BackboneModel, ModelFactory, BatchInfo + +from vel.rl.api import RlPolicy, Rollout, Trajectories +from vel.rl.discount_bootstrap import discount_bootstrap_gae + + +class A2CRnn(RlPolicy): + """ Simplest policy gradient - calculate loss as an advantage of an actor versus value function """ + def __init__(self, policy: BackboneModel, entropy_coefficient, value_coefficient, discount_factor: float, + gae_lambda=1.0): + super().__init__(discount_factor) + + self.entropy_coefficient = entropy_coefficient + self.value_coefficient = value_coefficient + self.gae_lambda = gae_lambda + + self.policy = policy + + assert self.policy.is_stateful, "Policy must be stateful" + + def reset_weights(self): + """ Initialize properly model weights """ + self.policy.reset_weights() + + def forward(self, observation, state=None): + """ Calculate model outputs """ + return self.policy(observation, state=state) + + def is_stateful(self) -> bool: + return self.policy.is_stateful + + def zero_state(self, batch_size): + return self.policy.zero_state(batch_size) + + def reset_state(self, state, dones): + return self.policy.reset_state(state, dones) + + def act(self, observation, state=None, deterministic=False): + """ Select actions based on model's output """ + action_pd_params, value_output, next_state = self(observation, state=state) + + actions = self.policy.action_head.sample(action_pd_params, deterministic=deterministic) + + # log likelihood of selected action + logprobs = self.policy.action_head.logprob(actions, action_pd_params) + + return { + 'actions': actions, + 'state': next_state, + 'values': value_output, + 'action:logprobs': logprobs + } + + def process_rollout(self, rollout: Rollout) -> Rollout: + """ Process rollout for optimization before any chunking/shuffling """ + assert isinstance(rollout, Trajectories), "A2C requires trajectory rollouts" + + advantages = discount_bootstrap_gae( + rewards_buffer=rollout.transition_tensors['rewards'], + dones_buffer=rollout.transition_tensors['dones'], + values_buffer=rollout.transition_tensors['values'], + final_values=rollout.rollout_tensors['final_values'], + discount_factor=self.discount_factor, + gae_lambda=self.gae_lambda, + number_of_steps=rollout.num_steps + ) + + returns = advantages + rollout.transition_tensors['values'] + + rollout.transition_tensors['advantages'] = advantages + rollout.transition_tensors['returns'] = returns + + return rollout + + def calculate_gradient(self, batch_info: BatchInfo, rollout: Rollout) -> dict: + """ Calculate loss of the supplied rollout """ + assert isinstance(rollout, Trajectories), "For an RNN model, we must evaluate trajectories" + + # rollout values + actions = rollout.batch_tensor('actions') + advantages = rollout.batch_tensor('advantages') + returns = 
rollout.batch_tensor('returns') + rollout_values = rollout.batch_tensor('values') + + # Let's evaluate the model + observations = rollout.transition_tensors['observations'] + hidden_state = rollout.transition_tensors['state'][0] # Initial hidden state + dones = rollout.transition_tensors['dones'] + + action_accumulator = [] + value_accumulator = [] + + # Evaluate recurrent network step by step + for i in range(observations.size(0)): + action_output, value_output, hidden_state = self(observations[i], hidden_state) + hidden_state = self.reset_state(hidden_state, dones[i]) + + action_accumulator.append(action_output) + value_accumulator.append(value_output) + + pd_params = torch.cat(action_accumulator, dim=0) + model_values = torch.cat(value_accumulator, dim=0) + + log_probs = self.policy.action_head.logprob(actions, pd_params) + entropy = self.policy.action_head.entropy(pd_params) + + # Actual calculations. Pretty trivial + policy_loss = -torch.mean(advantages * log_probs) + value_loss = 0.5 * F.mse_loss(model_values, returns) + policy_entropy = torch.mean(entropy) + + loss_value = ( + policy_loss - self.entropy_coefficient * policy_entropy + self.value_coefficient * value_loss + ) + + loss_value.backward() + + return { + 'policy_loss': policy_loss.item(), + 'value_loss': value_loss.item(), + 'policy_entropy': policy_entropy.item(), + 'advantage_norm': torch.norm(advantages).item(), + 'explained_variance': explained_variance(returns, rollout_values) + } + + def metrics(self) -> list: + """ List of metrics to track for this learning process """ + return [ + AveragingNamedMetric("value_loss", scope="model"), + AveragingNamedMetric("policy_entropy", scope="model"), + AveragingNamedMetric("policy_loss", scope="model"), + AveragingNamedMetric("advantage_norm", scope="model"), + AveragingNamedMetric("explained_variance", scope="model") + ] + + +class A2CRnnFactory(ModelFactory): + """ Factory class for policy gradient models """ + def __init__(self, policy, entropy_coefficient, value_coefficient, discount_factor, gae_lambda=1.0): + self.policy = policy + self.entropy_coefficient = entropy_coefficient + self.value_coefficient = value_coefficient + self.discount_factor = discount_factor + self.gae_lambda = gae_lambda + + def instantiate(self, **extra_args): + """ Instantiate the model """ + # action_space = extra_args.pop('action_space') + policy = self.policy.instantiate(**extra_args) + + return A2CRnn( + policy=policy, + entropy_coefficient=self.entropy_coefficient, + value_coefficient=self.value_coefficient, + discount_factor=self.discount_factor, + gae_lambda=self.gae_lambda + ) + + +def create(policy: BackboneModel, entropy_coefficient, value_coefficient, discount_factor, gae_lambda=1.0): + """ Vel factory function """ + return A2CRnnFactory( + policy=policy, + entropy_coefficient=entropy_coefficient, + value_coefficient=value_coefficient, + discount_factor=discount_factor, + gae_lambda=gae_lambda + ) diff --git a/vel/rl/algo/ppo_rnn.py b/vel/rl/algo/ppo_rnn.py new file mode 100644 index 00000000..76c2daad --- /dev/null +++ b/vel/rl/algo/ppo_rnn.py @@ -0,0 +1,220 @@ +import torch + +import numbers + +from vel.api import BackboneModel, BatchInfo, ModelFactory +from vel.calc.function import explained_variance +from vel.function.constant import ConstantSchedule +from vel.metric.base import AveragingNamedMetric + +from vel.rl.api import RlPolicy, Rollout, Trajectories +from vel.rl.discount_bootstrap import discount_bootstrap_gae + + +class PPORnn(RlPolicy): + """ Proximal Policy Optimization - 
https://arxiv.org/abs/1707.06347 """ + def __init__(self, policy: BackboneModel, + entropy_coefficient, value_coefficient, cliprange, discount_factor: float, + normalize_advantage: bool = True, gae_lambda: float = 1.0): + super().__init__(discount_factor) + + self.entropy_coefficient = entropy_coefficient + self.value_coefficient = value_coefficient + self.normalize_advantage = normalize_advantage + self.gae_lambda = gae_lambda + + if isinstance(cliprange, numbers.Number): + self.cliprange = ConstantSchedule(cliprange) + else: + self.cliprange = cliprange + + self.policy = policy + + assert self.policy.is_stateful, "Policy must be stateful" + + def reset_weights(self): + """ Initialize properly model weights """ + self.policy.reset_weights() + + def forward(self, observation, state=None): + """ Calculate model outputs """ + return self.policy.forward(observation, state=state) + + def is_stateful(self) -> bool: + return self.policy.is_stateful + + def zero_state(self, batch_size): + return self.policy.zero_state(batch_size) + + def reset_state(self, state, dones): + return self.policy.reset_state(state, dones) + + def act(self, observation, state=None, deterministic=False): + """ Select actions based on model's output """ + action_pd_params, value_output, next_state = self(observation, state=state) + actions = self.policy.action_head.sample(action_pd_params, deterministic=deterministic) + + # log likelihood of selected action + logprobs = self.policy.action_head.logprob(actions, action_pd_params) + + return { + 'actions': actions, + 'values': value_output, + 'state': next_state, + 'action:logprobs': logprobs + } + + def process_rollout(self, rollout: Rollout): + """ Process rollout for optimization before any chunking/shuffling """ + assert isinstance(rollout, Trajectories), "PPO requires trajectory rollouts" + + advantages = discount_bootstrap_gae( + rewards_buffer=rollout.transition_tensors['rewards'], + dones_buffer=rollout.transition_tensors['dones'], + values_buffer=rollout.transition_tensors['values'], + final_values=rollout.rollout_tensors['final_values'], + discount_factor=self.discount_factor, + gae_lambda=self.gae_lambda, + number_of_steps=rollout.num_steps + ) + + returns = advantages + rollout.transition_tensors['values'] + + rollout.transition_tensors['advantages'] = advantages + rollout.transition_tensors['returns'] = returns + + return rollout + + def calculate_gradient(self, batch_info: BatchInfo, rollout: Rollout) -> dict: + """ Calculate loss of the supplied rollout """ + assert isinstance(rollout, Trajectories), "For an RNN model, we must evaluate trajectories" + + # Part 0.0 - Rollout values + actions = rollout.batch_tensor('actions') + advantages = rollout.batch_tensor('advantages') + returns = rollout.batch_tensor('returns') + rollout_values = rollout.batch_tensor('values') + rollout_action_logprobs = rollout.batch_tensor('action:logprobs') + + # PART 0.1 - Model evaluation + observations = rollout.transition_tensors['observations'] + hidden_state = rollout.transition_tensors['state'][0] # Initial hidden state + dones = rollout.transition_tensors['dones'] + + action_accumulator = [] + value_accumulator = [] + + # Evaluate recurrent network step by step + for i in range(observations.size(0)): + action_output, value_output, hidden_state = self(observations[i], hidden_state) + hidden_state = self.reset_state(hidden_state, dones[i]) + + action_accumulator.append(action_output) + value_accumulator.append(value_output) + + pd_params = torch.cat(action_accumulator, dim=0) + 
model_values = torch.cat(value_accumulator, dim=0) + + model_action_logprobs = self.policy.action_head.logprob(actions, pd_params) + entropy = self.policy.action_head.entropy(pd_params) + + # Select the cliprange + current_cliprange = self.cliprange.value(batch_info['progress']) + + # Normalize the advantages? + if self.normalize_advantage: + advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-8) + + # PART 1 - policy entropy + policy_entropy = torch.mean(entropy) + + # PART 2 - value function + value_output_clipped = rollout_values + torch.clamp( + model_values - rollout_values, -current_cliprange, current_cliprange + ) + value_loss_part1 = (model_values - returns).pow(2) + value_loss_part2 = (value_output_clipped - returns).pow(2) + value_loss = 0.5 * torch.mean(torch.max(value_loss_part1, value_loss_part2)) + + # PART 3 - policy gradient loss + ratio = torch.exp(model_action_logprobs - rollout_action_logprobs) + + pg_loss_part1 = -advantages * ratio + pg_loss_part2 = -advantages * torch.clamp(ratio, 1.0 - current_cliprange, 1.0 + current_cliprange) + policy_loss = torch.mean(torch.max(pg_loss_part1, pg_loss_part2)) + + loss_value = ( + policy_loss - self.entropy_coefficient * policy_entropy + self.value_coefficient * value_loss + ) + + loss_value.backward() + + with torch.no_grad(): + approx_kl_divergence = 0.5 * torch.mean((model_action_logprobs - rollout_action_logprobs).pow(2)) + clip_fraction = torch.mean((torch.abs(ratio - 1.0) > current_cliprange).to(dtype=torch.float)) + + return { + 'policy_loss': policy_loss.item(), + 'value_loss': value_loss.item(), + 'policy_entropy': policy_entropy.item(), + 'approx_kl_divergence': approx_kl_divergence.item(), + 'clip_fraction': clip_fraction.item(), + 'advantage_norm': torch.norm(advantages).item(), + 'explained_variance': explained_variance(returns, rollout_values) + } + + def metrics(self) -> list: + """ List of metrics to track for this learning process """ + return [ + AveragingNamedMetric("policy_loss", scope="model"), + AveragingNamedMetric("value_loss", scope="model"), + AveragingNamedMetric("policy_entropy", scope="model"), + AveragingNamedMetric("approx_kl_divergence", scope="model"), + AveragingNamedMetric("clip_fraction", scope="model"), + AveragingNamedMetric("advantage_norm", scope="model"), + AveragingNamedMetric("explained_variance", scope="model") + ] + + +class PPORnnFactory(ModelFactory): + """ Factory class for policy gradient models """ + def __init__(self, policy: BackboneModel, + entropy_coefficient, value_coefficient, cliprange, discount_factor: float, + normalize_advantage: bool = True, gae_lambda: float = 1.0): + self.policy = policy + self.entropy_coefficient = entropy_coefficient + self.value_coefficient = value_coefficient + self.cliprange = cliprange + self.discount_factor = discount_factor + self.normalize_advantage = normalize_advantage + self.gae_lambda = gae_lambda + + def instantiate(self, **extra_args): + """ Instantiate the model """ + policy = self.policy.instantiate(**extra_args) + + return PPORnn( + policy=policy, + entropy_coefficient=self.entropy_coefficient, + value_coefficient=self.value_coefficient, + cliprange=self.cliprange, + discount_factor=self.discount_factor, + normalize_advantage=self.normalize_advantage, + gae_lambda=self.gae_lambda, + ) + + +def create(policy: BackboneModel, + entropy_coefficient, value_coefficient, cliprange, discount_factor: float, + normalize_advantage: bool = True, gae_lambda: float = 1.0): + """ Vel factory function """ + return PPORnnFactory( + 
policy=policy, + entropy_coefficient=entropy_coefficient, + value_coefficient=value_coefficient, + cliprange=cliprange, + discount_factor=discount_factor, + normalize_advantage=normalize_advantage, + gae_lambda=gae_lambda + ) + diff --git a/vel/rl/algo/distributional_dqn.py b/vel/rl/policy/semipurgatory/distributional_dqn.py similarity index 100% rename from vel/rl/algo/distributional_dqn.py rename to vel/rl/policy/semipurgatory/distributional_dqn.py diff --git a/vel/rl/policy/purgatory/stochastic_rnn_policy.py b/vel/rl/policy/stochastic_rnn_policy.py similarity index 57% rename from vel/rl/policy/purgatory/stochastic_rnn_policy.py rename to vel/rl/policy/stochastic_rnn_policy.py index 256346da..de8754b1 100644 --- a/vel/rl/policy/purgatory/stochastic_rnn_policy.py +++ b/vel/rl/policy/stochastic_rnn_policy.py @@ -1,54 +1,13 @@ import gym -import torch import typing from vel.api import LinearBackboneModel, ModelFactory, BackboneModel from vel.module.input.identity import IdentityFactory -from vel.rl.api import Rollout, Trajectories, Evaluator, RlPolicy from vel.rl.module.stochastic_action_head import StochasticActionHead from vel.rl.module.value_head import ValueHead -class StochasticPolicyRnnEvaluator(Evaluator): - """ Evaluate recurrent model from initial state """ - - def __init__(self, model: 'StochasticRnnPolicy', rollout: Rollout): - assert isinstance(rollout, Trajectories), "For an RNN model, we must evaluate trajectories" - super().__init__(rollout) - - self.model = model - - observation_trajectories = rollout.transition_tensors['observations'] - hidden_state = rollout.transition_tensors['state'][0] # Initial hidden state - - action_accumulator = [] - value_accumulator = [] - - # Evaluate recurrent network step by step - for i in range(observation_trajectories.size(0)): - action_output, value_output, hidden_state = model(observation_trajectories[i], hidden_state) - action_accumulator.append(action_output) - value_accumulator.append(value_output) - - policy_params = torch.cat(action_accumulator, dim=0) - estimated_values = torch.cat(value_accumulator, dim=0) - - self.provide('model:policy_params', policy_params) - self.provide('model:values', estimated_values) - - @Evaluator.provides('model:action:logprobs') - def model_action_logprobs(self): - actions = self.get('rollout:actions') - policy_params = self.get('model:policy_params') - return self.model.action_head.logprob(actions, policy_params) - - @Evaluator.provides('model:entropy') - def model_entropy(self): - policy_params = self.get('model:policy_params') - return self.model.action_head.entropy(policy_params) - - -class StochasticRnnPolicy(RlPolicy): +class StochasticRnnPolicy(BackboneModel): """ Most generic policy gradient model class with a set of common actor-critic heads that share a single backbone RNN version @@ -76,6 +35,9 @@ def is_stateful(self) -> bool: """ If the model has a state that needs to be fed between individual observations """ return True + def zero_state(self, batch_size): + return self.backbone.zero_state(batch_size) + def reset_weights(self): """ Initialize properly model weights """ self.input_block.reset_weights() @@ -93,25 +55,6 @@ def forward(self, observations, state): return action_output, value_output, new_state - def act(self, observation, state=None, deterministic=False) -> dict: - """ Select actions based on model's output """ - action_pd_params, value_output, new_state = self(observation, state) - actions = self.action_head.sample(action_pd_params, deterministic=deterministic) - - # log 
likelihood of selected action - logprobs = self.action_head.logprob(actions, action_pd_params) - - return { - 'actions': actions, - 'values': value_output, - 'action:logprobs': logprobs, - 'state': new_state - } - - def evaluate(self, rollout: Rollout) -> Evaluator: - """ Evaluate model on a rollout """ - return StochasticPolicyRnnEvaluator(self, rollout) - def value(self, observation, state=None): """ Calculate only value head for given state """ input_data = self.input_block(observation) From 62e82ffb0a0b315577fe465d0213108b7ea43cd4 Mon Sep 17 00:00:00 2001 From: Million Integrals Date: Tue, 1 Oct 2019 21:26:45 -0700 Subject: [PATCH 100/162] Updated README. --- README.md | 62 +++++++++++++++++++++++++++++++++++++++++++++++++------ 1 file changed, 56 insertions(+), 6 deletions(-) diff --git a/README.md b/README.md index 5f678ffd..60563e3f 100644 --- a/README.md +++ b/README.md @@ -9,12 +9,13 @@ Bring **velocity** to deep-learning research. This project hosts a collection of **highly modular** deep learning components that are tested to be working well together. -A simple yaml-based system ties these modules together declaratively using configuration files. +A simple yaml-based system ties these modules declaratively using configuration files. -This is still an early version and a hobby project so documentation is unfortunately nonexistent. I've tried to make the -code as clear as possible, and provide many usage examples, but whenever there was a tradeoff to be made between -simplicity and modularity I've chosen modularity first and simplicity second. +This is still an early version and a hobby project, so documentation is unfortunately nonexistent. +I've made an effort to make the code clear and provide many usage examples, +but whenever there was a tradeoff to be made between simplicity and modularity +I've chosen modularity first and simplicity second. Therefore, high emphasis is made on interfaces between components. Having conducted a few research projects, I've gathered a small collection of repositories @@ -97,14 +98,14 @@ that are ready to run and easy to modify for other similar usecases: # Implemented models - Reinforcement learning -- Continuous and discrete action spaces +- Support for continuous and discrete environment action spaces - Basic support for recurrent policies for A2C and PPO - Following policy gradient reinforcement learning algorithms: - Advantage Actor-Critic (A2C) - - Deep Deterministic Policy Gradient (DDPG) - Proximal Policy Optimization (PPO) - Trust Region Policy Optimization (TRPO) - Actor-Critic with Experience Replay (ACER) + - Deep Deterministic Policy Gradient (DDPG) - Deep Q-Learning (DQN) as described by DeepMind in their Nature publication with following improvements: - Double DQN @@ -215,6 +216,55 @@ Code quality: - Factor action noise back into the policy +# Directories + +Below I'll list brief explanation about contents of main top-level directories. + +- `docs` - Few markdown documents about the framework +- `examples-configs` - Ready to run configs with tried and tested models, usually heavily inspired by existing + literature. +- `examples-notebooks` - A few examples of how to interact with `vel` from the level of IPython notebook +- `vel` - Root for the Python source of the package + - `vel.api` - Interfaces and base classes used all over the codebase. To be used in source code only and not + referenced from config files. + - `vel.callback` - Definitions of callbacks that can be used in the training process. 
Can be referenced both by code
+    and by the config files.
+  - `vel.command` - Commands that can be used in your configuration files, and there isn't much need to refer to
+    them from code.
+  - `vel.data` - Various classes for handling data sources and data transformations. Referenced both by source code
+    and config files.
+  - `vel.function` - Interfaces for creating various functions/interpolators, to be referenced by config files.
+  - `vel.internal` - Functions and classes to be used only by `vel` internally, and not by user code or configs.
+  - `vel.metric` - Code for tracking metrics during training of your models. To be used by both code and configs.
+  - `vel.model` - Definition of models, which is an end-package that references all other packages. Models
+    contain most other parts of the pipeline and define a training procedure.
+  - `vel.module` - Various useful definitions of PyTorch modules, to be used when defining your own `models` and
+    `layers`.
+  - `vel.net` - "Network" module that may be referenced by a model to define the neural network architecture used.
+    - `vel.net.layer` - Modular layer system for defining networks declaratively in configuration files.
+  - `vel.notebook` - Utilities for interfacing with `vel` using IPython notebooks
+  - `vel.openai` - Imported parts of the codebase of `openai/baselines` that I didn't want to bring in as a package
+    dependency. To be referenced mostly in code.
+  - `vel.optimizer` - Various implementations of deep learning optimizers. To be referenced mostly by scripts.
+  - `vel.rl` - Meta package for everything related to Reinforcement Learning
+    - `vel.rl.api` - Interfaces and base classes to be used for Reinforcement Learning models and other classes.
+    - `vel.rl.buffer` - All classes relating to experience replay and experience buffers
+    - `vel.rl.command` - Commands used for RL training
+    - `vel.rl.env` - Basic reinforcement learning environments, mostly based on OpenAI gym
+    - `vel.rl.env_roller` - Classes for generating environment rollouts
+    - `vel.rl.layer` - Layers designed especially for RL
+    - `vel.rl.module` - PyTorch modules designed for RL
+    - `vel.rl.policy` - Equivalent of `vel.model` for RL
+    - `vel.rl.reinforcer` - Reinforcer manages RL training, and corresponds to `Trainer` in Supervised Learning
+    - `vel.rl.vecenv` - Utilities for vectorizing environments and stepping multiple environments at the same time
+  - `vel.scheduler` - Classes helping to set up learning rate schedules for the optimizers. To be referenced mostly
+    by scripts.
+  - `vel.storage` - Everything about persisting models and metrics. To be referenced mostly by configuration files.
+  - `vel.train` - Utilities for defining more generic training loops of models. To be referenced in both code and
+    config.
+  - `vel.util` - Collection of various utility functions to be used by all other modules.
+
+
 # Citing
 
 If you use `vel` in your research, you can cite it as follows:

From 43369d55959e4f1f4dd32d13fe07b2996e2eb63e Mon Sep 17 00:00:00 2001
From: Million Integrals
Date: Tue, 1 Oct 2019 23:14:26 -0700
Subject: [PATCH 101/162] Continuing with major net/rl code refactoring.
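
This patch starts moving networks onto the new modular layer system: backbones
become `Layer` objects produced by `LayerFactory` instances, sizes are threaded
through the new `SizeHint`/`SizeHints` API, and policies such as A2C and PPO now
take a generic `net` and build their own action/value heads instead of wrapping
a monolithic `policy` backbone.

As a rough sketch of the new config style (mirroring the atari_a2c.yaml change
in this patch; the remaining settings stay as before):

    model:
      name: vel.rl.policy.a2c
      entropy_coefficient: 0.01
      value_coefficient: 0.5
      discount_factor: 0.99

      net:
        name: vel.net.modular
        layers:
          - name: vel.net.layer.input.image_to_tensor
            size: [84, 84, 4]  # number of channels equals the frame history
          - name: vel.rl.layer.nature_cnn
          - name: vel.net.layer.util.repeat_tensor
            times: 2  # repeat the output for the action and value heads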
--- examples-configs/rl/atari/atari_a2c.yaml | 21 ++-- examples-configs/rl/atari/atari_ppo.yaml | 20 ++-- .../atari/{ => purgatory}/atari_a2c_lstm.yaml | 0 .../rl/atari/purgatory/atari_ddqn.yaml | 0 .../rl/atari/purgatory/atari_dqn.yaml | 82 +++++++++++++ .../atari/{ => purgatory}/atari_ppo_gru.yaml | 0 .../rl/atari/purgatory/atari_rainbow.yaml | 0 .../rl/atari/{ => purgatory}/atari_trpo.yaml | 0 vel/api/__init__.py | 4 +- vel/api/model.py | 43 ++----- vel/api/model_factory.py | 4 +- vel/api/network.py | 28 +++++ vel/api/size_hint.py | 72 +++++++++++ vel/module/input/embedding.py | 4 - vel/module/input/flatten.py | 4 - vel/module/input/identity.py | 4 - vel/module/input/image_to_tensor.py | 40 +++---- vel/module/input/normalize_observations.py | 4 - vel/module/input/one_hot_encoding.py | 4 - vel/module/input/sequence.py | 4 - vel/{rl/backbone => net}/__init__.py | 0 .../purgatory => net/layer}/__init__.py | 0 .../layer/input}/__init__.py | 0 vel/net/layer/input/image_to_tensor.py | 47 ++++++++ vel/net/layer/util/__init__.py | 0 vel/net/layer/util/repeat_tensor.py | 41 +++++++ vel/net/layer_base.py | 31 +++++ vel/net/modular.py | 113 ++++++++++++++++++ vel/rl/api/__init__.py | 2 +- vel/rl/api/{rl_model.py => policy.py} | 10 +- vel/rl/layer/__init__.py | 0 vel/rl/{backbone => layer}/nature_cnn.py | 54 ++++++--- vel/rl/layer/premade/__init__.py | 0 vel/rl/layer/premade/purgatory/__init__.py | 0 .../premade/purgatory}/double_nature_cnn.py | 0 .../purgatory}/double_noisy_nature_cnn.py | 0 .../premade/purgatory}/mlp.py | 0 .../premade/purgatory}/mlp_rnn.py | 0 .../premade/purgatory}/nature_cnn_rnn.py | 4 - .../premade/purgatory}/nature_cnn_small.py | 0 .../premade/purgatory}/noisy_nature_cnn.py | 4 - .../premade/purgatory}/rnn.py | 0 vel/rl/{algo => policy}/a2c.py | 59 +++++---- vel/rl/{algo => policy}/a2c_rnn.py | 0 vel/rl/policy/{semipurgatory => }/dqn.py | 38 +++--- vel/rl/{algo => policy}/ppo.py | 57 ++++++--- vel/rl/{algo => policy}/ppo_rnn.py | 0 vel/rl/{algo => policy}/trpo.py | 32 +---- vel/rl/xpolicy/__init__.py | 0 vel/rl/xpolicy/purgatory/__init__.py | 0 .../purgatory/deterministic_policy.py | 0 .../purgatory/old_stochastic_policy.py | 0 .../purgatory/old_stochastic_rnn_policy.py | 0 .../purgatory/q_distributional_policy.py | 0 .../purgatory/q_dueling_policy.py | 0 .../{policy => xpolicy}/purgatory/q_model.py | 50 ++++---- .../purgatory/q_noisy_model.py | 0 .../purgatory/q_rainbow_model.py | 0 .../purgatory/q_stochastic_policy_model.py | 0 vel/rl/xpolicy/semipurgatory/__init__.py | 0 .../semipurgatory/a2c_rnn.py | 0 .../{policy => xpolicy}/semipurgatory/acer.py | 0 .../{policy => xpolicy}/semipurgatory/ddpg.py | 0 .../semipurgatory/distributional_dqn.py | 0 .../semipurgatory/ppo_rnn.py | 0 .../{policy => xpolicy}/stochastic_policy.py | 0 .../stochastic_policy_separate.py | 0 .../stochastic_rnn_policy.py | 0 vel/{calc => util}/process.py | 0 vel/{calc/function.py => util/stats.py} | 0 70 files changed, 633 insertions(+), 247 deletions(-) rename examples-configs/rl/atari/{ => purgatory}/atari_a2c_lstm.yaml (100%) rename vel/calc/__init__.py => examples-configs/rl/atari/purgatory/atari_ddqn.yaml (100%) create mode 100644 examples-configs/rl/atari/purgatory/atari_dqn.yaml rename examples-configs/rl/atari/{ => purgatory}/atari_ppo_gru.yaml (100%) rename vel/rl/algo/__init__.py => examples-configs/rl/atari/purgatory/atari_rainbow.yaml (100%) rename examples-configs/rl/atari/{ => purgatory}/atari_trpo.yaml (100%) create mode 100644 vel/api/network.py create mode 100644 vel/api/size_hint.py 
rename vel/{rl/backbone => net}/__init__.py (100%) rename vel/{rl/policy/purgatory => net/layer}/__init__.py (100%) rename vel/{rl/policy/semipurgatory => net/layer/input}/__init__.py (100%) create mode 100644 vel/net/layer/input/image_to_tensor.py create mode 100644 vel/net/layer/util/__init__.py create mode 100644 vel/net/layer/util/repeat_tensor.py create mode 100644 vel/net/layer_base.py create mode 100644 vel/net/modular.py rename vel/rl/api/{rl_model.py => policy.py} (88%) create mode 100644 vel/rl/layer/__init__.py rename vel/rl/{backbone => layer}/nature_cnn.py (67%) create mode 100644 vel/rl/layer/premade/__init__.py create mode 100644 vel/rl/layer/premade/purgatory/__init__.py rename vel/rl/{backbone => layer/premade/purgatory}/double_nature_cnn.py (100%) rename vel/rl/{backbone => layer/premade/purgatory}/double_noisy_nature_cnn.py (100%) rename vel/rl/{backbone => layer/premade/purgatory}/mlp.py (100%) rename vel/rl/{backbone => layer/premade/purgatory}/mlp_rnn.py (100%) rename vel/rl/{backbone => layer/premade/purgatory}/nature_cnn_rnn.py (96%) rename vel/rl/{backbone => layer/premade/purgatory}/nature_cnn_small.py (100%) rename vel/rl/{backbone => layer/premade/purgatory}/noisy_nature_cnn.py (98%) rename vel/rl/{backbone => layer/premade/purgatory}/rnn.py (100%) rename vel/rl/{algo => policy}/a2c.py (70%) rename vel/rl/{algo => policy}/a2c_rnn.py (100%) rename vel/rl/policy/{semipurgatory => }/dqn.py (74%) rename vel/rl/{algo => policy}/ppo.py (79%) rename vel/rl/{algo => policy}/ppo_rnn.py (100%) rename vel/rl/{algo => policy}/trpo.py (93%) create mode 100644 vel/rl/xpolicy/__init__.py create mode 100644 vel/rl/xpolicy/purgatory/__init__.py rename vel/rl/{policy => xpolicy}/purgatory/deterministic_policy.py (100%) rename vel/rl/{policy => xpolicy}/purgatory/old_stochastic_policy.py (100%) rename vel/rl/{policy => xpolicy}/purgatory/old_stochastic_rnn_policy.py (100%) rename vel/rl/{policy => xpolicy}/purgatory/q_distributional_policy.py (100%) rename vel/rl/{policy => xpolicy}/purgatory/q_dueling_policy.py (100%) rename vel/rl/{policy => xpolicy}/purgatory/q_model.py (70%) rename vel/rl/{policy => xpolicy}/purgatory/q_noisy_model.py (100%) rename vel/rl/{policy => xpolicy}/purgatory/q_rainbow_model.py (100%) rename vel/rl/{policy => xpolicy}/purgatory/q_stochastic_policy_model.py (100%) create mode 100644 vel/rl/xpolicy/semipurgatory/__init__.py rename vel/rl/{policy => xpolicy}/semipurgatory/a2c_rnn.py (100%) rename vel/rl/{policy => xpolicy}/semipurgatory/acer.py (100%) rename vel/rl/{policy => xpolicy}/semipurgatory/ddpg.py (100%) rename vel/rl/{policy => xpolicy}/semipurgatory/distributional_dqn.py (100%) rename vel/rl/{policy => xpolicy}/semipurgatory/ppo_rnn.py (100%) rename vel/rl/{policy => xpolicy}/stochastic_policy.py (100%) rename vel/rl/{policy => xpolicy}/stochastic_policy_separate.py (100%) rename vel/rl/{policy => xpolicy}/stochastic_rnn_policy.py (100%) rename vel/{calc => util}/process.py (100%) rename vel/{calc/function.py => util/stats.py} (100%) diff --git a/examples-configs/rl/atari/atari_a2c.yaml b/examples-configs/rl/atari/atari_a2c.yaml index cdacb76c..cbe9dc46 100644 --- a/examples-configs/rl/atari/atari_a2c.yaml +++ b/examples-configs/rl/atari/atari_a2c.yaml @@ -12,23 +12,20 @@ vec_env: model: - name: vel.rl.algo.a2c + name: vel.rl.policy.a2c entropy_coefficient: 0.01 value_coefficient: 0.5 discount_factor: 0.99 - policy: - name: vel.rl.policy.stochastic_policy - input_block: - name: vel.module.input.image_to_tensor - - backbone: - name: 
vel.rl.backbone.nature_cnn - - input_width: 84 - input_height: 84 - input_channels: 4 # The same as frame_history + net: + name: vel.net.modular + layers: + - name: vel.net.layer.input.image_to_tensor + size: [84, 84, 4] # Number of channels is frame history + - name: vel.rl.layer.nature_cnn + - name: vel.net.layer.util.repeat_tensor + times: 2 # Need to repeat output twice, for action and value heads reinforcer: diff --git a/examples-configs/rl/atari/atari_ppo.yaml b/examples-configs/rl/atari/atari_ppo.yaml index c96c518c..12d043e0 100644 --- a/examples-configs/rl/atari/atari_ppo.yaml +++ b/examples-configs/rl/atari/atari_ppo.yaml @@ -12,7 +12,7 @@ vec_env: model: - name: vel.rl.algo.ppo + name: vel.rl.policy.ppo cliprange: name: vel.function.linear @@ -25,17 +25,15 @@ model: discount_factor: 0.99 # Discount factor for the rewards gae_lambda: 0.95 # Generalized Advantage Estimator Lambda parameter - policy: - name: vel.rl.policy.stochastic_policy + net: + name: vel.net.modular + layers: + - name: vel.net.layer.input.image_to_tensor + size: [84, 84, 4] # Number of channels is frame history + - name: vel.rl.layer.nature_cnn + - name: vel.net.layer.util.repeat_tensor + times: 2 # Need to repeat output twice, for action and value heads - input_block: - name: vel.module.input.image_to_tensor - - backbone: - name: vel.rl.backbone.nature_cnn - input_width: 84 - input_height: 84 - input_channels: 4 # The same as frame_history reinforcer: diff --git a/examples-configs/rl/atari/atari_a2c_lstm.yaml b/examples-configs/rl/atari/purgatory/atari_a2c_lstm.yaml similarity index 100% rename from examples-configs/rl/atari/atari_a2c_lstm.yaml rename to examples-configs/rl/atari/purgatory/atari_a2c_lstm.yaml diff --git a/vel/calc/__init__.py b/examples-configs/rl/atari/purgatory/atari_ddqn.yaml similarity index 100% rename from vel/calc/__init__.py rename to examples-configs/rl/atari/purgatory/atari_ddqn.yaml diff --git a/examples-configs/rl/atari/purgatory/atari_dqn.yaml b/examples-configs/rl/atari/purgatory/atari_dqn.yaml new file mode 100644 index 00000000..a811880a --- /dev/null +++ b/examples-configs/rl/atari/purgatory/atari_dqn.yaml @@ -0,0 +1,82 @@ +name: 'atari_dqn' + + +env: + name: vel.rl.env.classic_atari + game: !param game = 'BreakoutNoFrameskip-v4' + + +vec_env: + name: vel.rl.vecenv.dummy + frame_history: 4 # How many stacked frames go into a single observation + + +model: + name: vel.rl.algo.dqn + + target_update_frequency: 10_000 # After how many batches to update the target network + discount_factor: 0.99 + + backbone: + name: vel.module.sequence + modules: + - name: vel.modules.input.image_to_tensor + - name: vel.rl.models.backbone.nature_cnn + input_width: 84 + input_height: 84 + input_channels: 4 # The same as frame_history + + +reinforcer: + name: vel.rl.reinforcer.buffered_off_policy_iteration_reinforcer + + env_roller: + name: vel.rl.env_roller.transition_replay_env_roller + + replay_buffer: + name: vel.rl.buffer.circular_replay_buffer + + buffer_initial_size: 30_000 # How many samples we need in the buffer before we start using replay buffer + buffer_capacity: 250_000 + + # Because env has a framestack already built-in, save memory by encoding only last frames in the replay buffer + frame_stack_compensation: true + frame_history: 4 # How many stacked frames go into a single observation + + action_noise: + name: vel.rl.module.noise.eps_greedy + + epsilon: + name: vel.function.linear_and_constant + end_of_interpolation: 0.1 + initial_value: 1.0 + final_value: 0.1 + + rollout_steps: 4 # 
How many environment steps (per env) to perform per batch of training + training_steps: 32 # How many environment steps (per env) to perform per training round + parallel_envs: 1 # Roll out only one env in parallel, just like in DeepMind paper + + +optimizer: + name: vel.optimizer.rmsprop + lr: 2.5e-4 + alpha: 0.95 + momentum: 0.95 + epsilon: 1.0e-1 + max_grad_norm: 0.5 + + +commands: + train: + name: vel.rl.command.rl_train_command + total_frames: 1.1e7 # 11M + batches_per_epoch: 2500 + + record: + name: vel.rl.command.record_movie_command + takes: 10 + videoname: 'atari_vid_{:04}.avi' + + evaluate: + name: vel.rl.command.evaluate_env_command + takes: 100 diff --git a/examples-configs/rl/atari/atari_ppo_gru.yaml b/examples-configs/rl/atari/purgatory/atari_ppo_gru.yaml similarity index 100% rename from examples-configs/rl/atari/atari_ppo_gru.yaml rename to examples-configs/rl/atari/purgatory/atari_ppo_gru.yaml diff --git a/vel/rl/algo/__init__.py b/examples-configs/rl/atari/purgatory/atari_rainbow.yaml similarity index 100% rename from vel/rl/algo/__init__.py rename to examples-configs/rl/atari/purgatory/atari_rainbow.yaml diff --git a/examples-configs/rl/atari/atari_trpo.yaml b/examples-configs/rl/atari/purgatory/atari_trpo.yaml similarity index 100% rename from examples-configs/rl/atari/atari_trpo.yaml rename to examples-configs/rl/atari/purgatory/atari_trpo.yaml diff --git a/vel/api/__init__.py b/vel/api/__init__.py index 358a356f..9ddf09d8 100644 --- a/vel/api/__init__.py +++ b/vel/api/__init__.py @@ -1,7 +1,9 @@ from .callback import Callback from .info import BatchInfo, EpochInfo, TrainingInfo +from .size_hint import SizeHint, SizeHints +from .network import Network, BackboneNetwork from .model import ( - Model, OptimizedModel, GradientModel, LossFunctionModel, BackboneModel, LinearBackboneModel + Model, ValidatedModel, OptimizedModel, GradientModel, LossFunctionModel ) from .model_config import ModelConfig from .model_factory import ModelFactory diff --git a/vel/api/model.py b/vel/api/model.py index 9032e098..693d6a46 100644 --- a/vel/api/model.py +++ b/vel/api/model.py @@ -4,14 +4,14 @@ import vel.util.module_util as mu from vel.api.optimizer import VelOptimizer, OptimizerFactory -from vel.api.scheduler import SchedulerFactory -from vel.api.callback import Callback from vel.metric.loss_metric import Loss from vel.util.summary import summary +from .network import Network -class Model(nn.Module): - """ Class representing full neural network model """ + +class Model(Network): + """ Class representing full neural network model, generally used to solve some problem """ def metrics(self) -> list: """ Set of metrics for this model """ @@ -48,19 +48,6 @@ def summary(self, input_size=None): else: summary(self, input_size) - def reset_weights(self): - """ Call proper initializers for the weights """ - pass - - @property - def is_stateful(self) -> bool: - """ If the model has a state that needs to be fed between individual observations """ - return False - - def zero_state(self, batch_size): - """ Potential state for the model """ - return None - class OptimizedModel(Model): """ Model that is being optimized by an 'optimizer' """ @@ -77,6 +64,10 @@ def optimize(self, data: dict, optimizer: VelOptimizer) -> dict: """ raise NotImplementedError + +class ValidatedModel(OptimizedModel): + """ Model that also has a validation operation """ + def validate(self, data: dict) -> dict: """ Perform one step of model inference without optimization @@ -85,7 +76,7 @@ def validate(self, data: dict) -> dict: 
raise NotImplementedError -class GradientModel(OptimizedModel): +class GradientModel(ValidatedModel): """ Model that calculates a single gradient and optimizes it """ def optimize(self, data: dict, optimizer: VelOptimizer) -> dict: @@ -144,19 +135,3 @@ def calculate_gradient(self, data: dict) -> dict: def loss_value(self, x_data, y_true, y_pred) -> torch.tensor: """ Calculate a value of loss function """ raise NotImplementedError - - -class BackboneModel(Model): - """ Model that serves as a backbone network to connect your heads to """ - - -class LinearBackboneModel(BackboneModel): - """ - Model that serves as a backbone network to connect your heads to. - Has a final output of a single-dimensional linear layer. - """ - - @property - def output_dim(self) -> int: - """ Final dimension of model output """ - raise NotImplementedError diff --git a/vel/api/model_factory.py b/vel/api/model_factory.py index eeb533a0..0015f006 100644 --- a/vel/api/model_factory.py +++ b/vel/api/model_factory.py @@ -1,11 +1,11 @@ -from .model import Model +from .network import Network from vel.internal.generic_factory import GenericFactory class ModelFactory: """ Factory class for models """ - def instantiate(self, **extra_args) -> Model: + def instantiate(self, **extra_args) -> Network: raise NotImplementedError @staticmethod diff --git a/vel/api/network.py b/vel/api/network.py new file mode 100644 index 00000000..b6c8b97d --- /dev/null +++ b/vel/api/network.py @@ -0,0 +1,28 @@ +import torch.nn as nn + +from .size_hint import SizeHints + + +class Network(nn.Module): + """ Vel wrapper over nn.Module offering a few internally useful utilities """ + + def reset_weights(self): + """ Call proper initializers for the weights """ + pass + + @property + def is_stateful(self) -> bool: + """ If the model has a state that needs to be fed between individual observations """ + return False + + def zero_state(self, batch_size): + """ Potential state for the model """ + return None + + +class BackboneNetwork(Network): + """ Network, whose output feeds into other models. Needs to provide size hints. """ + + def size_hints(self) -> SizeHints: + """ Size hints for this network """ + raise NotImplementedError diff --git a/vel/api/size_hint.py b/vel/api/size_hint.py new file mode 100644 index 00000000..d6a3879b --- /dev/null +++ b/vel/api/size_hint.py @@ -0,0 +1,72 @@ +import typing +import collections.abc as abc + +from vel.exception import VelException + + +class SizeHint(tuple): + """ Neural network hint of a layer size. 
Should consist of either integers or Nones """ + + def __new__(cls, *args): + return super().__new__(cls, tuple(args)) + + def last(self) -> int: + """ Return last part of the size hint, make sure it's not None """ + assert self[-1] is not None, "Size hint shouldn't be None" + return self[-1] + + def __repr__(self): + internal = ", ".join([self._inner_repr(s) for s in self]) + return f"{self.__class__.__name__}({internal})" + + def _inner_repr(self, x): + if x is None: + return '-' + else: + return repr(x) + + +SizeTuple = typing.Tuple[SizeHint] +SizeDict = typing.Dict[str, SizeHint] + + +class SizeHints: + """ SizeHint, tuple of size hints or dict of size hints """ + + TYPE_NONE = 0 + TYPE_SIZE = 1 + TYPE_TUPLE = 2 + TYPE_DICT = 3 + + def __init__(self, size_hints: typing.Union[SizeHint, SizeTuple, SizeDict] = None): + self.size_hints = size_hints + + if self.size_hints is None: + self.type = self.TYPE_NONE + elif isinstance(self.size_hints, SizeHint): + self.type = self.TYPE_SIZE + elif isinstance(self.size_hints, abc.Sequence): + self.size_hints = tuple(self.size_hints) + self.type = self.TYPE_TUPLE + elif isinstance(self.size_hints, abc.Mapping): + self.type = self.TYPE_DICT + else: + raise VelException("Invalid size hints: {}".format(self.size_hints)) + + def assert_tuple(self, length) -> SizeTuple: + """ Assert given size hints is a tuple """ + assert self.type == self.TYPE_TUPLE, "Network needs to return a tuple" + assert len(self.size_hints) == length, "Network must return {} results".format(length) + return self.size_hints + + def assert_single(self, length: typing.Optional[int] = None) -> SizeHint: + """ Make sure there is a single tensor as a size hint """ + assert self.type == self.TYPE_SIZE, "Layer input must be single tensor" + + if length is not None: + assert len(self.size_hints) == length, f"Layer input must have shape [{length}]" + + return self.size_hints + + def __repr__(self): + return repr(self.size_hints) diff --git a/vel/module/input/embedding.py b/vel/module/input/embedding.py index d055e257..b576ddfa 100644 --- a/vel/module/input/embedding.py +++ b/vel/module/input/embedding.py @@ -41,7 +41,3 @@ def instantiate(**_): return EmbeddingInput(alphabet_size, output_dim, pretrained=pretrained, frozen=frozen, source=source) return ModelFactory.generic(instantiate) - - -# Scripting interface -EmbeddingInputFactory = create diff --git a/vel/module/input/flatten.py b/vel/module/input/flatten.py index 0972616d..faf424df 100644 --- a/vel/module/input/flatten.py +++ b/vel/module/input/flatten.py @@ -20,7 +20,3 @@ def instantiate(**_): return Flatten() return ModelFactory.generic(instantiate) - - -# Scripting interface -FlattenInputFactory = create diff --git a/vel/module/input/identity.py b/vel/module/input/identity.py index 7018051e..6b9ee547 100644 --- a/vel/module/input/identity.py +++ b/vel/module/input/identity.py @@ -19,7 +19,3 @@ def instantiate(**_): return Identity() return ModelFactory.generic(instantiate) - - -# Scripting interface -IdentityFactory = create diff --git a/vel/module/input/image_to_tensor.py b/vel/module/input/image_to_tensor.py index 13b58ebd..b02a3229 100644 --- a/vel/module/input/image_to_tensor.py +++ b/vel/module/input/image_to_tensor.py @@ -1,36 +1,26 @@ import torch -from vel.api import BackboneModel, ModelFactory +from vel.api import Network -class ImageToTensor(BackboneModel): +def image_to_tensor(image: torch.Tensor) -> torch.Tensor: + """ Convert pytorch image (b, w, h, c) into tensor (b, c, w, h) float32 """ + result = image.permute(0, 3, 
1, 2).contiguous() + + if result.dtype == torch.uint8: + result = result.type(torch.float) / 255.0 + else: + result = result.type(torch.float) + + return result + + +class ImageToTensor(Network): """ Convert simple image to tensor. Flip channels to a [C, W, H] order and potentially convert 8-bit color values to floats """ - def __init__(self): - super().__init__() - - def reset_weights(self): - pass - def forward(self, image): - result = image.permute(0, 3, 1, 2).contiguous() - - if result.dtype == torch.uint8: - result = result.type(torch.float) / 255.0 - else: - result = result.type(torch.float) - - return result - - -def create(): - """ Vel factory function """ - return ModelFactory.generic(ImageToTensor) - - -# Scripting interface -ImageToTensorFactory = create + return image_to_tensor(image) diff --git a/vel/module/input/normalize_observations.py b/vel/module/input/normalize_observations.py index 914cb5f6..d3013238 100644 --- a/vel/module/input/normalize_observations.py +++ b/vel/module/input/normalize_observations.py @@ -58,7 +58,3 @@ def instantiate(**_): return NormalizeObservations(input_shape) return ModelFactory.generic(instantiate) - - -# Scripting interface -NormalizeObservationsFactory = create diff --git a/vel/module/input/one_hot_encoding.py b/vel/module/input/one_hot_encoding.py index eaee642c..125bdb47 100644 --- a/vel/module/input/one_hot_encoding.py +++ b/vel/module/input/one_hot_encoding.py @@ -27,7 +27,3 @@ def instantiate(**_): return OneHotEncodingInput(alphabet_size) return ModelFactory.generic(instantiate) - - -# Scripting interface -OneHotEncodingInputFactory = create diff --git a/vel/module/input/sequence.py b/vel/module/input/sequence.py index 43d41ad1..51c50c82 100644 --- a/vel/module/input/sequence.py +++ b/vel/module/input/sequence.py @@ -19,7 +19,3 @@ def instantiate(**_): return SequenceInput([f.instantiate() for f in modules]) return ModelFactory.generic(instantiate) - - -# Scripting interface -SequenceInputFactory = create diff --git a/vel/rl/backbone/__init__.py b/vel/net/__init__.py similarity index 100% rename from vel/rl/backbone/__init__.py rename to vel/net/__init__.py diff --git a/vel/rl/policy/purgatory/__init__.py b/vel/net/layer/__init__.py similarity index 100% rename from vel/rl/policy/purgatory/__init__.py rename to vel/net/layer/__init__.py diff --git a/vel/rl/policy/semipurgatory/__init__.py b/vel/net/layer/input/__init__.py similarity index 100% rename from vel/rl/policy/semipurgatory/__init__.py rename to vel/net/layer/input/__init__.py diff --git a/vel/net/layer/input/image_to_tensor.py b/vel/net/layer/input/image_to_tensor.py new file mode 100644 index 00000000..cd034320 --- /dev/null +++ b/vel/net/layer/input/image_to_tensor.py @@ -0,0 +1,47 @@ +import typing + +from vel.api import SizeHints, SizeHint +from vel.net.modular import LayerFactory, Layer +from vel.module.input.image_to_tensor import image_to_tensor + + +class ImageToTensorLayer(Layer): + """ + Convert simple image to tensor. 
+ + Flip channels to a [C, W, H] order and potentially convert 8-bit color values to floats + """ + def __init__(self, name: str, size: tuple = None): + super().__init__(name) + + if size is not None: + assert len(size) == 3, "Images must have three dimensions" + self.w, self.h, self.c = size + else: + self.w, self.h, self.c = (None, None, None) + + def forward(self, direct, state: dict = None, context: dict = None): + return image_to_tensor(direct) + + def size_hints(self) -> SizeHints: + return SizeHints(SizeHint(None, self.c, self.w, self.h)) + + +class ImageToTensorLayerFactory(LayerFactory): + def __init__(self, size: tuple = None): + self.size = size + + @property + def name_base(self) -> str: + """ Base of layer name """ + return "image_to_tensor" + + def instantiate(self, name: str, direct_input: SizeHints, context: dict) -> Layer: + """ Create a given layer object """ + # Potential improvement here is to use either direct input or size parameter + return ImageToTensorLayer(name=name, size=self.size) + + +def create(size: tuple = None): + """ Vel factory function """ + return ImageToTensorLayerFactory(size=size) diff --git a/vel/net/layer/util/__init__.py b/vel/net/layer/util/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/vel/net/layer/util/repeat_tensor.py b/vel/net/layer/util/repeat_tensor.py new file mode 100644 index 00000000..58ea5dc1 --- /dev/null +++ b/vel/net/layer/util/repeat_tensor.py @@ -0,0 +1,41 @@ +import typing + +from vel.api import SizeHints, SizeHint +from vel.net.modular import LayerFactory, Layer + + +class RepeatTensor(Layer): + """ Repeat single tensor multiple times """ + + def __init__(self, name: str, times: int, size_hint: SizeHint): + super().__init__(name) + self.times = times + self.size_hint = size_hint + + def forward(self, direct, state: dict = None, context: dict = None): + return tuple([direct] * self.times) + + def size_hints(self) -> SizeHints: + return SizeHints(tuple([self.size_hint] * self.times)) + + +class RepeatTensorFactory(LayerFactory): + def __init__(self, times: int): + self.times = times + + @property + def name_base(self) -> str: + """ Base of layer name """ + return "repeat_tensor" + + def instantiate(self, name: str, direct_input: SizeHints, context: dict) -> Layer: + return RepeatTensor( + name=name, + times=self.times, + size_hint=direct_input.assert_single() + ) + + +def create(times: int): + """ Vel factory function """ + return RepeatTensorFactory(times=times) diff --git a/vel/net/layer_base.py b/vel/net/layer_base.py new file mode 100644 index 00000000..62dcabcb --- /dev/null +++ b/vel/net/layer_base.py @@ -0,0 +1,31 @@ +import typing + +from vel.api import BackboneNetwork, SizeHints, SizeHint + + +class Layer(BackboneNetwork): + def __init__(self, name: str): + super().__init__() + self.name = name + + def state_size_hints(self) -> typing.Dict[str, SizeHint]: + """ Size hints for state part of this network """ + return {} + + def forward(self, direct, state: dict = None, context: dict = None): + """ Forward propagation of a single layer """ + raise NotImplementedError + + +class LayerFactory: + """ Factory for layers """ + + @property + def name_base(self) -> str: + """ Base of layer name """ + raise NotImplementedError + + def instantiate(self, name: str, direct_input: SizeHints, context: dict) -> Layer: + """ Create a given layer object """ + raise NotImplementedError + diff --git a/vel/net/modular.py b/vel/net/modular.py new file mode 100644 index 00000000..e3147c3f --- /dev/null +++ 
b/vel/net/modular.py @@ -0,0 +1,113 @@ +import typing +import collections +import torch.nn as nn + +from vel.api import Network, BackboneNetwork, ModelFactory, SizeHints, SizeHint + +from .layer_base import Layer, LayerFactory + + +def instantiate_layers(layers: [LayerFactory]) -> nn.Module: + """ Instantiate list of layer factories into PyTorch Module """ + size_hint = SizeHints() # Empty input at first + module_dict = collections.OrderedDict() + context = {} + + for idx, layer_factory in enumerate(layers): + counter = idx + 1 + name = "{}_{:04d}".format(layer_factory.name_base, counter) + + layer = layer_factory.instantiate(name=name, direct_input=size_hint, context=context) + size_hint = layer.size_hints() + + module_dict[name] = layer + + return nn.Sequential(module_dict) + + +class ModularNetwork(BackboneNetwork): + """ Network that is built from layers """ + + def __init__(self, layers: nn.Module): + super().__init__() + + self.layers = layers + assert not any(l.is_stateful for l in self.layers), "Does not support stateful layers" + + def reset_weights(self): + """ Call proper initializers for the weights """ + for l in self.layers: + l.reset_weights() + + @property + def is_stateful(self) -> bool: + """ If the model has a state that needs to be fed between individual observations """ + return False + + def size_hints(self) -> SizeHints: + return self.layers[-1].size_hints() + + def zero_state(self, batch_size): + """ Potential state for the model """ + return None + + def reset_state(self, state, dones): + """ Reset the state after the episode has been terminated """ + raise NotImplementedError + + def forward(self, input_data, state=None): + return self.layers(input_data) + + +class StatefulModularNetwork(BackboneNetwork): + """ Modular network handling the state between the episodes """ + + def __init__(self, layers: nn.Module): + super().__init__() + + self.layers = layers + + def reset_weights(self): + """ Call proper initializers for the weights """ + for l in self.layers: + l.reset_weights() + + @property + def is_stateful(self) -> bool: + """ If the model has a state that needs to be fed between individual observations """ + return True + + def size_hints(self) -> SizeHints: + return self.layers[-1].size_hints() + + def zero_state(self, batch_size): + """ Potential state for the model """ + raise NotImplementedError + + def reset_state(self, state, dones): + """ Reset the state after the episode has been terminated """ + raise NotImplementedError + + def forward(self, input_data, state=None): + raise NotImplementedError + + +class ModularNetworkFactory(ModelFactory): + """ Factory class for the modular network """ + def __init__(self, layers: [LayerFactory]): + self.layers = layers + + def instantiate(self, **extra_args) -> BackboneNetwork: + """ Create either stateful or not modular network instance """ + layers = instantiate_layers(self.layers) + is_stateful = any(l.is_stateful for l in layers) + + if is_stateful: + return StatefulModularNetwork(layers) + else: + return ModularNetwork(layers) + + +def create(layers: [LayerFactory]): + """ Vel factory function """ + return ModularNetworkFactory(layers) diff --git a/vel/rl/api/__init__.py b/vel/rl/api/__init__.py index 699a8bdb..8102ab03 100644 --- a/vel/rl/api/__init__.py +++ b/vel/rl/api/__init__.py @@ -1,6 +1,6 @@ from .env_base import EnvFactory, VecEnvFactory from .env_roller import EnvRollerBase, ReplayEnvRollerBase, EnvRollerFactoryBase, ReplayEnvRollerFactoryBase from .rollout import Rollout, Trajectories, Transitions 
-from .rl_model import RlPolicy +from .policy import RlPolicy from .reinforcer_base import Reinforcer, ReinforcerFactory from .replay_buffer import ReplayBuffer, ReplayBufferFactory diff --git a/vel/rl/api/rl_model.py b/vel/rl/api/policy.py similarity index 88% rename from vel/rl/api/rl_model.py rename to vel/rl/api/policy.py index 0e0eb609..e5701d4d 100644 --- a/vel/rl/api/rl_model.py +++ b/vel/rl/api/policy.py @@ -1,8 +1,8 @@ -from vel.api import Model, VelOptimizer, OptimizerFactory, BatchInfo +from vel.api import OptimizedModel, VelOptimizer, OptimizerFactory, BatchInfo from vel.rl.api import Rollout -class RlPolicy(Model): +class RlPolicy(OptimizedModel): """ Base class for reinforcement learning policies """ def __init__(self, discount_factor: float): @@ -41,6 +41,8 @@ def optimize(self, batch_info: BatchInfo, rollout: Rollout) -> dict: for key, value in opt_metrics.items(): metrics[key] = value + self.post_optimization_step(batch_info, rollout) + return metrics def calculate_gradient(self, batch_info: BatchInfo, rollout: Rollout) -> dict: @@ -50,6 +52,10 @@ def calculate_gradient(self, batch_info: BatchInfo, rollout: Rollout) -> dict: """ raise NotImplementedError + def post_optimization_step(self, batch_info: BatchInfo, rollout: Rollout): + """ Optional operations to perform after optimization """ + pass + def reset_state(self, state, dones): """ Reset the state after the episode has been terminated """ raise NotImplementedError diff --git a/vel/rl/layer/__init__.py b/vel/rl/layer/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/vel/rl/backbone/nature_cnn.py b/vel/rl/layer/nature_cnn.py similarity index 67% rename from vel/rl/backbone/nature_cnn.py rename to vel/rl/layer/nature_cnn.py index 8ce20ce6..b44b84d9 100644 --- a/vel/rl/backbone/nature_cnn.py +++ b/vel/rl/layer/nature_cnn.py @@ -5,6 +5,7 @@ Under MIT license. 
""" import numpy as np +import typing import torch.nn as nn import torch.nn.init as init @@ -12,15 +13,18 @@ import vel.util.network as net_util -from vel.api import LinearBackboneModel, ModelFactory +from vel.api import ModelFactory, SizeHint, SizeHints +from vel.net.modular import Layer, LayerFactory -class NatureCnn(LinearBackboneModel): + +class NatureCnn(Layer): """ Neural network as defined in the paper 'Human-level control through deep reinforcement learning' """ - def __init__(self, input_width, input_height, input_channels, output_dim=512): - super().__init__() - self._output_dim = output_dim + def __init__(self, name: str, input_width, input_height, input_channels, output_dim=512): + super().__init__(name) + + self.output_dim = output_dim self.conv1 = nn.Conv2d( in_channels=input_channels, @@ -57,11 +61,6 @@ def __init__(self, input_width, input_height, input_channels, output_dim=512): self.output_dim ) - @property - def output_dim(self) -> int: - """ Final dimension of model output """ - return self._output_dim - def reset_weights(self): """ Call proper initializers for the weights """ for m in self.modules(): @@ -74,7 +73,10 @@ def reset_weights(self): init.orthogonal_(m.weight, gain=np.sqrt(2)) init.constant_(m.bias, 0.0) - def forward(self, image): + def size_hints(self) -> SizeHints: + return SizeHints(SizeHint(None, self.output_dim)) + + def forward(self, image, state: dict = None, context: dict = None): result = image result = F.relu(self.conv1(result)) result = F.relu(self.conv2(result)) @@ -83,16 +85,30 @@ def forward(self, image): return F.relu(self.linear_layer(flattened)) -def create(input_width, input_height, input_channels=1, output_dim=512): - """ Vel factory function """ - def instantiate(**_): +class NatureCnnFactory(LayerFactory): + """ Nature Cnn Network Factory """ + + def __init__(self, output_dim: int = 512): + self.output_dim = output_dim + + @property + def name_base(self) -> str: + """ Base of layer name """ + return "nature_cnn" + + def instantiate(self, name: str, direct_input: SizeHints, context: dict) -> Layer: + (b, c, w, h) = direct_input.assert_single(4) + return NatureCnn( - input_width=input_width, input_height=input_height, input_channels=input_channels, - output_dim=output_dim + name=name, + input_width=w, + input_height=h, + input_channels=c, + output_dim=self.output_dim ) - return ModelFactory.generic(instantiate) +def create(output_dim=512): + """ Vel factory function """ + return NatureCnnFactory(output_dim=output_dim) -# Scripting interface -NatureCnnFactory = create diff --git a/vel/rl/layer/premade/__init__.py b/vel/rl/layer/premade/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/vel/rl/layer/premade/purgatory/__init__.py b/vel/rl/layer/premade/purgatory/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/vel/rl/backbone/double_nature_cnn.py b/vel/rl/layer/premade/purgatory/double_nature_cnn.py similarity index 100% rename from vel/rl/backbone/double_nature_cnn.py rename to vel/rl/layer/premade/purgatory/double_nature_cnn.py diff --git a/vel/rl/backbone/double_noisy_nature_cnn.py b/vel/rl/layer/premade/purgatory/double_noisy_nature_cnn.py similarity index 100% rename from vel/rl/backbone/double_noisy_nature_cnn.py rename to vel/rl/layer/premade/purgatory/double_noisy_nature_cnn.py diff --git a/vel/rl/backbone/mlp.py b/vel/rl/layer/premade/purgatory/mlp.py similarity index 100% rename from vel/rl/backbone/mlp.py rename to vel/rl/layer/premade/purgatory/mlp.py diff --git 
a/vel/rl/backbone/mlp_rnn.py b/vel/rl/layer/premade/purgatory/mlp_rnn.py similarity index 100% rename from vel/rl/backbone/mlp_rnn.py rename to vel/rl/layer/premade/purgatory/mlp_rnn.py diff --git a/vel/rl/backbone/nature_cnn_rnn.py b/vel/rl/layer/premade/purgatory/nature_cnn_rnn.py similarity index 96% rename from vel/rl/backbone/nature_cnn_rnn.py rename to vel/rl/layer/premade/purgatory/nature_cnn_rnn.py index 8888da6e..6dccd7c9 100644 --- a/vel/rl/backbone/nature_cnn_rnn.py +++ b/vel/rl/layer/premade/purgatory/nature_cnn_rnn.py @@ -56,7 +56,3 @@ def instantiate(**_): ) return ModelFactory.generic(instantiate) - - -# Add this to make nicer scripting interface -NatureCnnFactory = create diff --git a/vel/rl/backbone/nature_cnn_small.py b/vel/rl/layer/premade/purgatory/nature_cnn_small.py similarity index 100% rename from vel/rl/backbone/nature_cnn_small.py rename to vel/rl/layer/premade/purgatory/nature_cnn_small.py diff --git a/vel/rl/backbone/noisy_nature_cnn.py b/vel/rl/layer/premade/purgatory/noisy_nature_cnn.py similarity index 98% rename from vel/rl/backbone/noisy_nature_cnn.py rename to vel/rl/layer/premade/purgatory/noisy_nature_cnn.py index d258543e..08ff71ba 100644 --- a/vel/rl/backbone/noisy_nature_cnn.py +++ b/vel/rl/layer/premade/purgatory/noisy_nature_cnn.py @@ -101,7 +101,3 @@ def instantiate(**_): ) return ModelFactory.generic(instantiate) - - -# Scripting interface -NatureCnnFactory = create diff --git a/vel/rl/backbone/rnn.py b/vel/rl/layer/premade/purgatory/rnn.py similarity index 100% rename from vel/rl/backbone/rnn.py rename to vel/rl/layer/premade/purgatory/rnn.py diff --git a/vel/rl/algo/a2c.py b/vel/rl/policy/a2c.py similarity index 70% rename from vel/rl/algo/a2c.py rename to vel/rl/policy/a2c.py index 4fd529c7..858cf5e4 100644 --- a/vel/rl/algo/a2c.py +++ b/vel/rl/policy/a2c.py @@ -1,17 +1,21 @@ +import gym import torch import torch.nn.functional as F from vel.metric.base import AveragingNamedMetric -from vel.calc.function import explained_variance -from vel.api import BackboneModel, ModelFactory, BatchInfo +from vel.util.stats import explained_variance +from vel.api import ModelFactory, BatchInfo, BackboneNetwork from vel.rl.api import RlPolicy, Rollout, Trajectories from vel.rl.discount_bootstrap import discount_bootstrap_gae +from vel.rl.module.stochastic_action_head import StochasticActionHead +from vel.rl.module.value_head import ValueHead class A2C(RlPolicy): """ Simplest policy gradient - calculate loss as an advantage of an actor versus value function """ - def __init__(self, policy: BackboneModel, entropy_coefficient, value_coefficient, discount_factor: float, + def __init__(self, net: BackboneNetwork, action_space: gym.Space, + entropy_coefficient, value_coefficient, discount_factor: float, gae_lambda=1.0): super().__init__(discount_factor) @@ -19,26 +23,40 @@ def __init__(self, policy: BackboneModel, entropy_coefficient, value_coefficient self.value_coefficient = value_coefficient self.gae_lambda = gae_lambda - self.policy = policy + self.net = net - assert not self.policy.is_stateful, "For stateful policies, try A2CRnn" + assert not self.net.is_stateful, "For stateful policies, use A2CRnn" + + # Make sure network returns two results + (action_size, value_size) = self.net.size_hints().assert_tuple(2) + + self.action_head = StochasticActionHead( + action_space=action_space, + input_dim=action_size.last(), + ) + + self.value_head = ValueHead( + input_dim=value_size.last() + ) def reset_weights(self): """ Initialize properly model weights """ - 
self.policy.reset_weights() + self.net.reset_weights() + self.action_head.reset_weights() + self.value_head.reset_weights() def forward(self, observation, state=None): """ Calculate model outputs """ - return self.policy(observation, state=state) + action_hidden, value_hidden = self.net(observation, state=state) + return self.action_head(action_hidden), self.value_head(value_hidden) def act(self, observation, state=None, deterministic=False): """ Select actions based on model's output """ - action_pd_params, value_output = self(observation, state=state) - - actions = self.policy.action_head.sample(action_pd_params, deterministic=deterministic) + action_pd_params, value_output = self(observation) + actions = self.action_head.sample(action_pd_params, deterministic=deterministic) # log likelihood of selected action - logprobs = self.policy.action_head.logprob(actions, action_pd_params) + logprobs = self.action_head.logprob(actions, action_pd_params) return { 'actions': actions, @@ -78,8 +96,8 @@ def calculate_gradient(self, batch_info: BatchInfo, rollout: Rollout) -> dict: pd_params, model_values = self(observations) - log_probs = self.policy.action_head.logprob(actions, pd_params) - entropy = self.policy.action_head.entropy(pd_params) + log_probs = self.action_head.logprob(actions, pd_params) + entropy = self.action_head.entropy(pd_params) # Actual calculations. Pretty trivial policy_loss = -torch.mean(advantages * log_probs) @@ -113,8 +131,8 @@ def metrics(self) -> list: class A2CFactory(ModelFactory): """ Factory class for policy gradient models """ - def __init__(self, policy, entropy_coefficient, value_coefficient, discount_factor, gae_lambda=1.0): - self.policy = policy + def __init__(self, net, entropy_coefficient, value_coefficient, discount_factor, gae_lambda=1.0): + self.net = net self.entropy_coefficient = entropy_coefficient self.value_coefficient = value_coefficient self.discount_factor = discount_factor @@ -122,11 +140,12 @@ def __init__(self, policy, entropy_coefficient, value_coefficient, discount_fact def instantiate(self, **extra_args): """ Instantiate the model """ - # action_space = extra_args.pop('action_space') - policy = self.policy.instantiate(**extra_args) + action_space = extra_args.pop('action_space') + net = self.net.instantiate(**extra_args) return A2C( - policy=policy, + net=net, + action_space=action_space, entropy_coefficient=self.entropy_coefficient, value_coefficient=self.value_coefficient, discount_factor=self.discount_factor, @@ -134,10 +153,10 @@ def instantiate(self, **extra_args): ) -def create(policy: BackboneModel, entropy_coefficient, value_coefficient, discount_factor, gae_lambda=1.0): +def create(net: ModelFactory, entropy_coefficient, value_coefficient, discount_factor, gae_lambda=1.0): """ Vel factory function """ return A2CFactory( - policy=policy, + net=net, entropy_coefficient=entropy_coefficient, value_coefficient=value_coefficient, discount_factor=discount_factor, diff --git a/vel/rl/algo/a2c_rnn.py b/vel/rl/policy/a2c_rnn.py similarity index 100% rename from vel/rl/algo/a2c_rnn.py rename to vel/rl/policy/a2c_rnn.py diff --git a/vel/rl/policy/semipurgatory/dqn.py b/vel/rl/policy/dqn.py similarity index 74% rename from vel/rl/policy/semipurgatory/dqn.py rename to vel/rl/policy/dqn.py index bd3c355e..c6ea4933 100644 --- a/vel/rl/policy/semipurgatory/dqn.py +++ b/vel/rl/policy/dqn.py @@ -2,20 +2,20 @@ import torch.nn.functional as F import torch.nn.utils -from vel.api import ModelFactory +from vel.api import ModelFactory, BackboneModel from 
vel.metric import AveragingNamedMetric -from vel.rl.api import OptimizerAlgoBase +from vel.rl.api import RlPolicy -class DeepQLearning(OptimizerAlgoBase): +class DeepQLearning(RlPolicy): """ Deep Q-Learning algorithm """ - def __init__(self, model_factory: ModelFactory, discount_factor: float, double_dqn: bool, - target_update_frequency: int, max_grad_norm: float): - super().__init__(max_grad_norm) + def __init__(self, backbone: BackboneModel, + discount_factor: float, double_dqn: bool, + target_update_frequency: int): + super().__init__(discount_factor) - self.model_factory = model_factory - self.discount_factor = discount_factor + self.backbone = backbone self.double_dqn = double_dqn self.target_update_frequency = target_update_frequency @@ -28,7 +28,7 @@ def initialize(self, training_info, model, environment, device): self.target_model.load_state_dict(model.state_dict()) self.target_model.eval() - def calculate_gradient(self, batch_info, device, model, rollout): + def calculate_gradient(self, batch_info: BatchInfo, rollout: Rollout) -> dict: """ Calculate loss of the supplied rollout """ evaluator = model.evaluate(rollout) @@ -74,29 +74,29 @@ def calculate_gradient(self, batch_info, device, model, rollout): 'average_q_target': torch.mean(estimated_return).item() } - def post_optimization_step(self, batch_info, device, model, rollout): + def post_optimization_step(self, batch_info, rollout): """ Steps to take after optimization has been done""" if batch_info.aggregate_batch_number % self.target_update_frequency == 0: - self.target_model.load_state_dict(model.state_dict()) + self.target_model.load_state_dict(self.state_dict()) self.target_model.eval() def metrics(self) -> list: """ List of metrics to track for this learning process """ return [ - AveragingNamedMetric("loss"), - AveragingNamedMetric("average_q_selected"), - AveragingNamedMetric("average_q_target"), - AveragingNamedMetric("grad_norm"), + AveragingNamedMetric("loss", scope="model"), + AveragingNamedMetric("average_q_selected", scope="model"), + AveragingNamedMetric("average_q_target", scope="model") ] -def create(model: ModelFactory, discount_factor: float, target_update_frequency: int, - max_grad_norm: float, double_dqn: bool = False): +def create(backbone: ModelFactory, + discount_factor: float, target_update_frequency: int, + double_dqn: bool = False): """ Vel factory function """ + return DeepQLearning( - model_factory=model, + backbone=backbone, discount_factor=discount_factor, double_dqn=double_dqn, target_update_frequency=target_update_frequency, - max_grad_norm=max_grad_norm ) diff --git a/vel/rl/algo/ppo.py b/vel/rl/policy/ppo.py similarity index 79% rename from vel/rl/algo/ppo.py rename to vel/rl/policy/ppo.py index 483b4830..58be0a22 100644 --- a/vel/rl/algo/ppo.py +++ b/vel/rl/policy/ppo.py @@ -1,19 +1,23 @@ +import gym import torch import numbers -from vel.api import BackboneModel, BatchInfo, ModelFactory -from vel.calc.function import explained_variance +from vel.api import BatchInfo, ModelFactory, BackboneNetwork +from vel.util.stats import explained_variance from vel.function.constant import ConstantSchedule from vel.metric.base import AveragingNamedMetric from vel.rl.api import RlPolicy, Rollout, Trajectories from vel.rl.discount_bootstrap import discount_bootstrap_gae +from vel.rl.module.stochastic_action_head import StochasticActionHead +from vel.rl.module.value_head import ValueHead + class PPO(RlPolicy): """ Proximal Policy Optimization - https://arxiv.org/abs/1707.06347 """ - def __init__(self, policy: 
BackboneModel, + def __init__(self, net: BackboneNetwork, action_space: gym.Space, entropy_coefficient, value_coefficient, cliprange, discount_factor: float, normalize_advantage: bool = True, gae_lambda: float = 1.0): super().__init__(discount_factor) @@ -28,23 +32,40 @@ def __init__(self, policy: BackboneModel, else: self.cliprange = cliprange - self.policy = policy + self.net = net + + assert not self.net.is_stateful, "For stateful policies, use PPORnn" + + # Make sure network returns two results + (action_size, value_size) = self.net.size_hints().assert_tuple(2) + + self.action_head = StochasticActionHead( + action_space=action_space, + input_dim=action_size.last(), + ) + + self.value_head = ValueHead( + input_dim=value_size.last() + ) def reset_weights(self): """ Initialize properly model weights """ - self.policy.reset_weights() + self.net.reset_weights() + self.action_head.reset_weights() + self.value_head.reset_weights() def forward(self, observation): """ Calculate model outputs """ - return self.policy.forward(observation) + action_hidden, value_hidden = self.net(observation) + return self.action_head(action_hidden), self.value_head(value_hidden) def act(self, observation, state=None, deterministic=False): """ Select actions based on model's output """ action_pd_params, value_output = self(observation) - actions = self.policy.action_head.sample(action_pd_params, deterministic=deterministic) + actions = self.action_head.sample(action_pd_params, deterministic=deterministic) # log likelihood of selected action - logprobs = self.policy.action_head.logprob(actions, action_pd_params) + logprobs = self.action_head.logprob(actions, action_pd_params) return { 'actions': actions, @@ -88,8 +109,8 @@ def calculate_gradient(self, batch_info: BatchInfo, rollout: Rollout) -> dict: # PART 0.1 - Model evaluation pd_params, model_values = self(observations) - model_action_logprobs = self.policy.action_head.logprob(actions, pd_params) - entropy = self.policy.action_head.entropy(pd_params) + model_action_logprobs = self.action_head.logprob(actions, pd_params) + entropy = self.action_head.entropy(pd_params) # Select the cliprange current_cliprange = self.cliprange.value(batch_info['progress']) @@ -151,10 +172,9 @@ def metrics(self) -> list: class PPOFactory(ModelFactory): """ Factory class for policy gradient models """ - def __init__(self, policy: BackboneModel, - entropy_coefficient, value_coefficient, cliprange, discount_factor: float, + def __init__(self, net, entropy_coefficient, value_coefficient, cliprange, discount_factor: float, normalize_advantage: bool = True, gae_lambda: float = 1.0): - self.policy = policy + self.net = net self.entropy_coefficient = entropy_coefficient self.value_coefficient = value_coefficient self.cliprange = cliprange @@ -164,10 +184,12 @@ def __init__(self, policy: BackboneModel, def instantiate(self, **extra_args): """ Instantiate the model """ - policy = self.policy.instantiate(**extra_args) + action_space = extra_args.pop('action_space') + net = self.net.instantiate(**extra_args) return PPO( - policy=policy, + net=net, + action_space=action_space, entropy_coefficient=self.entropy_coefficient, value_coefficient=self.value_coefficient, cliprange=self.cliprange, @@ -177,12 +199,11 @@ def instantiate(self, **extra_args): ) -def create(policy: BackboneModel, - entropy_coefficient, value_coefficient, cliprange, discount_factor: float, +def create(net: ModelFactory, entropy_coefficient, value_coefficient, cliprange, discount_factor: float, normalize_advantage: bool = 
True, gae_lambda: float = 1.0): """ Vel factory function """ return PPOFactory( - policy=policy, + net=net, entropy_coefficient=entropy_coefficient, value_coefficient=value_coefficient, cliprange=cliprange, diff --git a/vel/rl/algo/ppo_rnn.py b/vel/rl/policy/ppo_rnn.py similarity index 100% rename from vel/rl/algo/ppo_rnn.py rename to vel/rl/policy/ppo_rnn.py diff --git a/vel/rl/algo/trpo.py b/vel/rl/policy/trpo.py similarity index 93% rename from vel/rl/algo/trpo.py rename to vel/rl/policy/trpo.py index 6c92d9dc..586d33c2 100644 --- a/vel/rl/algo/trpo.py +++ b/vel/rl/policy/trpo.py @@ -6,12 +6,10 @@ import torch.autograd as autograd import torch.nn.functional as F import torch.nn.utils -import typing from vel.api import BatchInfo, VelOptimizer, BackboneModel, LinearBackboneModel, OptimizerFactory, ModelFactory from vel.calc.function import explained_variance from vel.metric.base import AveragingNamedMetric -from vel.module.input.identity import IdentityFactory from vel.rl.api import Rollout, Trajectories, RlPolicy from vel.rl.discount_bootstrap import discount_bootstrap_gae @@ -60,14 +58,12 @@ class TRPO(RlPolicy): """ Trust Region Policy Optimization - https://arxiv.org/abs/1502.05477 """ def __init__(self, - input_block: BackboneModel, policy_backbone: LinearBackboneModel, value_backbone: LinearBackboneModel, action_space: gym.Space, max_kl, cg_iters, line_search_iters, cg_damping, entropy_coefficient, vf_iters, discount_factor, gae_lambda, improvement_acceptance_ratio): super().__init__(discount_factor) - self.input_block = input_block self.policy_backbone = policy_backbone self.value_backbone = value_backbone @@ -89,8 +85,6 @@ def __init__(self, def reset_weights(self): """ Initialize properly model weights """ - self.input_block.reset_weights() - self.policy_backbone.reset_weights() self.value_backbone.reset_weights() @@ -99,10 +93,8 @@ def reset_weights(self): def forward(self, observations): """ Calculate model outputs """ - input_data = self.input_block(observations) - - policy_base_output = self.policy_backbone(input_data) - value_base_output = self.value_backbone(input_data) + policy_base_output = self.policy_backbone(observations) + value_base_output = self.value_backbone(observations) action_output = self.action_head(policy_base_output) value_output = self.value_head(value_base_output) @@ -111,15 +103,13 @@ def forward(self, observations): def value(self, observations, state=None): """ Calculate only value head for given state """ - input_data = self.input_block(observations) - base_output = self.value_backbone(input_data) + base_output = self.value_backbone(observations) value_output = self.value_head(base_output) return value_output def policy(self, observations): """ Calculate only action head for given state """ - input_data = self.input_block(observations) - policy_base_output = self.policy_backbone(input_data) + policy_base_output = self.policy_backbone(observations) policy_params = self.action_head(policy_base_output) return policy_params @@ -145,7 +135,6 @@ def create_optimizer(self, optimizer_factory: OptimizerFactory) -> VelOptimizer: def policy_parameters(self): """ Parameters of policy """ return it.chain( - self.input_block.parameters(), self.policy_backbone.parameters(), self.action_head.parameters() ) @@ -153,7 +142,6 @@ def policy_parameters(self): def value_parameters(self): """ Parameters of value function """ return it.chain( - self.input_block.parameters(), self.value_backbone.parameters(), self.value_head.parameters() ) @@ -345,12 +333,11 @@ def 
metrics(self) -> list: class TRPOFactory(ModelFactory): """ Factory class for policy gradient models """ - def __init__(self, input_block, policy_backbone: ModelFactory, value_backbone: ModelFactory, + def __init__(self, policy_backbone: ModelFactory, value_backbone: ModelFactory, max_kl, cg_iters, line_search_iters, cg_damping, entropy_coefficient, vf_iters, discount_factor, gae_lambda, improvement_acceptance_ratio): self.policy_backbone = policy_backbone self.value_backbone = value_backbone - self.input_block = input_block self.entropy_coefficient = entropy_coefficient self.mak_kl = max_kl @@ -366,13 +353,10 @@ def instantiate(self, **extra_args): """ Instantiate the model """ action_space = extra_args.pop('action_space') - input_block = self.input_block.instantiate() - policy_backbone = self.policy_backbone.instantiate(**extra_args) value_backbone = self.value_backbone.instantiate(**extra_args) return TRPO( - input_block=input_block, policy_backbone=policy_backbone, value_backbone=value_backbone, action_space=action_space, @@ -390,14 +374,10 @@ def instantiate(self, **extra_args): def create(policy_backbone: ModelFactory, value_backbone: ModelFactory, max_kl, cg_iters, line_search_iters, cg_damping, entropy_coefficient, vf_iters, - discount_factor, gae_lambda, improvement_acceptance_ratio, - input_block: typing.Optional[ModelFactory] = None): + discount_factor, gae_lambda, improvement_acceptance_ratio): """ Vel factory function """ - if input_block is None: - input_block = IdentityFactory() return TRPOFactory( - input_block=input_block, policy_backbone=policy_backbone, value_backbone=value_backbone, max_kl=max_kl, diff --git a/vel/rl/xpolicy/__init__.py b/vel/rl/xpolicy/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/vel/rl/xpolicy/purgatory/__init__.py b/vel/rl/xpolicy/purgatory/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/vel/rl/policy/purgatory/deterministic_policy.py b/vel/rl/xpolicy/purgatory/deterministic_policy.py similarity index 100% rename from vel/rl/policy/purgatory/deterministic_policy.py rename to vel/rl/xpolicy/purgatory/deterministic_policy.py diff --git a/vel/rl/policy/purgatory/old_stochastic_policy.py b/vel/rl/xpolicy/purgatory/old_stochastic_policy.py similarity index 100% rename from vel/rl/policy/purgatory/old_stochastic_policy.py rename to vel/rl/xpolicy/purgatory/old_stochastic_policy.py diff --git a/vel/rl/policy/purgatory/old_stochastic_rnn_policy.py b/vel/rl/xpolicy/purgatory/old_stochastic_rnn_policy.py similarity index 100% rename from vel/rl/policy/purgatory/old_stochastic_rnn_policy.py rename to vel/rl/xpolicy/purgatory/old_stochastic_rnn_policy.py diff --git a/vel/rl/policy/purgatory/q_distributional_policy.py b/vel/rl/xpolicy/purgatory/q_distributional_policy.py similarity index 100% rename from vel/rl/policy/purgatory/q_distributional_policy.py rename to vel/rl/xpolicy/purgatory/q_distributional_policy.py diff --git a/vel/rl/policy/purgatory/q_dueling_policy.py b/vel/rl/xpolicy/purgatory/q_dueling_policy.py similarity index 100% rename from vel/rl/policy/purgatory/q_dueling_policy.py rename to vel/rl/xpolicy/purgatory/q_dueling_policy.py diff --git a/vel/rl/policy/purgatory/q_model.py b/vel/rl/xpolicy/purgatory/q_model.py similarity index 70% rename from vel/rl/policy/purgatory/q_model.py rename to vel/rl/xpolicy/purgatory/q_model.py index d162a4de..2fbd4513 100644 --- a/vel/rl/policy/purgatory/q_model.py +++ b/vel/rl/xpolicy/purgatory/q_model.py @@ -3,34 +3,34 @@ from vel.api import LinearBackboneModel, 
ModelFactory, BackboneModel from vel.module.input.identity import IdentityFactory -from vel.rl.api import Rollout, RlPolicy, Evaluator +from vel.rl.api import Rollout, RlPolicy from vel.rl.module.q_head import QHead -class QModelEvaluator(Evaluator): - """ Evaluate simple q-model """ - def __init__(self, model: 'QModel', rollout: Rollout): - super().__init__(rollout) - self.model = model - - @Evaluator.provides('model:q') - def model_q(self): - """ Action values for all (discrete) actions """ - observations = self.get('rollout:observations') - return self.model(observations) - - @Evaluator.provides('model:action:q') - def model_action_q(self): - """ Action values for selected actions in the rollout """ - q = self.get('model:q') - actions = self.get('rollout:actions') - return q.gather(1, actions.unsqueeze(1)).squeeze(1) - - @Evaluator.provides('model:q_next') - def model_q_next(self): - """ Action values for all (discrete) actions """ - observations = self.get('rollout:observations_next') - return self.model(observations) +# class QModelEvaluator(Evaluator): +# """ Evaluate simple q-model """ +# def __init__(self, model: 'QModel', rollout: Rollout): +# super().__init__(rollout) +# self.model = model +# +# @Evaluator.provides('model:q') +# def model_q(self): +# """ Action values for all (discrete) actions """ +# observations = self.get('rollout:observations') +# return self.model(observations) +# +# @Evaluator.provides('model:action:q') +# def model_action_q(self): +# """ Action values for selected actions in the rollout """ +# q = self.get('model:q') +# actions = self.get('rollout:actions') +# return q.gather(1, actions.unsqueeze(1)).squeeze(1) +# +# @Evaluator.provides('model:q_next') +# def model_q_next(self): +# """ Action values for all (discrete) actions """ +# observations = self.get('rollout:observations_next') +# return self.model(observations) class QModel(RlPolicy): diff --git a/vel/rl/policy/purgatory/q_noisy_model.py b/vel/rl/xpolicy/purgatory/q_noisy_model.py similarity index 100% rename from vel/rl/policy/purgatory/q_noisy_model.py rename to vel/rl/xpolicy/purgatory/q_noisy_model.py diff --git a/vel/rl/policy/purgatory/q_rainbow_model.py b/vel/rl/xpolicy/purgatory/q_rainbow_model.py similarity index 100% rename from vel/rl/policy/purgatory/q_rainbow_model.py rename to vel/rl/xpolicy/purgatory/q_rainbow_model.py diff --git a/vel/rl/policy/purgatory/q_stochastic_policy_model.py b/vel/rl/xpolicy/purgatory/q_stochastic_policy_model.py similarity index 100% rename from vel/rl/policy/purgatory/q_stochastic_policy_model.py rename to vel/rl/xpolicy/purgatory/q_stochastic_policy_model.py diff --git a/vel/rl/xpolicy/semipurgatory/__init__.py b/vel/rl/xpolicy/semipurgatory/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/vel/rl/policy/semipurgatory/a2c_rnn.py b/vel/rl/xpolicy/semipurgatory/a2c_rnn.py similarity index 100% rename from vel/rl/policy/semipurgatory/a2c_rnn.py rename to vel/rl/xpolicy/semipurgatory/a2c_rnn.py diff --git a/vel/rl/policy/semipurgatory/acer.py b/vel/rl/xpolicy/semipurgatory/acer.py similarity index 100% rename from vel/rl/policy/semipurgatory/acer.py rename to vel/rl/xpolicy/semipurgatory/acer.py diff --git a/vel/rl/policy/semipurgatory/ddpg.py b/vel/rl/xpolicy/semipurgatory/ddpg.py similarity index 100% rename from vel/rl/policy/semipurgatory/ddpg.py rename to vel/rl/xpolicy/semipurgatory/ddpg.py diff --git a/vel/rl/policy/semipurgatory/distributional_dqn.py b/vel/rl/xpolicy/semipurgatory/distributional_dqn.py similarity index 100% rename 
from vel/rl/policy/semipurgatory/distributional_dqn.py rename to vel/rl/xpolicy/semipurgatory/distributional_dqn.py diff --git a/vel/rl/policy/semipurgatory/ppo_rnn.py b/vel/rl/xpolicy/semipurgatory/ppo_rnn.py similarity index 100% rename from vel/rl/policy/semipurgatory/ppo_rnn.py rename to vel/rl/xpolicy/semipurgatory/ppo_rnn.py diff --git a/vel/rl/policy/stochastic_policy.py b/vel/rl/xpolicy/stochastic_policy.py similarity index 100% rename from vel/rl/policy/stochastic_policy.py rename to vel/rl/xpolicy/stochastic_policy.py diff --git a/vel/rl/policy/stochastic_policy_separate.py b/vel/rl/xpolicy/stochastic_policy_separate.py similarity index 100% rename from vel/rl/policy/stochastic_policy_separate.py rename to vel/rl/xpolicy/stochastic_policy_separate.py diff --git a/vel/rl/policy/stochastic_rnn_policy.py b/vel/rl/xpolicy/stochastic_rnn_policy.py similarity index 100% rename from vel/rl/policy/stochastic_rnn_policy.py rename to vel/rl/xpolicy/stochastic_rnn_policy.py diff --git a/vel/calc/process.py b/vel/util/process.py similarity index 100% rename from vel/calc/process.py rename to vel/util/process.py diff --git a/vel/calc/function.py b/vel/util/stats.py similarity index 100% rename from vel/calc/function.py rename to vel/util/stats.py From bc211ec75e62df9daa86881bbf1838f7bcb26e0d Mon Sep 17 00:00:00 2001 From: Million Integrals Date: Tue, 1 Oct 2019 23:32:29 -0700 Subject: [PATCH 102/162] Restored TRPO. --- .../rl/atari/{purgatory => }/atari_trpo.yaml | 29 ++++----- vel/rl/layer/nature_cnn.py | 4 +- .../purgatory => }/nature_cnn_small.py | 50 ++++++++++----- vel/rl/policy/trpo.py | 64 ++++++++++--------- 4 files changed, 81 insertions(+), 66 deletions(-) rename examples-configs/rl/atari/{purgatory => }/atari_trpo.yaml (75%) rename vel/rl/layer/{premade/purgatory => }/nature_cnn_small.py (63%) diff --git a/examples-configs/rl/atari/purgatory/atari_trpo.yaml b/examples-configs/rl/atari/atari_trpo.yaml similarity index 75% rename from examples-configs/rl/atari/purgatory/atari_trpo.yaml rename to examples-configs/rl/atari/atari_trpo.yaml index df2446ab..56af94db 100644 --- a/examples-configs/rl/atari/purgatory/atari_trpo.yaml +++ b/examples-configs/rl/atari/atari_trpo.yaml @@ -12,7 +12,7 @@ vec_env: model: - name: vel.rl.algo.trpo + name: vel.rl.policy.trpo max_kl: 0.001 cg_iters: 10 @@ -25,20 +25,19 @@ model: gae_lambda: 1.00 # Generalized Advantage Estimator Lambda parameter - input_block: - name: vel.module.input.image_to_tensor - - policy_backbone: - name: vel.rl.backbone.nature_cnn_small - input_width: 84 - input_height: 84 - input_channels: 4 # The same as frame_history - - value_backbone: - name: vel.rl.backbone.nature_cnn_small - input_width: 84 - input_height: 84 - input_channels: 4 # The same as frame_history + policy_net: + name: vel.net.modular + layers: + - name: vel.net.layer.input.image_to_tensor + size: [84, 84, 4] # Number of channels is frame history + - name: vel.rl.layer.nature_cnn_small + + value_net: + name: vel.net.modular + layers: + - name: vel.net.layer.input.image_to_tensor + size: [84, 84, 4] # Number of channels is frame history + - name: vel.rl.layer.nature_cnn_small reinforcer: diff --git a/vel/rl/layer/nature_cnn.py b/vel/rl/layer/nature_cnn.py index b44b84d9..16cfed1b 100644 --- a/vel/rl/layer/nature_cnn.py +++ b/vel/rl/layer/nature_cnn.py @@ -5,7 +5,6 @@ Under MIT license. 
""" import numpy as np -import typing import torch.nn as nn import torch.nn.init as init @@ -13,8 +12,7 @@ import vel.util.network as net_util -from vel.api import ModelFactory, SizeHint, SizeHints - +from vel.api import SizeHint, SizeHints from vel.net.modular import Layer, LayerFactory diff --git a/vel/rl/layer/premade/purgatory/nature_cnn_small.py b/vel/rl/layer/nature_cnn_small.py similarity index 63% rename from vel/rl/layer/premade/purgatory/nature_cnn_small.py rename to vel/rl/layer/nature_cnn_small.py index 29359262..c9ac77bb 100644 --- a/vel/rl/layer/premade/purgatory/nature_cnn_small.py +++ b/vel/rl/layer/nature_cnn_small.py @@ -12,18 +12,19 @@ import vel.util.network as net_util -from vel.api import LinearBackboneModel, ModelFactory +from vel.api import SizeHint, SizeHints +from vel.net.modular import Layer, LayerFactory -class NatureCnnSmall(LinearBackboneModel): +class NatureCnnSmall(Layer): """ Neural network as defined in the paper 'Human-level control through deep reinforcement learning' Smaller version. """ - def __init__(self, input_width, input_height, input_channels, output_dim=128): - super().__init__() + def __init__(self, name: str, input_width, input_height, input_channels, output_dim=128): + super().__init__(name) - self._output_dim = output_dim + self.output_dim = output_dim self.conv1 = nn.Conv2d( in_channels=input_channels, @@ -54,11 +55,6 @@ def __init__(self, input_width, input_height, input_channels, output_dim=128): self.output_dim ) - @property - def output_dim(self) -> int: - """ Final dimension of model output """ - return self._output_dim - def reset_weights(self): for m in self.modules(): if isinstance(m, nn.Conv2d): @@ -70,7 +66,10 @@ def reset_weights(self): init.orthogonal_(m.weight, gain=np.sqrt(2)) init.constant_(m.bias, 0.0) - def forward(self, image): + def size_hints(self) -> SizeHints: + return SizeHints(SizeHint(None, self.output_dim)) + + def forward(self, image, state: dict = None, context: dict = None): result = image result = F.relu(self.conv1(result)) result = F.relu(self.conv2(result)) @@ -78,12 +77,29 @@ def forward(self, image): return F.relu(self.linear_layer(flattened)) -def create(input_width, input_height, input_channels=1): - """ Vel factory function """ - def instantiate(**_): - return NatureCnnSmall(input_width=input_width, input_height=input_height, input_channels=input_channels) +class NatureCnnSmallFactory(LayerFactory): + """ Nature Cnn Network Factory """ - return ModelFactory.generic(instantiate) + def __init__(self, output_dim: int = 128): + self.output_dim = output_dim + @property + def name_base(self) -> str: + """ Base of layer name """ + return "nature_cnn_small" + + def instantiate(self, name: str, direct_input: SizeHints, context: dict) -> Layer: + (b, c, w, h) = direct_input.assert_single(4) + + return NatureCnnSmall( + name=name, + input_width=w, + input_height=h, + input_channels=c, + output_dim=self.output_dim + ) -NatureCnnSmallFactory = create + +def create(output_dim: int = 128): + """ Vel factory function """ + return NatureCnnSmallFactory(output_dim=output_dim) diff --git a/vel/rl/policy/trpo.py b/vel/rl/policy/trpo.py index 586d33c2..4d614cd8 100644 --- a/vel/rl/policy/trpo.py +++ b/vel/rl/policy/trpo.py @@ -7,8 +7,8 @@ import torch.nn.functional as F import torch.nn.utils -from vel.api import BatchInfo, VelOptimizer, BackboneModel, LinearBackboneModel, OptimizerFactory, ModelFactory -from vel.calc.function import explained_variance +from vel.api import BatchInfo, VelOptimizer, OptimizerFactory, 
ModelFactory, BackboneNetwork +from vel.util.stats import explained_variance from vel.metric.base import AveragingNamedMetric from vel.rl.api import Rollout, Trajectories, RlPolicy @@ -58,22 +58,12 @@ class TRPO(RlPolicy): """ Trust Region Policy Optimization - https://arxiv.org/abs/1502.05477 """ def __init__(self, - policy_backbone: LinearBackboneModel, value_backbone: LinearBackboneModel, + policy_net: BackboneNetwork, value_net: BackboneNetwork, action_space: gym.Space, max_kl, cg_iters, line_search_iters, cg_damping, entropy_coefficient, vf_iters, discount_factor, gae_lambda, improvement_acceptance_ratio): super().__init__(discount_factor) - self.policy_backbone = policy_backbone - self.value_backbone = value_backbone - - self.action_head = StochasticActionHead( - action_space=action_space, - input_dim=self.policy_backbone.output_dim - ) - - self.value_head = ValueHead(input_dim=self.value_backbone.output_dim) - self.mak_kl = max_kl self.cg_iters = cg_iters self.line_search_iters = line_search_iters @@ -83,18 +73,30 @@ def __init__(self, self.gae_lambda = gae_lambda self.improvement_acceptance_ratio = improvement_acceptance_ratio + self.policy_net = policy_net + self.value_net = value_net + + self.action_head = StochasticActionHead( + action_space=action_space, + input_dim=self.policy_net.size_hints().assert_single(2).last() + ) + + self.value_head = ValueHead( + input_dim=self.value_net.size_hints().assert_single(2).last() + ) + def reset_weights(self): """ Initialize properly model weights """ - self.policy_backbone.reset_weights() - self.value_backbone.reset_weights() + self.policy_net.reset_weights() + self.value_net.reset_weights() self.action_head.reset_weights() self.value_head.reset_weights() def forward(self, observations): """ Calculate model outputs """ - policy_base_output = self.policy_backbone(observations) - value_base_output = self.value_backbone(observations) + policy_base_output = self.policy_net(observations) + value_base_output = self.value_net(observations) action_output = self.action_head(policy_base_output) value_output = self.value_head(value_base_output) @@ -103,13 +105,13 @@ def forward(self, observations): def value(self, observations, state=None): """ Calculate only value head for given state """ - base_output = self.value_backbone(observations) + base_output = self.value_net(observations) value_output = self.value_head(base_output) return value_output def policy(self, observations): """ Calculate only action head for given state """ - policy_base_output = self.policy_backbone(observations) + policy_base_output = self.policy_net(observations) policy_params = self.action_head(policy_base_output) return policy_params @@ -135,14 +137,14 @@ def create_optimizer(self, optimizer_factory: OptimizerFactory) -> VelOptimizer: def policy_parameters(self): """ Parameters of policy """ return it.chain( - self.policy_backbone.parameters(), + self.policy_net.parameters(), self.action_head.parameters() ) def value_parameters(self): """ Parameters of value function """ return it.chain( - self.value_backbone.parameters(), + self.value_net.parameters(), self.value_head.parameters() ) @@ -333,11 +335,11 @@ def metrics(self) -> list: class TRPOFactory(ModelFactory): """ Factory class for policy gradient models """ - def __init__(self, policy_backbone: ModelFactory, value_backbone: ModelFactory, + def __init__(self, policy_net: ModelFactory, value_net: ModelFactory, max_kl, cg_iters, line_search_iters, cg_damping, entropy_coefficient, vf_iters, discount_factor, gae_lambda, 
improvement_acceptance_ratio): - self.policy_backbone = policy_backbone - self.value_backbone = value_backbone + self.policy_net = policy_net + self.value_net = value_net self.entropy_coefficient = entropy_coefficient self.mak_kl = max_kl @@ -353,12 +355,12 @@ def instantiate(self, **extra_args): """ Instantiate the model """ action_space = extra_args.pop('action_space') - policy_backbone = self.policy_backbone.instantiate(**extra_args) - value_backbone = self.value_backbone.instantiate(**extra_args) + policy_net = self.policy_net.instantiate(**extra_args) + value_net = self.value_net.instantiate(**extra_args) return TRPO( - policy_backbone=policy_backbone, - value_backbone=value_backbone, + policy_net=policy_net, + value_net=value_net, action_space=action_space, max_kl=self.mak_kl, cg_iters=self.cg_iters, @@ -372,14 +374,14 @@ def instantiate(self, **extra_args): ) -def create(policy_backbone: ModelFactory, value_backbone: ModelFactory, +def create(policy_net: ModelFactory, value_net: ModelFactory, max_kl, cg_iters, line_search_iters, cg_damping, entropy_coefficient, vf_iters, discount_factor, gae_lambda, improvement_acceptance_ratio): """ Vel factory function """ return TRPOFactory( - policy_backbone=policy_backbone, - value_backbone=value_backbone, + policy_net=policy_net, + value_net=value_net, max_kl=max_kl, cg_iters=cg_iters, line_search_iters=line_search_iters, From 8a17a973635bdacfe829d9c26d86f1d09129ea04 Mon Sep 17 00:00:00 2001 From: Million Integrals Date: Wed, 2 Oct 2019 10:38:42 -0700 Subject: [PATCH 103/162] Restored `atari_a2c_tf_rmsprop` example. --- .../{purgatory => }/atari_a2c_tf_rmsprop.yaml | 40 +++++++++---------- vel/optimizer/rmsprop_tf.py | 3 ++ 2 files changed, 22 insertions(+), 21 deletions(-) rename examples-configs/rl/atari/{purgatory => }/atari_a2c_tf_rmsprop.yaml (50%) diff --git a/examples-configs/rl/atari/purgatory/atari_a2c_tf_rmsprop.yaml b/examples-configs/rl/atari/atari_a2c_tf_rmsprop.yaml similarity index 50% rename from examples-configs/rl/atari/purgatory/atari_a2c_tf_rmsprop.yaml rename to examples-configs/rl/atari/atari_a2c_tf_rmsprop.yaml index 2786a6a4..13de0fef 100644 --- a/examples-configs/rl/atari/purgatory/atari_a2c_tf_rmsprop.yaml +++ b/examples-configs/rl/atari/atari_a2c_tf_rmsprop.yaml @@ -12,37 +12,35 @@ vec_env: model: - name: vel.rl.models.stochastic_policy_model + name: vel.rl.policy.a2c - input_block: - name: vel.modules.input.image_to_tensor + entropy_coefficient: 0.01 + value_coefficient: 0.5 + discount_factor: 0.99 - backbone: - name: vel.rl.models.backbone.nature_cnn - input_width: 84 - input_height: 84 - input_channels: 4 # The same as frame_history + net: + name: vel.net.modular + layers: + - name: vel.net.layer.input.image_to_tensor + size: [84, 84, 4] # Number of channels is frame history + - name: vel.rl.layer.nature_cnn + - name: vel.net.layer.util.repeat_tensor + times: 2 # Need to repeat output twice, for action and value heads reinforcer: - name: vel.rl.reinforcers.on_policy_iteration_reinforcer - - algo: - name: vel.rl.algo.policy_gradient.a2c - entropy_coefficient: 0.01 - value_coefficient: 0.5 - max_grad_norm: 0.5 - discount_factor: 0.99 + name: vel.rl.reinforcer.on_policy_iteration_reinforcer env_roller: name: vel.rl.env_roller.step_env_roller number_of_steps: 5 # How many environment steps go into a single batch parallel_envs: 16 # How many environments to run in parallel + batch_size: 256 # How many samples can go into the model once optimizer: - name: vel.optimizers.rmsprop_tf + name: vel.optimizer.rmsprop_tf lr: 
7.0e-4 alpha: 0.99 epsilon: 1.0e-6 @@ -50,20 +48,20 @@ optimizer: commands: train: - name: vel.rl.commands.rl_train_command + name: vel.rl.command.rl_train_command total_frames: 1.1e7 batches_per_epoch: 100 record: - name: vel.rl.commands.record_movie_command + name: vel.rl.command.record_movie_command takes: 10 videoname: 'atari_vid_{:04}.avi' evaluate: - name: vel.rl.commands.evaluate_env_command + name: vel.rl.command.evaluate_env_command parallel_envs: 16 # How many environments to run in parallel takes: 20 visdom: - name: vel.commands.vis_store_command + name: vel.command.vis_store_command diff --git a/vel/optimizer/rmsprop_tf.py b/vel/optimizer/rmsprop_tf.py index 4e80a401..934d5090 100644 --- a/vel/optimizer/rmsprop_tf.py +++ b/vel/optimizer/rmsprop_tf.py @@ -1,3 +1,6 @@ +import torch +import typing + from torch.optim.optimizer import Optimizer import vel.util.module_util as mu From 1ccc0a4ef4890b6960f5eac7d5e3b511ca385191 Mon Sep 17 00:00:00 2001 From: Million Integrals Date: Wed, 2 Oct 2019 13:24:35 -0700 Subject: [PATCH 104/162] ACER works again. --- .../rl/atari/{purgatory => }/atari_acer.yaml | 51 ++--- vel/rl/api/policy.py | 2 +- vel/rl/env_roller/step_env_roller.py | 14 +- .../trajectory_replay_env_roller.py | 11 +- .../semipurgatory => module/head}/__init__.py | 0 .../{ => head}/deterministic_action_head.py | 0 .../{ => head}/deterministic_critic_head.py | 0 .../{ => head}/q_distributional_head.py | 0 .../q_distributional_noisy_dueling_head.py | 0 vel/rl/module/{ => head}/q_dueling_head.py | 0 vel/rl/module/{ => head}/q_head.py | 0 vel/rl/module/{ => head}/q_noisy_head.py | 0 .../{ => head}/stochastic_action_head.py | 56 ++---- vel/rl/module/{ => head}/value_head.py | 0 vel/rl/module/stochastic_policy.py | 42 +++++ vel/rl/policy/a2c.py | 34 +--- .../{xpolicy/semipurgatory => policy}/acer.py | 176 ++++++++++++++---- vel/rl/policy/ppo.py | 37 +--- vel/rl/policy/purgatory/__init__.py | 0 vel/rl/policy/{ => purgatory}/a2c_rnn.py | 0 .../purgatory}/ddpg.py | 0 .../purgatory}/distributional_dqn.py | 0 vel/rl/policy/{ => purgatory}/dqn.py | 0 vel/rl/policy/{ => purgatory}/ppo_rnn.py | 0 ...fered_mixed_policy_iteration_reinforcer.py | 57 +++--- .../on_policy_iteration_reinforcer.py | 10 +- vel/rl/xpolicy/semipurgatory/a2c_rnn.py | 92 --------- vel/rl/xpolicy/semipurgatory/ppo_rnn.py | 133 ------------- vel/rl/xpolicy/stochastic_policy.py | 70 ------- 29 files changed, 283 insertions(+), 502 deletions(-) rename examples-configs/rl/atari/{purgatory => }/atari_acer.yaml (61%) rename vel/rl/{xpolicy/semipurgatory => module/head}/__init__.py (100%) rename vel/rl/module/{ => head}/deterministic_action_head.py (100%) rename vel/rl/module/{ => head}/deterministic_critic_head.py (100%) rename vel/rl/module/{ => head}/q_distributional_head.py (100%) rename vel/rl/module/{ => head}/q_distributional_noisy_dueling_head.py (100%) rename vel/rl/module/{ => head}/q_dueling_head.py (100%) rename vel/rl/module/{ => head}/q_head.py (100%) rename vel/rl/module/{ => head}/q_noisy_head.py (100%) rename vel/rl/module/{ => head}/stochastic_action_head.py (71%) rename vel/rl/module/{ => head}/value_head.py (100%) create mode 100644 vel/rl/module/stochastic_policy.py rename vel/rl/{xpolicy/semipurgatory => policy}/acer.py (58%) create mode 100644 vel/rl/policy/purgatory/__init__.py rename vel/rl/policy/{ => purgatory}/a2c_rnn.py (100%) rename vel/rl/{xpolicy/semipurgatory => policy/purgatory}/ddpg.py (100%) rename vel/rl/{xpolicy/semipurgatory => policy/purgatory}/distributional_dqn.py (100%) rename 
vel/rl/policy/{ => purgatory}/dqn.py (100%) rename vel/rl/policy/{ => purgatory}/ppo_rnn.py (100%) delete mode 100644 vel/rl/xpolicy/semipurgatory/a2c_rnn.py delete mode 100644 vel/rl/xpolicy/semipurgatory/ppo_rnn.py delete mode 100644 vel/rl/xpolicy/stochastic_policy.py diff --git a/examples-configs/rl/atari/purgatory/atari_acer.yaml b/examples-configs/rl/atari/atari_acer.yaml similarity index 61% rename from examples-configs/rl/atari/purgatory/atari_acer.yaml rename to examples-configs/rl/atari/atari_acer.yaml index 52e47b84..256b09bc 100644 --- a/examples-configs/rl/atari/purgatory/atari_acer.yaml +++ b/examples-configs/rl/atari/atari_acer.yaml @@ -10,28 +10,39 @@ vec_env: name: vel.rl.vecenv.shared_mem frame_history: 4 # How many stacked frames go into a single observation + algo: + name: vel.rl.algo.policy_gradient.acer model: - name: vel.rl.models.q_stochastic_policy_model + name: vel.rl.policy.acer + + entropy_coefficient: 0.01 + q_coefficient: 0.5 + rho_cap: 10.0 + retrace_rho_cap: 1.0 + + discount_factor: 0.99 - input_block: - name: vel.modules.input.image_to_tensor + trust_region: true - backbone: - name: vel.rl.models.backbone.nature_cnn - input_width: 84 - input_height: 84 - input_channels: 4 # The same as frame_history + net: + name: vel.net.modular + layers: + - name: vel.net.layer.input.image_to_tensor + size: [84, 84, 4] # Number of channels is frame history + - name: vel.rl.layer.nature_cnn + - name: vel.net.layer.util.repeat_tensor + times: 2 # Need to repeat output twice, for action and value heads reinforcer: - name: vel.rl.reinforcers.buffered_mixed_policy_iteration_reinforcer + name: vel.rl.reinforcer.buffered_mixed_policy_iteration_reinforcer env_roller: name: vel.rl.env_roller.trajectory_replay_env_roller replay_buffer: - name: vel.rl.buffers.circular_replay_buffer + name: vel.rl.buffer.circular_replay_buffer buffer_initial_size: 1_000 # How many samples we need in the buffer before we start using replay buffer buffer_capacity: 50_000 @@ -40,17 +51,6 @@ reinforcer: frame_stack_compensation: true frame_history: 4 # How many stacked frames go into a single observation - algo: - name: vel.rl.algo.policy_gradient.acer - entropy_coefficient: 0.01 - q_coefficient: 0.5 - rho_cap: 10.0 - retrace_rho_cap: 1.0 - - max_grad_norm: 10.0 - discount_factor: 0.99 - - trust_region: false parallel_envs: 12 # How many environments to run in parallel number_of_steps: 20 # How many environment steps go into a single batch @@ -58,25 +58,26 @@ reinforcer: optimizer: - name: vel.optimizers.rmsprop + name: vel.optimizer.rmsprop lr: 7.0e-4 alpha: 0.99 # epsilon: 1.0e-5 epsilon: 1.0e-3 + max_grad_norm: 10.0 commands: train: - name: vel.rl.commands.rl_train_command + name: vel.rl.command.rl_train_command total_frames: 1.1e7 batches_per_epoch: 30 record: - name: vel.rl.commands.record_movie_command + name: vel.rl.command.record_movie_command takes: 10 videoname: 'atari_vid_{:04}.avi' evaluate: - name: vel.rl.commands.evaluate_env_command + name: vel.rl.command.evaluate_env_command takes: 100 parallel_envs: 12 # How many environments to run in parallel diff --git a/vel/rl/api/policy.py b/vel/rl/api/policy.py index e5701d4d..3a73003e 100644 --- a/vel/rl/api/policy.py +++ b/vel/rl/api/policy.py @@ -12,7 +12,7 @@ def __init__(self, discount_factor: float): def process_rollout(self, rollout: Rollout) -> Rollout: """ Process rollout for optimization before any chunking/shuffling """ - raise NotImplementedError + return rollout def act(self, observation, state=None, deterministic=False) -> dict: """ 
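
The `process_rollout` hook above now defaults to an identity pass-through, so only policies that actually post-process a rollout before optimization need to override it. Below is a minimal sketch of such an override, assuming a hypothetical subclass; the `transition_tensors` attribute and the `advantages` key are illustrative assumptions rather than anything defined in this patch:

from vel.rl.api import RlPolicy, Rollout


class AdvantageNormalizingPolicy(RlPolicy):
    """ Hypothetical sketch: post-process each rollout once, before any chunking/shuffling """

    def process_rollout(self, rollout: Rollout) -> Rollout:
        # 'transition_tensors' and the 'advantages' key are assumptions made for this
        # illustration; a concrete policy would read and write whatever its rollout stores.
        advantages = rollout.transition_tensors['advantages']
        rollout.transition_tensors['advantages'] = (
            (advantages - advantages.mean()) / (advantages.std() + 1e-8)
        )
        return rollout
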
diff --git a/vel/rl/env_roller/step_env_roller.py b/vel/rl/env_roller/step_env_roller.py index f749a895..2b959f6e 100644 --- a/vel/rl/env_roller/step_env_roller.py +++ b/vel/rl/env_roller/step_env_roller.py @@ -58,18 +58,22 @@ def rollout(self, batch_info: BatchInfo, number_of_steps: int) -> Rollout: episode_information.append(new_infos) - final_values = self.actor.value(self.last_observation.to(self.device)).cpu() - accumulated_tensors = accumulator.result() + # Perform last agent step, without advancing the state + final_obs = self.actor.act(self.last_observation.to(self.device), advance_state=False) + + rollout_tensors = {} + + for key, value in final_obs.items(): + rollout_tensors[f"final_{key}"] = value.cpu() + return Trajectories( num_steps=accumulated_tensors['observations'].size(0), num_envs=accumulated_tensors['observations'].size(1), environment_information=episode_information, transition_tensors=accumulated_tensors, - rollout_tensors={ - 'final_values': final_values - } + rollout_tensors=rollout_tensors ) diff --git a/vel/rl/env_roller/trajectory_replay_env_roller.py b/vel/rl/env_roller/trajectory_replay_env_roller.py index 1f413f4f..7e347edb 100644 --- a/vel/rl/env_roller/trajectory_replay_env_roller.py +++ b/vel/rl/env_roller/trajectory_replay_env_roller.py @@ -81,14 +81,19 @@ def rollout(self, batch_info: BatchInfo, number_of_steps: int) -> Rollout: accumulated_tensors = accumulator.result() + final_obs = self.actor.act(self.last_observation.to(self.device), advance_state=False) + + rollout_tensors = {} + + for key, value in final_obs.items(): + rollout_tensors[f"final_{key}"] = value.cpu() + return Trajectories( num_steps=accumulated_tensors['observations'].size(0), num_envs=accumulated_tensors['observations'].size(1), environment_information=episode_information, transition_tensors=accumulated_tensors, - rollout_tensors={ - 'final_values': self.actor.value(self.last_observation).cpu() - } + rollout_tensors=rollout_tensors ) def sample(self, batch_info: BatchInfo, number_of_steps: int) -> Rollout: diff --git a/vel/rl/xpolicy/semipurgatory/__init__.py b/vel/rl/module/head/__init__.py similarity index 100% rename from vel/rl/xpolicy/semipurgatory/__init__.py rename to vel/rl/module/head/__init__.py diff --git a/vel/rl/module/deterministic_action_head.py b/vel/rl/module/head/deterministic_action_head.py similarity index 100% rename from vel/rl/module/deterministic_action_head.py rename to vel/rl/module/head/deterministic_action_head.py diff --git a/vel/rl/module/deterministic_critic_head.py b/vel/rl/module/head/deterministic_critic_head.py similarity index 100% rename from vel/rl/module/deterministic_critic_head.py rename to vel/rl/module/head/deterministic_critic_head.py diff --git a/vel/rl/module/q_distributional_head.py b/vel/rl/module/head/q_distributional_head.py similarity index 100% rename from vel/rl/module/q_distributional_head.py rename to vel/rl/module/head/q_distributional_head.py diff --git a/vel/rl/module/q_distributional_noisy_dueling_head.py b/vel/rl/module/head/q_distributional_noisy_dueling_head.py similarity index 100% rename from vel/rl/module/q_distributional_noisy_dueling_head.py rename to vel/rl/module/head/q_distributional_noisy_dueling_head.py diff --git a/vel/rl/module/q_dueling_head.py b/vel/rl/module/head/q_dueling_head.py similarity index 100% rename from vel/rl/module/q_dueling_head.py rename to vel/rl/module/head/q_dueling_head.py diff --git a/vel/rl/module/q_head.py b/vel/rl/module/head/q_head.py similarity index 100% rename from 
vel/rl/module/q_head.py rename to vel/rl/module/head/q_head.py diff --git a/vel/rl/module/q_noisy_head.py b/vel/rl/module/head/q_noisy_head.py similarity index 100% rename from vel/rl/module/q_noisy_head.py rename to vel/rl/module/head/q_noisy_head.py diff --git a/vel/rl/module/stochastic_action_head.py b/vel/rl/module/head/stochastic_action_head.py similarity index 71% rename from vel/rl/module/stochastic_action_head.py rename to vel/rl/module/head/stochastic_action_head.py index 2d54cbab..d0ce774f 100644 --- a/vel/rl/module/stochastic_action_head.py +++ b/vel/rl/module/head/stochastic_action_head.py @@ -1,3 +1,4 @@ +import gym import numpy as np import torch @@ -110,7 +111,7 @@ def sample(self, logits, deterministic=False): if deterministic: return torch.argmax(logits, dim=-1) else: - # Gumbel-softmax trick + # Gumbel-Softmax trick u = torch.rand_like(logits) return torch.argmax(logits - torch.log(-torch.log(u)), dim=-1) @@ -134,47 +135,12 @@ def kl_divergence(self, logits_q, logits_p): return (torch.exp(logits_q) * (logits_q - logits_p)).sum(1, keepdim=True) -class StochasticActionHead(nn.Module): - """ - Network head for action determination. Returns probability distribution parametrization - """ - - def __init__(self, input_dim, action_space): - super().__init__() - - self.action_space = action_space - - if isinstance(action_space, spaces.Box): - assert len(action_space.shape) == 1 - self.head = DiagGaussianActionHead(input_dim, action_space.shape[0]) - elif isinstance(action_space, spaces.Discrete): - self.head = CategoricalActionHead(input_dim, action_space.n) - # elif isinstance(action_space, spaces.MultiDiscrete): - # return MultiCategoricalPdType(action_space.nvec) - # elif isinstance(action_space, spaces.MultiBinary): - # return BernoulliPdType(action_space.n) - else: - raise NotImplementedError - - def forward(self, input_data): - return self.head(input_data) - - def sample(self, policy_params, **kwargs): - """ Sample from a probability space of all actions """ - return self.head.sample(policy_params, **kwargs) - - def reset_weights(self): - """ Initialize weights to sane defaults """ - self.head.reset_weights() - - def entropy(self, policy_params): - """ Entropy calculation - sum probs * log(probs) """ - return self.head.entropy(policy_params) - - def kl_divergence(self, params_q, params_p): - """ Kullback–Leibler divergence between two sets of parameters """ - return self.head.kl_divergence(params_q, params_p) - - def logprob(self, action_sample, policy_params): - """ - log probabilty of selected actions """ - return self.head.logprob(action_sample, policy_params) +def make_stockastic_action_head(input_dim: int, action_space: gym.Space): + """ Instantiate stochastic action space relevant for the task """ + if isinstance(action_space, spaces.Box): + assert len(action_space.shape) == 1 + return DiagGaussianActionHead(input_dim, action_space.shape[0]) + elif isinstance(action_space, spaces.Discrete): + return CategoricalActionHead(input_dim, action_space.n) + else: + raise NotImplementedError diff --git a/vel/rl/module/value_head.py b/vel/rl/module/head/value_head.py similarity index 100% rename from vel/rl/module/value_head.py rename to vel/rl/module/head/value_head.py diff --git a/vel/rl/module/stochastic_policy.py b/vel/rl/module/stochastic_policy.py new file mode 100644 index 00000000..47a22e3f --- /dev/null +++ b/vel/rl/module/stochastic_policy.py @@ -0,0 +1,42 @@ +import gym + +from vel.api import Network, BackboneNetwork + +from vel.rl.module.head.stochastic_action_head 
import make_stockastic_action_head +from vel.rl.module.head.value_head import ValueHead + + +class StochasticPolicy(Network): + """ + Most generic policy gradient model class with a set of common actor-critic heads that share a single backbone + """ + + def __init__(self, net: BackboneNetwork, action_space: gym.Space): + super().__init__() + + self.net = net + + assert not self.net.is_stateful, "Backbone shouldn't have state" + + (action_size, value_size) = self.net.size_hints().assert_tuple(2) + + self.action_head = make_stockastic_action_head( + action_space=action_space, + input_dim=action_size.last(), + ) + + self.value_head = ValueHead( + input_dim=value_size.last() + ) + + def reset_weights(self): + """ Initialize properly model weights """ + self.net.reset_weights() + self.action_head.reset_weights() + self.value_head.reset_weights() + + def forward(self, observation): + """ Calculate model outputs """ + action_hidden, value_hidden = self.net(observation) + return self.action_head(action_hidden), self.value_head(value_hidden) + diff --git a/vel/rl/policy/a2c.py b/vel/rl/policy/a2c.py index 858cf5e4..cd03eead 100644 --- a/vel/rl/policy/a2c.py +++ b/vel/rl/policy/a2c.py @@ -8,8 +8,7 @@ from vel.rl.api import RlPolicy, Rollout, Trajectories from vel.rl.discount_bootstrap import discount_bootstrap_gae -from vel.rl.module.stochastic_action_head import StochasticActionHead -from vel.rl.module.value_head import ValueHead +from vel.rl.module.stochastic_policy import StochasticPolicy class A2C(RlPolicy): @@ -23,40 +22,23 @@ def __init__(self, net: BackboneNetwork, action_space: gym.Space, self.value_coefficient = value_coefficient self.gae_lambda = gae_lambda - self.net = net - - assert not self.net.is_stateful, "For stateful policies, use A2CRnn" - - # Make sure network returns two results - (action_size, value_size) = self.net.size_hints().assert_tuple(2) - - self.action_head = StochasticActionHead( - action_space=action_space, - input_dim=action_size.last(), - ) - - self.value_head = ValueHead( - input_dim=value_size.last() - ) + self.policy = StochasticPolicy(net, action_space) def reset_weights(self): """ Initialize properly model weights """ - self.net.reset_weights() - self.action_head.reset_weights() - self.value_head.reset_weights() + self.policy.reset_weights() def forward(self, observation, state=None): """ Calculate model outputs """ - action_hidden, value_hidden = self.net(observation, state=state) - return self.action_head(action_hidden), self.value_head(value_hidden) + return self.policy(observation) def act(self, observation, state=None, deterministic=False): """ Select actions based on model's output """ action_pd_params, value_output = self(observation) - actions = self.action_head.sample(action_pd_params, deterministic=deterministic) + actions = self.policy.action_head.sample(action_pd_params, deterministic=deterministic) # log likelihood of selected action - logprobs = self.action_head.logprob(actions, action_pd_params) + logprobs = self.policy.action_head.logprob(actions, action_pd_params) return { 'actions': actions, @@ -96,8 +78,8 @@ def calculate_gradient(self, batch_info: BatchInfo, rollout: Rollout) -> dict: pd_params, model_values = self(observations) - log_probs = self.action_head.logprob(actions, pd_params) - entropy = self.action_head.entropy(pd_params) + log_probs = self.policy.action_head.logprob(actions, pd_params) + entropy = self.policy.action_head.entropy(pd_params) # Actual calculations. 
Pretty trivial policy_loss = -torch.mean(advantages * log_probs) diff --git a/vel/rl/xpolicy/semipurgatory/acer.py b/vel/rl/policy/acer.py similarity index 58% rename from vel/rl/xpolicy/semipurgatory/acer.py rename to vel/rl/policy/acer.py index 6d78a603..f4d62580 100644 --- a/vel/rl/xpolicy/semipurgatory/acer.py +++ b/vel/rl/policy/acer.py @@ -1,8 +1,12 @@ +import gym import torch import torch.nn.functional as F +from vel.api import BackboneNetwork, ModelFactory, BatchInfo, Network from vel.metric.base import AveragingNamedMetric -from vel.rl.api import Trajectories, OptimizerAlgoBase +from vel.rl.api import Trajectories, RlPolicy, Rollout +from vel.rl.module.head.stochastic_action_head import make_stockastic_action_head +from vel.rl.module.head.q_head import QHead def select_indices(tensor, indices): @@ -10,18 +14,57 @@ def select_indices(tensor, indices): return tensor.gather(1, indices.unsqueeze(1)).squeeze() -class AcerPolicyGradient(OptimizerAlgoBase): +class QStochasticPolicy(Network): + """ + A policy model with an action-value critic head (instead of more common state-value critic head). + Supports only discrete action spaces (ones that can be enumerated) + """ + + def __init__(self, net: BackboneNetwork, action_space: gym.Space): + super().__init__() + + assert isinstance(action_space, gym.spaces.Discrete) + + self.net = net + + (action_size, value_size) = self.net.size_hints().assert_tuple(2) + + self.action_head = make_stockastic_action_head( + input_dim=action_size.last(), + action_space=action_space + ) + + self.q_head = QHead( + input_dim=value_size.last(), + action_space=action_space + ) + + def reset_weights(self): + """ Initialize properly model weights """ + self.net.reset_weights() + self.action_head.reset_weights() + self.q_head.reset_weights() + + def forward(self, observations): + """ Calculate model outputs """ + action_hidden, q_hidden = self.net(observations) + policy_params = self.action_head(action_hidden) + + q = self.q_head(q_hidden) + + return policy_params, q + + +class ACER(RlPolicy): """ Actor-Critic with Experience Replay - policy gradient calculations """ - def __init__(self, model_factory, discount_factor, trust_region: bool = True, entropy_coefficient: float = 0.01, + def __init__(self, net: BackboneNetwork, net_factory: ModelFactory, action_space: gym.Space, + discount_factor: float, trust_region: bool = True, entropy_coefficient: float = 0.01, q_coefficient: float = 0.5, rho_cap: float = 10.0, retrace_rho_cap: float = 1.0, - max_grad_norm: float = None, average_model_alpha: float = 0.99, trust_region_delta: float = 1.0): - super().__init__(max_grad_norm) - - self.discount_factor = discount_factor + average_model_alpha: float = 0.99, trust_region_delta: float = 1.0): + super().__init__(discount_factor) self.trust_region = trust_region - self.model_factory = model_factory self.entropy_coefficient = entropy_coefficient self.q_coefficient = q_coefficient @@ -30,39 +73,67 @@ def __init__(self, model_factory, discount_factor, trust_region: bool = True, en self.retrace_rho_cap = retrace_rho_cap # Trust region settings - self.average_model = None self.average_model_alpha = average_model_alpha self.trust_region_delta = trust_region_delta - def initialize(self, training_info, model, environment, device): - """ Initialize policy gradient from reinforcer settings """ + self.policy = QStochasticPolicy(net, action_space) + if self.trust_region: - self.average_model = self.model_factory.instantiate(action_space=environment.action_space).to(device) - 
self.average_model.load_state_dict(model.state_dict()) + self.target_policy = QStochasticPolicy(net_factory.instantiate(), action_space) + else: + self.target_policy = None + + def reset_weights(self): + """ Initialize properly model weights """ + self.policy.reset_weights() + + if self.trust_region: + self.target_policy.load_state_dict(self.policy.state_dict()) + + def forward(self, observation, state=None): + """ Calculate model outputs """ + return self.policy(observation) + + def act(self, observation, state=None, deterministic=False): + """ Select actions based on model's output """ + logprobs, q = self(observation) + actions = self.policy.action_head.sample(logprobs, deterministic=deterministic) - def update_average_model(self, model): + # log likelihood of selected action + action_logprobs = self.policy.action_head.logprob(actions, logprobs) + values = (torch.exp(logprobs) * q).sum(dim=1) + + return { + 'actions': actions, + 'q': q, + 'values': values, + 'action:logprobs': action_logprobs, + 'logprobs': logprobs + } + + def update_target_policy(self): """ Update weights of the average model with new model observation """ - for model_param, average_param in zip(model.parameters(), self.average_model.parameters()): + for model_param, average_param in zip(self.policy.parameters(), self.target_policy.parameters()): # EWMA average model update average_param.data.mul_(self.average_model_alpha).add_(model_param.data * (1 - self.average_model_alpha)) - def calculate_gradient(self, batch_info, device, model, rollout): + def calculate_gradient(self, batch_info: BatchInfo, rollout: Rollout) -> dict: """ Calculate loss of the supplied rollout """ assert isinstance(rollout, Trajectories), "ACER algorithm requires trajectory input" - local_epsilon = 1e-6 - - evaluator = model.evaluate(rollout) - - actions = evaluator.get('rollout:actions') - rollout_probabilities = torch.exp(evaluator.get('rollout:logprobs')) - # We calculate the trust-region update with respect to the average model if self.trust_region: - self.update_average_model(model) + self.update_target_policy() + + local_epsilon = 1e-6 + + # Part 0.0 - Rollout values + actions = rollout.batch_tensor('actions') + rollout_probabilities = torch.exp(rollout.batch_tensor('logprobs')) + observations = rollout.batch_tensor('observations') - logprobs = evaluator.get('model:logprobs') - q = evaluator.get('model:q') + # PART 0.1 - Model evaluation + logprobs, q = self(observations) # Selected action values action_logprobs = select_indices(logprobs, actions) @@ -99,7 +170,7 @@ def calculate_gradient(self, batch_info, device, model, rollout): explained_variance = 1 - torch.var(q_retraced - action_q) / torch.var(q_retraced) # Entropy of the policy distribution - policy_entropy = torch.mean(model.entropy(logprobs)) + policy_entropy = torch.mean(self.policy.action_head.entropy(logprobs)) policy_gradient_loss = -torch.mean(advantages * importance_sampling_coefficient * action_logprobs) # Policy gradient bias correction @@ -121,8 +192,7 @@ def calculate_gradient(self, batch_info, device, model, rollout): if self.trust_region: with torch.no_grad(): - average_evaluator = self.average_model.evaluate(rollout) - average_action_logits = average_evaluator.get('model:logprobs') + target_logprobs = self.target_policy(observations)[0] actor_loss = policy_loss - self.entropy_coefficient * policy_entropy q_loss = self.q_coefficient * q_function_loss @@ -134,7 +204,7 @@ def calculate_gradient(self, batch_info, device, model, rollout): # Analytically calculated 
derivative of KL divergence on logits # That makes it hardcoded for discrete action spaces - kl_divergence_grad_symbolic = - torch.exp(average_action_logits) / logprobs.size(0) + kl_divergence_grad_symbolic = - torch.exp(target_logprobs) / logprobs.size(0) k_dot_g = (actor_gradient * kl_divergence_grad_symbolic).sum(dim=-1) k_dot_k = (kl_divergence_grad_symbolic ** 2).sum(dim=-1) @@ -195,7 +265,6 @@ def metrics(self) -> list: AveragingNamedMetric("policy_gradient_bias_correction"), AveragingNamedMetric("explained_variance"), AveragingNamedMetric("advantage_norm"), - AveragingNamedMetric("grad_norm"), AveragingNamedMetric("model_prob_std"), AveragingNamedMetric("rollout_prob_std"), AveragingNamedMetric("avg_q_selected"), @@ -203,17 +272,52 @@ def metrics(self) -> list: ] -def create(model, trust_region, entropy_coefficient, q_coefficient, max_grad_norm, discount_factor, - rho_cap=10.0, retrace_rho_cap=1.0, average_model_alpha=0.99, trust_region_delta=1.0): +class ACERFactory(ModelFactory): + """ Factory class for ACER policies """ + def __init__(self, net_factory, trust_region: bool, entropy_coefficient: float, q_coefficient: float, + discount_factor: float, rho_cap: float = 10.0, retrace_rho_cap: float = 1.0, + average_model_alpha: float = 0.99, trust_region_delta: float = 1.0): + self.net_factory = net_factory + self.trust_region = trust_region + self.entropy_coefficient = entropy_coefficient + self.q_coefficient = q_coefficient + self.discount_factor = discount_factor + self.rho_cap = rho_cap + self.retrace_rho_cap = retrace_rho_cap + self.average_model_alpha = average_model_alpha + self.trust_region_delta = trust_region_delta + + def instantiate(self, **extra_args): + """ Instantiate the model """ + action_space = extra_args.pop('action_space') + net = self.net_factory.instantiate(**extra_args) + + return ACER( + net=net, + net_factory=self.net_factory, + action_space=action_space, + trust_region=self.trust_region, + entropy_coefficient=self.entropy_coefficient, + q_coefficient=self.q_coefficient, + discount_factor=self.discount_factor, + rho_cap=self.rho_cap, + retrace_rho_cap=self.retrace_rho_cap, + average_model_alpha=self.average_model_alpha, + trust_region_delta=self.trust_region_delta, + ) + + +def create(net, trust_region: bool , entropy_coefficient: float, q_coefficient: float, discount_factor: float, + rho_cap: float = 10.0, retrace_rho_cap: float = 1.0, average_model_alpha: float = 0.99, + trust_region_delta: float = 1.0): """ Vel factory function """ - return AcerPolicyGradient( + return ACERFactory( + net_factory=net, trust_region=trust_region, - model_factory=model, entropy_coefficient=entropy_coefficient, q_coefficient=q_coefficient, rho_cap=rho_cap, retrace_rho_cap=retrace_rho_cap, - max_grad_norm=max_grad_norm, discount_factor=discount_factor, average_model_alpha=average_model_alpha, trust_region_delta=trust_region_delta diff --git a/vel/rl/policy/ppo.py b/vel/rl/policy/ppo.py index 58be0a22..6230020d 100644 --- a/vel/rl/policy/ppo.py +++ b/vel/rl/policy/ppo.py @@ -10,9 +10,7 @@ from vel.rl.api import RlPolicy, Rollout, Trajectories from vel.rl.discount_bootstrap import discount_bootstrap_gae - -from vel.rl.module.stochastic_action_head import StochasticActionHead -from vel.rl.module.value_head import ValueHead +from vel.rl.module.stochastic_policy import StochasticPolicy class PPO(RlPolicy): @@ -32,40 +30,23 @@ def __init__(self, net: BackboneNetwork, action_space: gym.Space, else: self.cliprange = cliprange - self.net = net - - assert not self.net.is_stateful, "For 
stateful policies, use PPORnn" - - # Make sure network returns two results - (action_size, value_size) = self.net.size_hints().assert_tuple(2) - - self.action_head = StochasticActionHead( - action_space=action_space, - input_dim=action_size.last(), - ) - - self.value_head = ValueHead( - input_dim=value_size.last() - ) + self.policy = StochasticPolicy(net, action_space) def reset_weights(self): """ Initialize properly model weights """ - self.net.reset_weights() - self.action_head.reset_weights() - self.value_head.reset_weights() + self.policy.reset_weights() - def forward(self, observation): + def forward(self, observation, state=None): """ Calculate model outputs """ - action_hidden, value_hidden = self.net(observation) - return self.action_head(action_hidden), self.value_head(value_hidden) + return self.policy(observation) def act(self, observation, state=None, deterministic=False): """ Select actions based on model's output """ action_pd_params, value_output = self(observation) - actions = self.action_head.sample(action_pd_params, deterministic=deterministic) + actions = self.policy.action_head.sample(action_pd_params, deterministic=deterministic) # log likelihood of selected action - logprobs = self.action_head.logprob(actions, action_pd_params) + logprobs = self.policy.action_head.logprob(actions, action_pd_params) return { 'actions': actions, @@ -109,8 +90,8 @@ def calculate_gradient(self, batch_info: BatchInfo, rollout: Rollout) -> dict: # PART 0.1 - Model evaluation pd_params, model_values = self(observations) - model_action_logprobs = self.action_head.logprob(actions, pd_params) - entropy = self.action_head.entropy(pd_params) + model_action_logprobs = self.policy.action_head.logprob(actions, pd_params) + entropy = self.policy.action_head.entropy(pd_params) # Select the cliprange current_cliprange = self.cliprange.value(batch_info['progress']) diff --git a/vel/rl/policy/purgatory/__init__.py b/vel/rl/policy/purgatory/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/vel/rl/policy/a2c_rnn.py b/vel/rl/policy/purgatory/a2c_rnn.py similarity index 100% rename from vel/rl/policy/a2c_rnn.py rename to vel/rl/policy/purgatory/a2c_rnn.py diff --git a/vel/rl/xpolicy/semipurgatory/ddpg.py b/vel/rl/policy/purgatory/ddpg.py similarity index 100% rename from vel/rl/xpolicy/semipurgatory/ddpg.py rename to vel/rl/policy/purgatory/ddpg.py diff --git a/vel/rl/xpolicy/semipurgatory/distributional_dqn.py b/vel/rl/policy/purgatory/distributional_dqn.py similarity index 100% rename from vel/rl/xpolicy/semipurgatory/distributional_dqn.py rename to vel/rl/policy/purgatory/distributional_dqn.py diff --git a/vel/rl/policy/dqn.py b/vel/rl/policy/purgatory/dqn.py similarity index 100% rename from vel/rl/policy/dqn.py rename to vel/rl/policy/purgatory/dqn.py diff --git a/vel/rl/policy/ppo_rnn.py b/vel/rl/policy/purgatory/ppo_rnn.py similarity index 100% rename from vel/rl/policy/ppo_rnn.py rename to vel/rl/policy/purgatory/ppo_rnn.py diff --git a/vel/rl/reinforcer/buffered_mixed_policy_iteration_reinforcer.py b/vel/rl/reinforcer/buffered_mixed_policy_iteration_reinforcer.py index f80694dc..11764c84 100644 --- a/vel/rl/reinforcer/buffered_mixed_policy_iteration_reinforcer.py +++ b/vel/rl/reinforcer/buffered_mixed_policy_iteration_reinforcer.py @@ -7,8 +7,8 @@ from vel.api import TrainingInfo, EpochInfo, BatchInfo, Model, ModelFactory from vel.openai.baselines.common.vec_env import VecEnv from vel.rl.api import ( - Reinforcer, ReinforcerFactory, VecEnvFactory, ReplayEnvRollerBase, AlgoBase, 
ReplayEnvRollerFactoryBase -) + Reinforcer, ReinforcerFactory, VecEnvFactory, ReplayEnvRollerBase, ReplayEnvRollerFactoryBase, + RlPolicy) from vel.rl.metrics import ( FPSMetric, EpisodeLengthMetric, EpisodeRewardMetricQuantile, EpisodeRewardMetric, FramesMetric ) @@ -33,15 +33,19 @@ class BufferedMixedPolicyIterationReinforcer(Reinforcer): """ def __init__(self, device: torch.device, settings: BufferedMixedPolicyIterationReinforcerSettings, env: VecEnv, - model: Model, env_roller: ReplayEnvRollerBase, algo: AlgoBase) -> None: + model: Model, env_roller: ReplayEnvRollerBase) -> None: self.device = device self.settings = settings self.environment = env - self._trained_model = model.to(self.device) + self._model: RlPolicy = model.to(self.device) self.env_roller = env_roller - self.algo = algo + + @property + def policy(self) -> RlPolicy: + """ Model trained by this reinforcer """ + return self._model def metrics(self) -> list: """ List of metrics to track for this learning process """ @@ -54,12 +58,7 @@ def metrics(self) -> list: EpisodeLengthMetric("episode_length") ] - return my_metrics + self.algo.metrics() + self.env_roller.metrics() - - @property - def policy(self) -> Model: - """ Model trained by this reinforcer """ - return self._trained_model + return my_metrics + self.policy.metrics() + self.env_roller.metrics() def initialize_training(self, training_info: TrainingInfo, model_state=None, hidden_state=None): """ Prepare models for training """ @@ -68,11 +67,7 @@ def initialize_training(self, training_info: TrainingInfo, model_state=None, hid else: self.policy.reset_weights() - self.algo.initialize( - training_info=training_info, model=self.policy, environment=self.environment, device=self.device - ) - - def train_epoch(self, epoch_info: EpochInfo, interactive=True): + def train_epoch(self, epoch_info: EpochInfo, interactive=True) -> None: """ Train model on an epoch of a fixed number of batch updates """ epoch_info.on_epoch_begin() @@ -91,7 +86,7 @@ def train_epoch(self, epoch_info: EpochInfo, interactive=True): epoch_info.result_accumulator.freeze_results() epoch_info.on_epoch_end() - def train_batch(self, batch_info: BatchInfo): + def train_batch(self, batch_info: BatchInfo) -> None: """ Single, most atomic 'step' of learning this reinforcer can perform """ batch_info['sub_batch_data'] = [] @@ -113,12 +108,13 @@ def on_policy_train_batch(self, batch_info: BatchInfo): """ Perform an 'on-policy' training step of evaluating an env and a single backpropagation step """ self.policy.train() - rollout = self.env_roller.rollout(batch_info, self.policy, self.settings.number_of_steps).to_device(self.device) + rollout = self.env_roller.rollout(batch_info, self.settings.number_of_steps).to_device(self.device) - batch_result = self.algo.optimize( + # Preprocessing of the rollout for this algorithm + rollout = self.policy.process_rollout(rollout) + + batch_result = self.policy.optimize( batch_info=batch_info, - device=self.device, - model=self.policy, rollout=rollout ) @@ -130,12 +126,10 @@ def off_policy_train_batch(self, batch_info: BatchInfo): """ Perform an 'off-policy' training step of sampling the replay buffer and gradient descent """ self.policy.train() - rollout = self.env_roller.sample(batch_info, self.policy, self.settings.number_of_steps).to_device(self.device) + rollout = self.env_roller.sample(batch_info, self.settings.number_of_steps).to_device(self.device) - batch_result = self.algo.optimize( + batch_result = self.policy.optimize( batch_info=batch_info, - device=self.device, - 
model=self.policy, rollout=rollout ) @@ -145,25 +139,23 @@ def off_policy_train_batch(self, batch_info: BatchInfo): class BufferedMixedPolicyIterationReinforcerFactory(ReinforcerFactory): """ Factory class for the PolicyGradientReplayBuffer factory """ def __init__(self, settings, env_factory: VecEnvFactory, model_factory: ModelFactory, - env_roller_factory: ReplayEnvRollerFactoryBase, algo: AlgoBase, parallel_envs: int, seed: int): + env_roller_factory: ReplayEnvRollerFactoryBase, parallel_envs: int, seed: int): self.settings = settings self.model_factory = model_factory self.env_factory = env_factory self.parallel_envs = parallel_envs self.env_roller_factory = env_roller_factory - self.algo = algo self.seed = seed def instantiate(self, device: torch.device) -> Reinforcer: env = self.env_factory.instantiate(parallel_envs=self.parallel_envs, seed=self.seed) - model = self.model_factory.instantiate(action_space=env.action_space) - env_roller = self.env_roller_factory.instantiate(env, device) - - return BufferedMixedPolicyIterationReinforcer(device, self.settings, env, model, env_roller, self.algo) + policy = self.model_factory.instantiate(action_space=env.action_space) + env_roller = self.env_roller_factory.instantiate(environment=env, policy=policy, device=device) + return BufferedMixedPolicyIterationReinforcer(device, self.settings, env, policy, env_roller) -def create(model_config, model, vec_env, algo, env_roller, +def create(model_config, model, vec_env, env_roller, parallel_envs, number_of_steps, experience_replay=1, stochastic_experience_replay=True): """ Vel factory function """ @@ -179,6 +171,5 @@ def create(model_config, model, vec_env, algo, env_roller, model_factory=model, parallel_envs=parallel_envs, env_roller_factory=env_roller, - algo=algo, seed=model_config.seed ) diff --git a/vel/rl/reinforcer/on_policy_iteration_reinforcer.py b/vel/rl/reinforcer/on_policy_iteration_reinforcer.py index 93096a6b..03b53e28 100644 --- a/vel/rl/reinforcer/on_policy_iteration_reinforcer.py +++ b/vel/rl/reinforcer/on_policy_iteration_reinforcer.py @@ -43,6 +43,11 @@ def __init__(self, device: torch.device, settings: OnPolicyIterationReinforcerSe self._model: RlPolicy = policy.to(self.device) + @property + def policy(self) -> RlPolicy: + """ Model trained by this reinforcer """ + return self._model + def metrics(self) -> list: """ List of metrics to track for this learning process """ my_metrics = [ @@ -56,11 +61,6 @@ def metrics(self) -> list: return my_metrics + self.env_roller.metrics() + self.policy.metrics() - @property - def policy(self) -> RlPolicy: - """ Model trained by this reinforcer """ - return self._model - def initialize_training(self, training_info: TrainingInfo, model_state=None, hidden_state=None): """ Prepare models for training """ if model_state is not None: diff --git a/vel/rl/xpolicy/semipurgatory/a2c_rnn.py b/vel/rl/xpolicy/semipurgatory/a2c_rnn.py deleted file mode 100644 index fc38671a..00000000 --- a/vel/rl/xpolicy/semipurgatory/a2c_rnn.py +++ /dev/null @@ -1,92 +0,0 @@ -import torch -import torch.nn.functional as F - -from vel.metric.base import AveragingNamedMetric -from vel.calc.function import explained_variance -from vel.rl.api import OptimizerAlgoBase, Rollout, Trajectories -from vel.rl.discount_bootstrap import discount_bootstrap_gae - - -class A2CPolicyGradient(OptimizerAlgoBase): - """ Simplest policy gradient - calculate loss as an advantage of an actor versus value function """ - def __init__(self, entropy_coefficient, value_coefficient, discount_factor: 
float, gae_lambda=1.0): - super().__init__() - - self.entropy_coefficient = entropy_coefficient - self.value_coefficient = value_coefficient - self.gae_lambda = gae_lambda - self.discount_factor = discount_factor - - def process_rollout(self, batch_info, rollout: Rollout): - """ Process rollout for ALGO before any chunking/shuffling """ - assert isinstance(rollout, Trajectories), "A2C requires trajectory rollouts" - - advantages = discount_bootstrap_gae( - rewards_buffer=rollout.transition_tensors['rewards'], - dones_buffer=rollout.transition_tensors['dones'], - values_buffer=rollout.transition_tensors['values'], - final_values=rollout.rollout_tensors['final_values'], - discount_factor=self.discount_factor, - gae_lambda=self.gae_lambda, - number_of_steps=rollout.num_steps - ) - - returns = advantages + rollout.transition_tensors['values'] - - rollout.transition_tensors['advantages'] = advantages - rollout.transition_tensors['returns'] = returns - - return rollout - - def calculate_gradient(self, batch_info, device, model, rollout): - """ Calculate loss of the supplied rollout """ - evaluator = model.evaluate(rollout) - - # Use evaluator interface to get the what we are interested in from the model - advantages = evaluator.get('rollout:advantages') - returns = evaluator.get('rollout:returns') - rollout_values = evaluator.get('rollout:values') - - logprobs = evaluator.get('model:action:logprobs') - values = evaluator.get('model:values') - entropy = evaluator.get('model:entropy') - - # Actual calculations. Pretty trivial - policy_loss = -torch.mean(advantages * logprobs) - value_loss = 0.5 * F.mse_loss(values, returns) - policy_entropy = torch.mean(entropy) - - loss_value = ( - policy_loss - self.entropy_coefficient * policy_entropy + self.value_coefficient * value_loss - ) - - loss_value.backward() - - return { - 'policy_loss': policy_loss.item(), - 'value_loss': value_loss.item(), - 'policy_entropy': policy_entropy.item(), - 'advantage_norm': torch.norm(advantages).item(), - 'explained_variance': explained_variance(returns, rollout_values) - } - - def metrics(self) -> list: - """ List of metrics to track for this learning process """ - return [ - AveragingNamedMetric("value_loss"), - AveragingNamedMetric("policy_entropy"), - AveragingNamedMetric("policy_loss"), - AveragingNamedMetric("grad_norm"), - AveragingNamedMetric("advantage_norm"), - AveragingNamedMetric("explained_variance") - ] - - -def create(entropy_coefficient, value_coefficient, discount_factor, gae_lambda=1.0): - """ Vel factory function """ - return A2CPolicyGradient( - entropy_coefficient, - value_coefficient, - discount_factor, - gae_lambda - ) diff --git a/vel/rl/xpolicy/semipurgatory/ppo_rnn.py b/vel/rl/xpolicy/semipurgatory/ppo_rnn.py deleted file mode 100644 index 3ef76c72..00000000 --- a/vel/rl/xpolicy/semipurgatory/ppo_rnn.py +++ /dev/null @@ -1,133 +0,0 @@ -import torch - -import numbers - -from vel.calc.function import explained_variance -from vel.function.constant import ConstantSchedule -from vel.metric.base import AveragingNamedMetric -from vel.rl.api import OptimizerAlgoBase, Rollout, Trajectories -from vel.rl.discount_bootstrap import discount_bootstrap_gae - - -class PpoPolicyGradient(OptimizerAlgoBase): - """ Proximal Policy Optimization - https://arxiv.org/abs/1707.06347 """ - def __init__(self, entropy_coefficient, value_coefficient, cliprange, max_grad_norm, discount_factor: float, - normalize_advantage: bool = True, gae_lambda: float = 1.0): - super().__init__(max_grad_norm) - - self.entropy_coefficient = 
entropy_coefficient - self.value_coefficient = value_coefficient - self.normalize_advantage = normalize_advantage - - if isinstance(cliprange, numbers.Number): - self.cliprange = ConstantSchedule(cliprange) - else: - self.cliprange = cliprange - - self.gae_lambda = gae_lambda - self.discount_factor = discount_factor - - def process_rollout(self, batch_info, rollout: Rollout): - """ Process rollout for ALGO before any chunking/shuffling """ - assert isinstance(rollout, Trajectories), "PPO requires trajectory rollouts" - - advantages = discount_bootstrap_gae( - rewards_buffer=rollout.transition_tensors['rewards'], - dones_buffer=rollout.transition_tensors['dones'], - values_buffer=rollout.transition_tensors['values'], - final_values=rollout.rollout_tensors['final_values'], - discount_factor=self.discount_factor, - gae_lambda=self.gae_lambda, - number_of_steps=rollout.num_steps - ) - - returns = advantages + rollout.transition_tensors['values'] - - rollout.transition_tensors['advantages'] = advantages - rollout.transition_tensors['returns'] = returns - - return rollout - - def calculate_gradient(self, batch_info, device, model, rollout): - """ Calculate loss of the supplied rollout """ - evaluator = model.evaluate(rollout) - - # Part 0.0 - Rollout values - advantages = evaluator.get('rollout:advantages') - rollout_values = evaluator.get('rollout:values') - rollout_action_logprobs = evaluator.get('rollout:action:logprobs') - returns = evaluator.get('rollout:returns') - - # PART 0.1 - Model evaluation - entropy = evaluator.get('model:entropy') - model_values = evaluator.get('model:values') - model_action_logprobs = evaluator.get('model:action:logprobs') - - # Select the cliprange - current_cliprange = self.cliprange.value(batch_info['progress']) - - # Normalize the advantages? 
- if self.normalize_advantage: - advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-8) - - # PART 1 - policy entropy - policy_entropy = torch.mean(entropy) - - # PART 2 - value function - value_output_clipped = rollout_values + torch.clamp( - model_values - rollout_values, -current_cliprange, current_cliprange - ) - value_loss_part1 = (model_values - returns).pow(2) - value_loss_part2 = (value_output_clipped - returns).pow(2) - value_loss = 0.5 * torch.mean(torch.max(value_loss_part1, value_loss_part2)) - - # PART 3 - policy gradient loss - ratio = torch.exp(model_action_logprobs - rollout_action_logprobs) - - pg_loss_part1 = -advantages * ratio - pg_loss_part2 = -advantages * torch.clamp(ratio, 1.0 - current_cliprange, 1.0 + current_cliprange) - policy_loss = torch.mean(torch.max(pg_loss_part1, pg_loss_part2)) - - loss_value = ( - policy_loss - self.entropy_coefficient * policy_entropy + self.value_coefficient * value_loss - ) - - loss_value.backward() - - with torch.no_grad(): - approx_kl_divergence = 0.5 * torch.mean((model_action_logprobs - rollout_action_logprobs).pow(2)) - clip_fraction = torch.mean((torch.abs(ratio - 1.0) > current_cliprange).to(dtype=torch.float)) - - return { - 'policy_loss': policy_loss.item(), - 'value_loss': value_loss.item(), - 'policy_entropy': policy_entropy.item(), - 'approx_kl_divergence': approx_kl_divergence.item(), - 'clip_fraction': clip_fraction.item(), - 'advantage_norm': torch.norm(advantages).item(), - 'explained_variance': explained_variance(returns, rollout_values) - } - - def metrics(self) -> list: - """ List of metrics to track for this learning process """ - return [ - AveragingNamedMetric("policy_loss"), - AveragingNamedMetric("value_loss"), - AveragingNamedMetric("policy_entropy"), - AveragingNamedMetric("approx_kl_divergence"), - AveragingNamedMetric("clip_fraction"), - AveragingNamedMetric("grad_norm"), - AveragingNamedMetric("advantage_norm"), - AveragingNamedMetric("explained_variance") - ] - - -def create(entropy_coefficient, value_coefficient, cliprange, max_grad_norm, discount_factor, - normalize_advantage=True, gae_lambda=1.0): - """ Vel factory function """ - return PpoPolicyGradient( - entropy_coefficient, value_coefficient, cliprange, max_grad_norm, - discount_factor=discount_factor, - normalize_advantage=normalize_advantage, - gae_lambda=gae_lambda - ) diff --git a/vel/rl/xpolicy/stochastic_policy.py b/vel/rl/xpolicy/stochastic_policy.py deleted file mode 100644 index 23db9d13..00000000 --- a/vel/rl/xpolicy/stochastic_policy.py +++ /dev/null @@ -1,70 +0,0 @@ -import gym -import typing - -from vel.api import LinearBackboneModel, ModelFactory, BackboneModel -from vel.module.input.identity import IdentityFactory -from vel.rl.module.stochastic_action_head import StochasticActionHead -from vel.rl.module.value_head import ValueHead - - -class StochasticPolicy(BackboneModel): - """ - Most generic policy gradient model class with a set of common actor-critic heads that share a single backbone - """ - - def __init__(self, input_block: BackboneModel, backbone: LinearBackboneModel, action_space: gym.Space): - super().__init__() - - self.input_block = input_block - self.backbone = backbone - - assert not self.backbone.is_stateful, "Backbone shouldn't have state" - - self.action_head = StochasticActionHead( - action_space=action_space, - input_dim=self.backbone.output_dim - ) - - self.value_head = ValueHead( - input_dim=self.backbone.output_dim - ) - - def reset_weights(self): - """ Initialize properly model weights """ - 
self.input_block.reset_weights() - self.backbone.reset_weights() - self.action_head.reset_weights() - self.value_head.reset_weights() - - def forward(self, observation): - """ Calculate model outputs """ - input_data = self.input_block(observation) - - base_output = self.backbone(input_data) - - action_output = self.action_head(base_output) - value_output = self.value_head(base_output) - - return action_output, value_output - - -class StochasticPolicyFactory(ModelFactory): - """ Factory class for policy gradient models """ - def __init__(self, input_block: IdentityFactory, backbone: ModelFactory): - self.backbone = backbone - self.input_block = input_block - - def instantiate(self, **extra_args): - """ Instantiate the model """ - input_block = self.input_block.instantiate() - backbone = self.backbone.instantiate(**extra_args) - - return StochasticPolicy(input_block, backbone, extra_args['action_space']) - - -def create(backbone: ModelFactory, input_block: typing.Optional[ModelFactory] = None): - """ Vel factory function """ - if input_block is None: - input_block = IdentityFactory() - - return StochasticPolicyFactory(input_block=input_block, backbone=backbone) From cbb38f3b2fb32c75eeccaa1c3cc8741d5ee5a48c Mon Sep 17 00:00:00 2001 From: Million Integrals Date: Wed, 2 Oct 2019 20:37:34 -0700 Subject: [PATCH 105/162] Revived the DDQN config. --- ..._ddqn_prioritized.yaml => atari_ddqn.yaml} | 59 +++---- .../rl/atari/{purgatory => }/atari_dqn.yaml | 31 ++-- .../purgatory/atari_acer_trust_region.yaml | 83 --------- vel/rl/api/policy.py | 4 - .../prioritized_circular_replay_buffer.py | 2 +- vel/rl/env/mujoco.py | 31 +--- vel/rl/env/wrappers/env_normalize.py | 56 ------ .../transition_replay_env_roller.py | 26 +-- .../purgatory => }/double_nature_cnn.py | 53 ++++-- vel/rl/layer/premade/purgatory/__init__.py | 0 .../layer/{premade => purgatory}/__init__.py | 0 .../purgatory/double_noisy_nature_cnn.py | 3 - vel/rl/layer/{premade => }/purgatory/mlp.py | 3 - .../layer/{premade => }/purgatory/mlp_rnn.py | 3 - .../{premade => }/purgatory/nature_cnn_rnn.py | 0 .../purgatory/noisy_nature_cnn.py | 0 vel/rl/layer/{premade => }/purgatory/rnn.py | 3 - vel/rl/module/head/q_dueling_head.py | 6 +- vel/rl/module/noise/eps_greedy.py | 34 ++-- vel/rl/module/noise/ou_noise.py | 12 +- vel/rl/module/q_policy.py | 50 ++++++ vel/rl/module/q_stochastic_policy.py | 46 +++++ vel/rl/policy/acer.py | 57 +----- vel/rl/policy/dqn.py | 165 ++++++++++++++++++ vel/rl/policy/purgatory/dqn.py | 102 ----------- ...fered_mixed_policy_iteration_reinforcer.py | 9 +- ...uffered_off_policy_iteration_reinforcer.py | 44 ++--- 27 files changed, 400 insertions(+), 482 deletions(-) rename examples-configs/rl/atari/{dqn/atari_dueling_ddqn_prioritized.yaml => atari_ddqn.yaml} (58%) rename examples-configs/rl/atari/{purgatory => }/atari_dqn.yaml (77%) delete mode 100644 examples-configs/rl/atari/purgatory/atari_acer_trust_region.yaml delete mode 100644 vel/rl/env/wrappers/env_normalize.py rename vel/rl/layer/{premade/purgatory => }/double_nature_cnn.py (66%) delete mode 100644 vel/rl/layer/premade/purgatory/__init__.py rename vel/rl/layer/{premade => purgatory}/__init__.py (100%) rename vel/rl/layer/{premade => }/purgatory/double_noisy_nature_cnn.py (98%) rename vel/rl/layer/{premade => }/purgatory/mlp.py (99%) rename vel/rl/layer/{premade => }/purgatory/mlp_rnn.py (98%) rename vel/rl/layer/{premade => }/purgatory/nature_cnn_rnn.py (100%) rename vel/rl/layer/{premade => }/purgatory/noisy_nature_cnn.py (100%) rename vel/rl/layer/{premade => 
}/purgatory/rnn.py (98%) create mode 100644 vel/rl/module/q_policy.py create mode 100644 vel/rl/module/q_stochastic_policy.py create mode 100644 vel/rl/policy/dqn.py delete mode 100644 vel/rl/policy/purgatory/dqn.py diff --git a/examples-configs/rl/atari/dqn/atari_dueling_ddqn_prioritized.yaml b/examples-configs/rl/atari/atari_ddqn.yaml similarity index 58% rename from examples-configs/rl/atari/dqn/atari_dueling_ddqn_prioritized.yaml rename to examples-configs/rl/atari/atari_ddqn.yaml index 3084f15a..ecbf6544 100644 --- a/examples-configs/rl/atari/dqn/atari_dueling_ddqn_prioritized.yaml +++ b/examples-configs/rl/atari/atari_ddqn.yaml @@ -12,26 +12,37 @@ vec_env: model: - name: vel.rl.models.q_dueling_model + name: vel.rl.policy.dqn - input_block: - name: vel.modules.input.image_to_tensor + double_dqn: true + dueling_dqn: true + target_update_frequency: 10_000 # After how many batches to update the target network + max_grad_norm: 0.5 - backbone: - name: vel.rl.models.backbone.double_nature_cnn - input_width: 84 - input_height: 84 - input_channels: 4 # The same as frame_history + discount_factor: 0.99 + + epsilon: + name: vel.function.linear_and_constant + end_of_interpolation: 0.1 + initial_value: 1.0 + final_value: 0.1 + + net: + name: vel.net.modular + layers: + - name: vel.net.layer.input.image_to_tensor + size: [84, 84, 4] # Number of channels is frame history + - name: vel.rl.layer.double_nature_cnn reinforcer: - name: vel.rl.reinforcers.buffered_off_policy_iteration_reinforcer + name: vel.rl.reinforcer.buffered_off_policy_iteration_reinforcer env_roller: name: vel.rl.env_roller.transition_replay_env_roller replay_buffer: - name: vel.rl.buffers.prioritized_circular_replay_buffer + name: vel.rl.buffer.prioritized_circular_replay_buffer buffer_initial_size: 30_000 # How many samples we need in the buffer before we start using replay buffer buffer_capacity: 250_000 @@ -42,30 +53,12 @@ reinforcer: priority_exponent: 0.6 priority_weight: - name: vel.schedules.linear + name: vel.function.linear initial_value: 0.4 final_value: 1.0 priority_epsilon: 1.0e-6 - action_noise: - name: vel.rl.modules.noise.eps_greedy - - epsilon: - name: vel.schedules.linear_and_constant - end_of_interpolation: 0.1 - initial_value: 1.0 - final_value: 0.1 - - algo: - name: vel.rl.algo.dqn - - double_dqn: true - target_update_frequency: 10_000 # After how many batches to update the target network - max_grad_norm: 0.5 - - discount_factor: 0.99 - rollout_steps: 4 # How many environment steps to perform per batch of training training_steps: 32 # How many environment steps (per env) to perform per training round @@ -73,7 +66,7 @@ reinforcer: optimizer: - name: vel.optimizers.rmsprop + name: vel.optimizer.rmsprop lr: 2.5e-4 alpha: 0.95 momentum: 0.95 @@ -82,15 +75,15 @@ optimizer: commands: train: - name: vel.rl.commands.rl_train_command + name: vel.rl.command.rl_train_command total_frames: 1.1e7 # 11M batches_per_epoch: 2500 record: - name: vel.rl.commands.record_movie_command + name: vel.rl.command.record_movie_command takes: 10 videoname: 'atari_vid_{:04}.avi' evaluate: - name: vel.rl.commands.evaluate_env_command + name: vel.rl.command.evaluate_env_command takes: 100 diff --git a/examples-configs/rl/atari/purgatory/atari_dqn.yaml b/examples-configs/rl/atari/atari_dqn.yaml similarity index 77% rename from examples-configs/rl/atari/purgatory/atari_dqn.yaml rename to examples-configs/rl/atari/atari_dqn.yaml index a811880a..9851ecba 100644 --- a/examples-configs/rl/atari/purgatory/atari_dqn.yaml +++ 
b/examples-configs/rl/atari/atari_dqn.yaml @@ -12,19 +12,23 @@ vec_env: model: - name: vel.rl.algo.dqn + name: vel.rl.policy.dqn target_update_frequency: 10_000 # After how many batches to update the target network discount_factor: 0.99 - backbone: - name: vel.module.sequence - modules: - - name: vel.modules.input.image_to_tensor - - name: vel.rl.models.backbone.nature_cnn - input_width: 84 - input_height: 84 - input_channels: 4 # The same as frame_history + epsilon: + name: vel.function.linear_and_constant + end_of_interpolation: 0.1 + initial_value: 1.0 + final_value: 0.1 + + net: + name: vel.net.modular + layers: + - name: vel.net.layer.input.image_to_tensor + size: [84, 84, 4] # Number of channels is frame history + - name: vel.rl.layer.nature_cnn reinforcer: @@ -43,15 +47,6 @@ reinforcer: frame_stack_compensation: true frame_history: 4 # How many stacked frames go into a single observation - action_noise: - name: vel.rl.module.noise.eps_greedy - - epsilon: - name: vel.function.linear_and_constant - end_of_interpolation: 0.1 - initial_value: 1.0 - final_value: 0.1 - rollout_steps: 4 # How many environment steps (per env) to perform per batch of training training_steps: 32 # How many environment steps (per env) to perform per training round parallel_envs: 1 # Roll out only one env in parallel, just like in DeepMind paper diff --git a/examples-configs/rl/atari/purgatory/atari_acer_trust_region.yaml b/examples-configs/rl/atari/purgatory/atari_acer_trust_region.yaml deleted file mode 100644 index 810a6a51..00000000 --- a/examples-configs/rl/atari/purgatory/atari_acer_trust_region.yaml +++ /dev/null @@ -1,83 +0,0 @@ -name: 'atari_acer_trust_region' - - -env: - name: vel.rl.env.classic_atari - game: !param game = 'BreakoutNoFrameskip-v4' - - -vec_env: - name: vel.rl.vecenv.shared_mem - frame_history: 4 # How many stacked frames go into a single observation - - -model: - name: vel.rl.models.q_stochastic_policy_model - - input_block: - name: vel.modules.input.image_to_tensor - - backbone: - name: vel.rl.models.backbone.nature_cnn - input_width: 84 - input_height: 84 - input_channels: 4 # The same as frame_history - - -reinforcer: - name: vel.rl.reinforcers.buffered_mixed_policy_iteration_reinforcer - - env_roller: - name: vel.rl.env_roller.trajectory_replay_env_roller - - replay_buffer: - name: vel.rl.buffers.circular_replay_buffer - - buffer_initial_size: 1_000 # How many samples we need in the buffer before we start using replay buffer - buffer_capacity: 50_000 - - # Because env has a framestack already built-in, save memory by encoding only last frames in the replay buffer - frame_stack_compensation: true - frame_history: 4 # How many stacked frames go into a single observation - - algo: - name: vel.rl.algo.policy_gradient.acer - entropy_coefficient: 0.01 - q_coefficient: 0.5 - rho_cap: 10.0 - retrace_rho_cap: 1.0 - - max_grad_norm: 10.0 - discount_factor: 0.99 - - trust_region: true - trust_region_delta: 1.0 - - parallel_envs: 12 # How many environments to run in parallel - number_of_steps: 20 # How many environment steps go into a single batch - experience_replay: 4 - - -optimizer: - name: vel.optimizers.rmsprop - lr: 7.0e-4 - alpha: 0.99 - # epsilon: 1.0e-5 - epsilon: 1.0e-3 - - -commands: - train: - name: vel.rl.commands.rl_train_command - total_frames: 1.1e7 - batches_per_epoch: 10 - - record: - name: vel.rl.commands.record_movie_command - takes: 10 - videoname: 'atari_vid_{:04}.avi' - - evaluate: - name: vel.rl.commands.evaluate_env_command - takes: 100 - parallel_envs: 12 # How many 
environments to run in parallel diff --git a/vel/rl/api/policy.py b/vel/rl/api/policy.py index 3a73003e..4d1109c2 100644 --- a/vel/rl/api/policy.py +++ b/vel/rl/api/policy.py @@ -62,10 +62,6 @@ def reset_state(self, state, dones): #################################################################################################################### # Utility Methods - that provide default implementations but may be short circuited by some implementations - def value(self, observation, state=None): - """ Return value for given observation """ - return self.act(observation, state=state)['values'] - def action(self, observation, state=None, deterministic=False): """ Return policy action for given observation """ return self.act(observation, state=state, deterministic=deterministic)['actions'] diff --git a/vel/rl/buffer/prioritized_circular_replay_buffer.py b/vel/rl/buffer/prioritized_circular_replay_buffer.py index 85f6dedb..0081d73c 100644 --- a/vel/rl/buffer/prioritized_circular_replay_buffer.py +++ b/vel/rl/buffer/prioritized_circular_replay_buffer.py @@ -58,7 +58,7 @@ def _get_transitions(self, probs, indexes, tree_idxs, batch_info, forward_steps= weights = (capacity * probs) ** (-priority_weight) weights = weights / weights.max(axis=0, keepdims=True) - transition_arrays['weights'] = weights + transition_arrays['weights'] = weights.astype(np.float32) transition_tensors = {k: torch.from_numpy(v) for k, v in transition_arrays.items()} transitions = Trajectories( diff --git a/vel/rl/env/mujoco.py b/vel/rl/env/mujoco.py index 3c511c69..9b16b8a0 100644 --- a/vel/rl/env/mujoco.py +++ b/vel/rl/env/mujoco.py @@ -6,28 +6,22 @@ from vel.openai.baselines import logger from vel.openai.baselines.bench import Monitor from vel.rl.api import EnvFactory -from vel.rl.env.wrappers.env_normalize import EnvNormalize from vel.util.situational import process_environment_settings DEFAULT_SETTINGS = { 'default': { 'monitor': False, - 'allow_early_resets': False, - 'normalize_observations': False, - 'normalize_returns': False, + 'allow_early_resets': False }, 'record': { 'monitor': False, - 'allow_early_resets': True, - 'normalize_observations': False, - 'normalize_returns': False, + 'allow_early_resets': True } } -def env_maker(environment_id, seed, serial_id, monitor=False, allow_early_resets=False, normalize_observations=False, - normalize_returns=False, normalize_gamma=0.99): +def env_maker(environment_id, seed, serial_id, monitor=False, allow_early_resets=False): """ Create a relatively raw atari environment """ env = gym.make(environment_id) env.seed(seed + serial_id) @@ -40,30 +34,16 @@ def env_maker(environment_id, seed, serial_id, monitor=False, allow_early_resets env = Monitor(env, logdir, allow_early_resets=allow_early_resets) - if normalize_observations or normalize_returns: - env = EnvNormalize( - env, - normalize_observations=normalize_observations, - normalize_returns=normalize_returns, - gamma=normalize_gamma - ) - return env class MujocoEnv(EnvFactory): """ Atari game environment wrapped in the same way as Deep Mind and OpenAI baselines """ - def __init__(self, envname, normalize_observations=False, normalize_returns=False, settings=None, presets=None): + def __init__(self, envname, settings=None, presets=None): self.envname = envname settings = settings if settings is not None else {} - if normalize_observations: - settings['normalize_observations'] = True - - if normalize_returns: - settings['normalize_returns'] = True - self.settings = process_environment_settings(DEFAULT_SETTINGS, settings, 
presets) def specification(self) -> EnvSpec: @@ -80,11 +60,10 @@ def instantiate(self, seed=0, serial_id=0, preset='default', extra_args=None) -> return env_maker(self.envname, seed, serial_id, **settings) -def create(game, normalize_returns=False, settings=None, presets=None): +def create(game, settings=None, presets=None): """ Vel factory function """ return MujocoEnv( envname=game, - normalize_returns=normalize_returns, settings=settings, presets=presets ) diff --git a/vel/rl/env/wrappers/env_normalize.py b/vel/rl/env/wrappers/env_normalize.py deleted file mode 100644 index be21772f..00000000 --- a/vel/rl/env/wrappers/env_normalize.py +++ /dev/null @@ -1,56 +0,0 @@ -import gym -import numpy as np - -from vel.openai.baselines.common.running_mean_std import RunningMeanStd - - -class EnvNormalize(gym.Wrapper): - """ - Single environment normalization based on VecNormalize from OpenAI baselines - """ - def __init__(self, env, normalize_observations=True, normalize_returns=True, - clip_observations=10., clip_rewards=10., gamma=0.99, epsilon=1e-8): - super().__init__(env) - - self.ob_rms = RunningMeanStd(shape=self.observation_space.shape) if normalize_observations else None - self.ret_rms = RunningMeanStd(shape=()) if normalize_returns else None - self.clipob = clip_observations - self.cliprew = clip_rewards - self.ret = 0.0 - self.gamma = gamma - self.epsilon = epsilon - - def step(self, action): - """ - Apply sequence of actions to sequence of environments - actions -> (observations, rewards, news) - - where 'news' is a boolean vector indicating whether each element is new. - """ - obs, rews, news, infos = self.env.step(action) - - self.ret = self.ret * self.gamma + rews - - obs = self._filter_observation(obs) - - if self.ret_rms: - self.ret_rms.update(np.array([self.ret])) - rews = np.clip(rews / np.sqrt(self.ret_rms.var + self.epsilon), -self.cliprew, self.cliprew) - - return obs, rews, news, infos - - def _filter_observation(self, obs): - if self.ob_rms: - self.ob_rms.update(obs[None]) - obs = np.clip((obs - self.ob_rms.mean) / np.sqrt(self.ob_rms.var + self.epsilon), -self.clipob, self.clipob) - - return obs.astype(np.float32) - else: - return obs - - def reset(self): - """ - Reset all environments - """ - obs = self.env.reset() - return self._filter_observation(obs) diff --git a/vel/rl/env_roller/transition_replay_env_roller.py b/vel/rl/env_roller/transition_replay_env_roller.py index 14cb282e..5cf7a738 100644 --- a/vel/rl/env_roller/transition_replay_env_roller.py +++ b/vel/rl/env_roller/transition_replay_env_roller.py @@ -22,14 +22,13 @@ class TransitionReplayEnvRoller(ReplayEnvRollerBase): def __init__(self, environment: VecEnv, policy: RlPolicy, device: torch.device, replay_buffer: ReplayBuffer, discount_factor: typing.Optional[float] = None, normalize_returns: bool = False, - forward_steps: int = 1, action_noise: typing.Optional[nn.Module] = None): + forward_steps: int = 1): self._environment = environment self.device = device self.replay_buffer = replay_buffer self.normalize_returns = normalize_returns self.forward_steps = forward_steps self.discount_factor = discount_factor - self.action_noise = action_noise.to(self.device) if action_noise is not None else None self.actor = PolicyActor(self.environment.num_envs, policy, device) assert not self.actor.is_stateful, "Does not support stateful policies" @@ -66,9 +65,6 @@ def rollout(self, batch_info: BatchInfo, number_of_steps: int) -> Rollout: for step_idx in range(number_of_steps): step = self.actor.act(self.last_observation) - if 
self.action_noise is not None: - step['actions'] = self.action_noise(step['actions'], batch_info=batch_info) - replay_extra_information = {} accumulator.add('observations', self.last_observation_cpu) @@ -102,9 +98,6 @@ def rollout(self, batch_info: BatchInfo, number_of_steps: int) -> Rollout: dones_tensor = torch.from_numpy(new_dones.astype(np.float32)).clone() accumulator.add('dones', dones_tensor) - if self.action_noise is not None: - self.action_noise.reset_training_state(dones_tensor, batch_info=batch_info) - self.accumulated_returns = self.accumulated_returns * (1.0 - new_dones.astype(np.float32)) self.last_observation_cpu = torch.from_numpy(new_obs).clone() @@ -161,22 +154,15 @@ class TransitionReplayEnvRollerFactory(ReplayEnvRollerFactoryBase): """ Factory for the ReplayEnvRoller """ def __init__(self, replay_buffer_factory: ReplayBufferFactory, discount_factor: typing.Optional[float] = None, - normalize_returns: bool = False, forward_steps: int = 1, - action_noise: typing.Optional[ModelFactory] = None): + normalize_returns: bool = False, forward_steps: int = 1): self.replay_buffer_factory = replay_buffer_factory self.normalize_returns = normalize_returns self.forward_steps = forward_steps self.discount_factor = discount_factor - self.action_noise_factory = action_noise def instantiate(self, environment, policy, device): replay_buffer = self.replay_buffer_factory.instantiate(environment) - if self.action_noise_factory is None: - action_noise = None - else: - action_noise = self.action_noise_factory.instantiate(environment=environment) - return TransitionReplayEnvRoller( environment=environment, policy=policy, @@ -184,18 +170,16 @@ def instantiate(self, environment, policy, device): replay_buffer=replay_buffer, discount_factor=self.discount_factor, normalize_returns=self.normalize_returns, - forward_steps=self.forward_steps, - action_noise=action_noise + forward_steps=self.forward_steps ) def create(replay_buffer, discount_factor: typing.Optional[float] = None, normalize_returns: bool = False, - forward_steps: int = 1, action_noise: typing.Optional[ModelFactory] = None): + forward_steps: int = 1): """ Vel factory function """ return TransitionReplayEnvRollerFactory( replay_buffer_factory=replay_buffer, discount_factor=discount_factor, forward_steps=forward_steps, - normalize_returns=normalize_returns, - action_noise=action_noise + normalize_returns=normalize_returns ) diff --git a/vel/rl/layer/premade/purgatory/double_nature_cnn.py b/vel/rl/layer/double_nature_cnn.py similarity index 66% rename from vel/rl/layer/premade/purgatory/double_nature_cnn.py rename to vel/rl/layer/double_nature_cnn.py index ed64afcd..54599e9e 100644 --- a/vel/rl/layer/premade/purgatory/double_nature_cnn.py +++ b/vel/rl/layer/double_nature_cnn.py @@ -12,18 +12,19 @@ import vel.util.network as net_util -from vel.api import LinearBackboneModel, ModelFactory +from vel.api import SizeHints, SizeHint +from vel.net.layer_base import Layer, LayerFactory -class DoubleNatureCnn(LinearBackboneModel): +class DoubleNatureCnn(Layer): """ Neural network as defined in the paper 'Human-level control through deep reinforcement learning' but with two separate heads. 
""" - def __init__(self, input_width, input_height, input_channels, output_dim=512): - super().__init__() + def __init__(self, name: str, input_width, input_height, input_channels, output_dim=512): + super().__init__(name) - self._output_dim = output_dim + self.output_dim = output_dim self.conv1 = nn.Conv2d( in_channels=input_channels, @@ -68,11 +69,6 @@ def __init__(self, input_width, input_height, input_channels, output_dim=512): self.output_dim ) - @property - def output_dim(self) -> int: - """ Final dimension of model output """ - return self._output_dim - def reset_weights(self): for m in self.modules(): if isinstance(m, nn.Conv2d): @@ -84,7 +80,13 @@ def reset_weights(self): init.orthogonal_(m.weight, gain=np.sqrt(2)) init.constant_(m.bias, 0.0) - def forward(self, image): + def size_hints(self) -> SizeHints: + return SizeHints(( + SizeHint(None, self.output_dim), + SizeHint(None, self.output_dim) + )) + + def forward(self, image, state: dict = None, context: dict = None): result = image result = F.relu(self.conv1(result)) result = F.relu(self.conv2(result)) @@ -97,12 +99,29 @@ def forward(self, image): return output_one, output_two -def create(input_width, input_height, input_channels=1): - """ Vel factory function """ - def instantiate(**_): - return DoubleNatureCnn(input_width=input_width, input_height=input_height, input_channels=input_channels) +class DoubleNatureCnnFactory(LayerFactory): + """ Nature Cnn Network Factory """ - return ModelFactory.generic(instantiate) + def __init__(self, output_dim: int = 512): + self.output_dim = output_dim + @property + def name_base(self) -> str: + """ Base of layer name """ + return "nature_cnn" + + def instantiate(self, name: str, direct_input: SizeHints, context: dict) -> Layer: + (b, c, w, h) = direct_input.assert_single(4) + + return DoubleNatureCnn( + name=name, + input_width=w, + input_height=h, + input_channels=c, + output_dim=self.output_dim + ) -DoubleNatureCnnFactory = create + +def create(output_dim: int = 512): + """ Vel factory function """ + return DoubleNatureCnnFactory(output_dim=output_dim) diff --git a/vel/rl/layer/premade/purgatory/__init__.py b/vel/rl/layer/premade/purgatory/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/vel/rl/layer/premade/__init__.py b/vel/rl/layer/purgatory/__init__.py similarity index 100% rename from vel/rl/layer/premade/__init__.py rename to vel/rl/layer/purgatory/__init__.py diff --git a/vel/rl/layer/premade/purgatory/double_noisy_nature_cnn.py b/vel/rl/layer/purgatory/double_noisy_nature_cnn.py similarity index 98% rename from vel/rl/layer/premade/purgatory/double_noisy_nature_cnn.py rename to vel/rl/layer/purgatory/double_noisy_nature_cnn.py index a55fc8ed..7c31e719 100644 --- a/vel/rl/layer/premade/purgatory/double_noisy_nature_cnn.py +++ b/vel/rl/layer/purgatory/double_noisy_nature_cnn.py @@ -116,6 +116,3 @@ def instantiate(**_): ) return ModelFactory.generic(instantiate) - - -DoubleNoisyNatureCnnFactory = create diff --git a/vel/rl/layer/premade/purgatory/mlp.py b/vel/rl/layer/purgatory/mlp.py similarity index 99% rename from vel/rl/layer/premade/purgatory/mlp.py rename to vel/rl/layer/purgatory/mlp.py index f4e03ae3..65560553 100644 --- a/vel/rl/layer/premade/purgatory/mlp.py +++ b/vel/rl/layer/purgatory/mlp.py @@ -68,6 +68,3 @@ def instantiate(**_): ) return ModelFactory.generic(instantiate) - - -MLPFactory = create diff --git a/vel/rl/layer/premade/purgatory/mlp_rnn.py b/vel/rl/layer/purgatory/mlp_rnn.py similarity index 98% rename from 
vel/rl/layer/premade/purgatory/mlp_rnn.py rename to vel/rl/layer/purgatory/mlp_rnn.py index d5229d7f..1e871ffb 100644 --- a/vel/rl/layer/premade/purgatory/mlp_rnn.py +++ b/vel/rl/layer/purgatory/mlp_rnn.py @@ -57,6 +57,3 @@ def instantiate(**_): ) return ModelFactory.generic(instantiate) - - -MlpRnnFactory = create diff --git a/vel/rl/layer/premade/purgatory/nature_cnn_rnn.py b/vel/rl/layer/purgatory/nature_cnn_rnn.py similarity index 100% rename from vel/rl/layer/premade/purgatory/nature_cnn_rnn.py rename to vel/rl/layer/purgatory/nature_cnn_rnn.py diff --git a/vel/rl/layer/premade/purgatory/noisy_nature_cnn.py b/vel/rl/layer/purgatory/noisy_nature_cnn.py similarity index 100% rename from vel/rl/layer/premade/purgatory/noisy_nature_cnn.py rename to vel/rl/layer/purgatory/noisy_nature_cnn.py diff --git a/vel/rl/layer/premade/purgatory/rnn.py b/vel/rl/layer/purgatory/rnn.py similarity index 98% rename from vel/rl/layer/premade/purgatory/rnn.py rename to vel/rl/layer/purgatory/rnn.py index 973345d4..5a6e9625 100644 --- a/vel/rl/layer/premade/purgatory/rnn.py +++ b/vel/rl/layer/purgatory/rnn.py @@ -45,6 +45,3 @@ def instantiate(**_): rnn_type=rnn_type ) return ModelFactory.generic(instantiate) - - -RNNFactory = create diff --git a/vel/rl/module/head/q_dueling_head.py b/vel/rl/module/head/q_dueling_head.py index 78a9d6ea..600e3564 100644 --- a/vel/rl/module/head/q_dueling_head.py +++ b/vel/rl/module/head/q_dueling_head.py @@ -6,14 +6,14 @@ class QDuelingHead(nn.Module): """ Network head calculating Q-function value for each (discrete) action using two separate inputs. """ - def __init__(self, input_dim, action_space): + def __init__(self, val_input_dim, adv_input_dim, action_space): super().__init__() # Q-function requires a discrete action space assert isinstance(action_space, spaces.Discrete) - self.linear_layer_advantage = nn.Linear(input_dim, action_space.n) - self.linear_layer_value = nn.Linear(input_dim, 1) + self.linear_layer_value = nn.Linear(val_input_dim, 1) + self.linear_layer_advantage = nn.Linear(adv_input_dim, action_space.n) self.action_space = action_space def reset_weights(self): diff --git a/vel/rl/module/noise/eps_greedy.py b/vel/rl/module/noise/eps_greedy.py index 0f3346eb..becf13e1 100644 --- a/vel/rl/module/noise/eps_greedy.py +++ b/vel/rl/module/noise/eps_greedy.py @@ -1,39 +1,31 @@ +import gym import typing import torch -import torch.nn as nn -from vel.api import Schedule +from vel.api import Schedule, Network from vel.internal.generic_factory import GenericFactory from vel.function.constant import ConstantSchedule -class EpsGreedy(nn.Module): +class EpsGreedy(Network): """ Epsilon-greedy action selection """ - def __init__(self, epsilon: typing.Union[Schedule, float], environment): + def __init__(self, action_space: gym.Space): super().__init__() - if isinstance(epsilon, Schedule): - self.epsilon_schedule = epsilon - else: - self.epsilon_schedule = ConstantSchedule(epsilon) - - self.action_space = environment.action_space + self.action_space = action_space - def forward(self, actions, batch_info=None): - if batch_info is None: - # Just take final value if there is no batch info - epsilon = self.epsilon_schedule.value(1.0) + def forward(self, actions, epsilon, deterministic=False): + if deterministic: + return actions else: - epsilon = self.epsilon_schedule.value(batch_info['progress']) - - random_samples = torch.randint_like(actions, self.action_space.n) - selector = torch.rand_like(random_samples, dtype=torch.float32) + random_samples = torch.randint_like(actions, 
self.action_space.n) + selector = torch.rand_like(random_samples, dtype=torch.float32) - # Actions with noise applied - noisy_actions = torch.where(selector > epsilon, actions, random_samples) + # Actions with noise applied + noisy_actions = torch.where(selector > epsilon, actions, random_samples) - return noisy_actions + return noisy_actions def reset_training_state(self, dones, batch_info): """ A hook for a model to react when during training episode is finished """ diff --git a/vel/rl/module/noise/ou_noise.py b/vel/rl/module/noise/ou_noise.py index d54a67e9..edda86f8 100644 --- a/vel/rl/module/noise/ou_noise.py +++ b/vel/rl/module/noise/ou_noise.py @@ -1,19 +1,21 @@ -import torch +import gym import numpy as np +import torch import torch.nn as nn -from vel.calc.process import OrnsteinUhlenbeckNoiseProcess +from vel.api import Network +from vel.util.process import OrnsteinUhlenbeckNoiseProcess from vel.internal.generic_factory import GenericFactory -class OuNoise(nn.Module): +class OuNoise(Network): """ Ornstein–Uhlenbeck noise process for action noise """ - def __init__(self, std_dev, environment): + def __init__(self, std_dev: float, action_space: gym.Space): super().__init__() self.std_dev = std_dev - self.action_space = environment.action_space + self.action_space = action_space self.processes = [] self.register_buffer('low_tensor', torch.from_numpy(self.action_space.low).unsqueeze(0)) diff --git a/vel/rl/module/q_policy.py b/vel/rl/module/q_policy.py new file mode 100644 index 00000000..7e6e8925 --- /dev/null +++ b/vel/rl/module/q_policy.py @@ -0,0 +1,50 @@ +import gym + +from vel.api import Network, BackboneNetwork + +from vel.rl.module.head.q_head import QHead +from vel.rl.module.head.q_dueling_head import QDuelingHead + + +class QPolicy(Network): + """ + Simple deterministic greedy action-value model. + Supports only discrete action spaces (ones that can be enumerated) + """ + def __init__(self, net: BackboneNetwork, action_space: gym.Space, dueling_dqn=False): + super().__init__() + + self.dueling_dqn = dueling_dqn + self.action_space = action_space + + self.net = net + + if self.dueling_dqn: + (value_size, adv_size) = self.net.size_hints().assert_tuple(2) + + self.q_head = QDuelingHead( + val_input_dim=value_size.last(), + adv_input_dim=adv_size.last(), + action_space=action_space + ) + else: + self.q_head = QHead( + input_dim=self.net.size_hints().assert_single(2).last(), + action_space=action_space + ) + + def reset_weights(self): + """ Initialize weights to reasonable defaults """ + self.net.reset_weights() + self.q_head.reset_weights() + + def forward(self, observations): + """ Model forward pass """ + if self.dueling_dqn: + val_output, adv_output = self.net(observations) + q_values = self.q_head(val_output, adv_output) + else: + base_output = self.net(observations) + q_values = self.q_head(base_output) + + return q_values diff --git a/vel/rl/module/q_stochastic_policy.py b/vel/rl/module/q_stochastic_policy.py new file mode 100644 index 00000000..d98b1ac3 --- /dev/null +++ b/vel/rl/module/q_stochastic_policy.py @@ -0,0 +1,46 @@ +import gym + +from vel.api import BackboneNetwork, Network +from vel.rl.module.head.stochastic_action_head import make_stockastic_action_head +from vel.rl.module.head.q_head import QHead + + +class QStochasticPolicy(Network): + """ + A policy model with an action-value critic head (instead of more common state-value critic head). 
+ Supports only discrete action spaces (ones that can be enumerated) + """ + + def __init__(self, net: BackboneNetwork, action_space: gym.Space): + super().__init__() + + assert isinstance(action_space, gym.spaces.Discrete) + + self.net = net + + (action_size, value_size) = self.net.size_hints().assert_tuple(2) + + self.action_head = make_stockastic_action_head( + input_dim=action_size.last(), + action_space=action_space + ) + + self.q_head = QHead( + input_dim=value_size.last(), + action_space=action_space + ) + + def reset_weights(self): + """ Initialize properly model weights """ + self.net.reset_weights() + self.action_head.reset_weights() + self.q_head.reset_weights() + + def forward(self, observations): + """ Calculate model outputs """ + action_hidden, q_hidden = self.net(observations) + policy_params = self.action_head(action_hidden) + + q = self.q_head(q_hidden) + + return policy_params, q diff --git a/vel/rl/policy/acer.py b/vel/rl/policy/acer.py index f4d62580..c320a25f 100644 --- a/vel/rl/policy/acer.py +++ b/vel/rl/policy/acer.py @@ -2,11 +2,10 @@ import torch import torch.nn.functional as F -from vel.api import BackboneNetwork, ModelFactory, BatchInfo, Network +from vel.api import BackboneNetwork, ModelFactory, BatchInfo from vel.metric.base import AveragingNamedMetric from vel.rl.api import Trajectories, RlPolicy, Rollout -from vel.rl.module.head.stochastic_action_head import make_stockastic_action_head -from vel.rl.module.head.q_head import QHead +from vel.rl.module.q_stochastic_policy import QStochasticPolicy def select_indices(tensor, indices): @@ -14,47 +13,6 @@ def select_indices(tensor, indices): return tensor.gather(1, indices.unsqueeze(1)).squeeze() -class QStochasticPolicy(Network): - """ - A policy model with an action-value critic head (instead of more common state-value critic head). 
- Supports only discrete action spaces (ones that can be enumerated) - """ - - def __init__(self, net: BackboneNetwork, action_space: gym.Space): - super().__init__() - - assert isinstance(action_space, gym.spaces.Discrete) - - self.net = net - - (action_size, value_size) = self.net.size_hints().assert_tuple(2) - - self.action_head = make_stockastic_action_head( - input_dim=action_size.last(), - action_space=action_space - ) - - self.q_head = QHead( - input_dim=value_size.last(), - action_space=action_space - ) - - def reset_weights(self): - """ Initialize properly model weights """ - self.net.reset_weights() - self.action_head.reset_weights() - self.q_head.reset_weights() - - def forward(self, observations): - """ Calculate model outputs """ - action_hidden, q_hidden = self.net(observations) - policy_params = self.action_head(action_hidden) - - q = self.q_head(q_hidden) - - return policy_params, q - - class ACER(RlPolicy): """ Actor-Critic with Experience Replay - policy gradient calculations """ @@ -117,14 +75,16 @@ def update_target_policy(self): # EWMA average model update average_param.data.mul_(self.average_model_alpha).add_(model_param.data * (1 - self.average_model_alpha)) - def calculate_gradient(self, batch_info: BatchInfo, rollout: Rollout) -> dict: - """ Calculate loss of the supplied rollout """ - assert isinstance(rollout, Trajectories), "ACER algorithm requires trajectory input" - + def post_optimization_step(self, batch_info: BatchInfo, rollout: Rollout): + """ Optional operations to perform after optimization """ # We calculate the trust-region update with respect to the average model if self.trust_region: self.update_target_policy() + def calculate_gradient(self, batch_info: BatchInfo, rollout: Rollout) -> dict: + """ Calculate loss of the supplied rollout """ + assert isinstance(rollout, Trajectories), "ACER algorithm requires trajectory input" + local_epsilon = 1e-6 # Part 0.0 - Rollout values @@ -192,6 +152,7 @@ def calculate_gradient(self, batch_info: BatchInfo, rollout: Rollout) -> dict: if self.trust_region: with torch.no_grad(): + self.target_policy.eval() target_logprobs = self.target_policy(observations)[0] actor_loss = policy_loss - self.entropy_coefficient * policy_entropy diff --git a/vel/rl/policy/dqn.py b/vel/rl/policy/dqn.py new file mode 100644 index 00000000..e2460fe7 --- /dev/null +++ b/vel/rl/policy/dqn.py @@ -0,0 +1,165 @@ +import numbers + +import typing +import gym +import torch +import torch.nn.functional as F +import torch.nn.utils + +from vel.api import ModelFactory, BackboneNetwork, BatchInfo, Schedule +from vel.function.constant import ConstantSchedule +from vel.metric import AveragingNamedMetric +from vel.rl.api import RlPolicy, Rollout +from vel.rl.module.q_policy import QPolicy +from vel.rl.module.noise.eps_greedy import EpsGreedy + + +class DQN(RlPolicy): + """ Deep Q-Learning algorithm """ + + def __init__(self, net: BackboneNetwork, net_factory: ModelFactory, action_space: gym.Space, + epsilon: typing.Union[float, Schedule], discount_factor: float, double_dqn: bool, + dueling_dqn: bool, target_update_frequency: int): + super().__init__(discount_factor) + + self.model = QPolicy(net=net, action_space=action_space, dueling_dqn=dueling_dqn) + + self.double_dqn = double_dqn + self.target_update_frequency = target_update_frequency + + if isinstance(epsilon, numbers.Number): + self.epsilon_schedule = ConstantSchedule(epsilon) + else: + self.epsilon_schedule = epsilon + + self.epsilon_value = self.epsilon_schedule.value(0.0) + + self.action_noise 
= EpsGreedy(action_space=action_space) + + self.target_model = QPolicy(net=net_factory.instantiate(), action_space=action_space, dueling_dqn=dueling_dqn) + + def reset_weights(self): + """ Initialize properly model weights """ + self.model.reset_weights() + self.target_model.load_state_dict(self.model.state_dict()) + + def forward(self, observation, state=None): + """ Calculate model outputs """ + return self.model(observation) + + def act(self, observation, state=None, deterministic=False): + """ Select actions based on model's output """ + q_values = self.model(observation) + actions = self.model.q_head.sample(q_values) + noisy_actions = self.action_noise(actions, epsilon=self.epsilon_value, deterministic=deterministic) + + return { + 'actions': noisy_actions, + 'q': q_values + } + + def calculate_gradient(self, batch_info: BatchInfo, rollout: Rollout) -> dict: + """ Calculate loss of the supplied rollout """ + observations = rollout.batch_tensor('observations') + observations_next = rollout.batch_tensor('observations_next') + + actions = rollout.batch_tensor('actions') + dones_tensor = rollout.batch_tensor('dones') + rewards_tensor = rollout.batch_tensor('rewards') + + assert dones_tensor.dtype == torch.float32 + + q = self.model(observations) + + with torch.no_grad(): + self.target_model.eval() + target_q = self.target_model(observations_next) + + if self.double_dqn: + # DOUBLE DQN + model_q_next = self.model(observations_next) + # Select largest 'target' value based on action that 'model' selects + values = target_q.gather(1, model_q_next.argmax(dim=1, keepdim=True)).squeeze(1) + else: + # REGULAR DQN + # [0] is because in pytorch .max(...) returns tuple (max values, argmax) + values = target_q.max(dim=1)[0] + + forward_steps = rollout.extra_data.get('forward_steps', 1) + estimated_return = rewards_tensor + (self.discount_factor ** forward_steps) * values * (1 - dones_tensor) + + q_selected = q.gather(1, actions.unsqueeze(1)).squeeze(1) + + if rollout.has_tensor('weights'): + weights = rollout.batch_tensor('weights') + else: + weights = torch.ones_like(rewards_tensor) + + original_losses = F.smooth_l1_loss(q_selected, estimated_return, reduction='none') + + loss_value = torch.mean(weights * original_losses) + loss_value.backward() + + return { + 'loss': loss_value.item(), + # We need it to update priorities in the replay buffer: + 'errors': original_losses.detach().cpu().numpy(), + 'average_q_selected': torch.mean(q_selected).item(), + 'average_q_target': torch.mean(estimated_return).item() + } + + def post_optimization_step(self, batch_info, rollout): + """ Steps to take after optimization has been done""" + if batch_info.aggregate_batch_number % self.target_update_frequency == 0: + self.target_model.load_state_dict(self.model.state_dict()) + + self.epsilon_value = self.epsilon_schedule.value(batch_info['progress']) + + def metrics(self) -> list: + """ List of metrics to track for this learning process """ + return [ + AveragingNamedMetric("loss", scope="model"), + AveragingNamedMetric("average_q_selected", scope="model"), + AveragingNamedMetric("average_q_target", scope="model") + ] + + +class DQNFactory(ModelFactory): + def __init__(self, net_factory: ModelFactory, epsilon: typing.Union[float, Schedule], discount_factor: float, + target_update_frequency: int, double_dqn: bool = False, dueling_dqn: bool = False): + self.net_factory = net_factory + self.epsilon = epsilon + self.discount_factor = discount_factor + self.target_update_frequency = target_update_frequency + 
self.double_dqn = double_dqn + self.dueling_dqn = dueling_dqn + + def instantiate(self, **extra_args): + """ Instantiate the model """ + action_space = extra_args.pop('action_space') + net = self.net_factory.instantiate(**extra_args) + + return DQN( + net=net, + net_factory=self.net_factory, + action_space=action_space, + epsilon=self.epsilon, + discount_factor=self.discount_factor, + double_dqn=self.double_dqn, + dueling_dqn=self.dueling_dqn, + target_update_frequency=self.target_update_frequency + ) + + +def create(net: ModelFactory, epsilon: typing.Union[float, Schedule], discount_factor: float, + target_update_frequency: int, double_dqn: bool = False, dueling_dqn: bool = False): + """ Vel factory function """ + + return DQNFactory( + net_factory=net, + epsilon=epsilon, + discount_factor=discount_factor, + double_dqn=double_dqn, + dueling_dqn=dueling_dqn, + target_update_frequency=target_update_frequency, + ) diff --git a/vel/rl/policy/purgatory/dqn.py b/vel/rl/policy/purgatory/dqn.py deleted file mode 100644 index c6ea4933..00000000 --- a/vel/rl/policy/purgatory/dqn.py +++ /dev/null @@ -1,102 +0,0 @@ -import torch -import torch.nn.functional as F -import torch.nn.utils - -from vel.api import ModelFactory, BackboneModel -from vel.metric import AveragingNamedMetric -from vel.rl.api import RlPolicy - - -class DeepQLearning(RlPolicy): - """ Deep Q-Learning algorithm """ - - def __init__(self, backbone: BackboneModel, - discount_factor: float, double_dqn: bool, - target_update_frequency: int): - super().__init__(discount_factor) - - self.backbone = backbone - - self.double_dqn = double_dqn - self.target_update_frequency = target_update_frequency - - self.target_model = None - - def initialize(self, training_info, model, environment, device): - """ Initialize policy gradient from reinforcer settings """ - self.target_model = self.model_factory.instantiate(action_space=environment.action_space).to(device) - self.target_model.load_state_dict(model.state_dict()) - self.target_model.eval() - - def calculate_gradient(self, batch_info: BatchInfo, rollout: Rollout) -> dict: - """ Calculate loss of the supplied rollout """ - evaluator = model.evaluate(rollout) - - dones_tensor = evaluator.get('rollout:dones') - rewards_tensor = evaluator.get('rollout:rewards') - - assert dones_tensor.dtype == torch.float32 - - with torch.no_grad(): - target_evaluator = self.target_model.evaluate(rollout) - - if self.double_dqn: - # DOUBLE DQN - target_q = target_evaluator.get('model:q_next') - model_q = evaluator.get('model:q_next') - # Select largest 'target' value based on action that 'model' selects - values = target_q.gather(1, model_q.argmax(dim=1, keepdim=True)).squeeze(1) - else: - # REGULAR DQN - # [0] is because in pytorch .max(...) 
returns tuple (max values, argmax) - values = target_evaluator.get('model:q_next').max(dim=1)[0] - - forward_steps = rollout.extra_data.get('forward_steps', 1) - estimated_return = rewards_tensor + (self.discount_factor ** forward_steps) * values * (1 - dones_tensor) - - q_selected = evaluator.get('model:action:q') - - if evaluator.is_provided('rollout:weights'): - weights = evaluator.get('rollout:weights') - else: - weights = torch.ones_like(rewards_tensor) - - original_losses = F.smooth_l1_loss(q_selected, estimated_return, reduction='none') - - loss_value = torch.mean(weights * original_losses) - loss_value.backward() - - return { - 'loss': loss_value.item(), - # We need it to update priorities in the replay buffer: - 'errors': original_losses.detach().cpu().numpy(), - 'average_q_selected': torch.mean(q_selected).item(), - 'average_q_target': torch.mean(estimated_return).item() - } - - def post_optimization_step(self, batch_info, rollout): - """ Steps to take after optimization has been done""" - if batch_info.aggregate_batch_number % self.target_update_frequency == 0: - self.target_model.load_state_dict(self.state_dict()) - self.target_model.eval() - - def metrics(self) -> list: - """ List of metrics to track for this learning process """ - return [ - AveragingNamedMetric("loss", scope="model"), - AveragingNamedMetric("average_q_selected", scope="model"), - AveragingNamedMetric("average_q_target", scope="model") - ] - - -def create(backbone: ModelFactory, - discount_factor: float, target_update_frequency: int, - double_dqn: bool = False): - """ Vel factory function """ - - return DeepQLearning( - backbone=backbone, - discount_factor=discount_factor, - double_dqn=double_dqn, - target_update_frequency=target_update_frequency, - ) diff --git a/vel/rl/reinforcer/buffered_mixed_policy_iteration_reinforcer.py b/vel/rl/reinforcer/buffered_mixed_policy_iteration_reinforcer.py index 11764c84..2a6b42e4 100644 --- a/vel/rl/reinforcer/buffered_mixed_policy_iteration_reinforcer.py +++ b/vel/rl/reinforcer/buffered_mixed_policy_iteration_reinforcer.py @@ -33,12 +33,12 @@ class BufferedMixedPolicyIterationReinforcer(Reinforcer): """ def __init__(self, device: torch.device, settings: BufferedMixedPolicyIterationReinforcerSettings, env: VecEnv, - model: Model, env_roller: ReplayEnvRollerBase) -> None: + policy: RlPolicy, env_roller: ReplayEnvRollerBase) -> None: self.device = device self.settings = settings self.environment = env - self._model: RlPolicy = model.to(self.device) + self._model: RlPolicy = policy.to(self.device) self.env_roller = env_roller @@ -110,7 +110,7 @@ def on_policy_train_batch(self, batch_info: BatchInfo): rollout = self.env_roller.rollout(batch_info, self.settings.number_of_steps).to_device(self.device) - # Preprocessing of the rollout for this algorithm + # Preprocessing of the rollout for this policy rollout = self.policy.process_rollout(rollout) batch_result = self.policy.optimize( @@ -155,8 +155,7 @@ def instantiate(self, device: torch.device) -> Reinforcer: return BufferedMixedPolicyIterationReinforcer(device, self.settings, env, policy, env_roller) -def create(model_config, model, vec_env, env_roller, - parallel_envs, number_of_steps, +def create(model_config, model, vec_env, env_roller, parallel_envs, number_of_steps, experience_replay=1, stochastic_experience_replay=True): """ Vel factory function """ settings = BufferedMixedPolicyIterationReinforcerSettings( diff --git a/vel/rl/reinforcer/buffered_off_policy_iteration_reinforcer.py 
b/vel/rl/reinforcer/buffered_off_policy_iteration_reinforcer.py index be04f2d7..b5baaad5 100644 --- a/vel/rl/reinforcer/buffered_off_policy_iteration_reinforcer.py +++ b/vel/rl/reinforcer/buffered_off_policy_iteration_reinforcer.py @@ -7,8 +7,8 @@ from vel.api import TrainingInfo, EpochInfo, BatchInfo, Model, ModelFactory from vel.openai.baselines.common.vec_env import VecEnv from vel.rl.api import ( - Reinforcer, ReinforcerFactory, ReplayEnvRollerBase, AlgoBase, VecEnvFactory, ReplayEnvRollerFactoryBase -) + Reinforcer, ReinforcerFactory, ReplayEnvRollerBase, VecEnvFactory, ReplayEnvRollerFactoryBase, + RlPolicy) from vel.rl.metrics import ( FPSMetric, EpisodeLengthMetric, EpisodeRewardMetricQuantile, EpisodeRewardMetric, FramesMetric, ) @@ -32,13 +32,12 @@ class BufferedOffPolicyIterationReinforcer(Reinforcer): Afterwards, it samples experience batches from this buffer to train the policy. """ def __init__(self, device: torch.device, settings: BufferedOffPolicyIterationReinforcerSettings, - environment: VecEnv, model: Model, algo: AlgoBase, env_roller: ReplayEnvRollerBase): + environment: VecEnv, policy: RlPolicy, env_roller: ReplayEnvRollerBase): self.device = device self.settings = settings self.environment = environment - self._trained_model = model.to(self.device) - self.algo = algo + self._policy = policy.to(self.device) self.env_roller = env_roller @@ -53,11 +52,11 @@ def metrics(self) -> list: EpisodeLengthMetric("episode_length") ] - return my_metrics + self.algo.metrics() + self.env_roller.metrics() + return my_metrics + self.policy.metrics() + self.env_roller.metrics() @property def policy(self) -> Model: - return self._trained_model + return self._policy def initialize_training(self, training_info: TrainingInfo, model_state=None, hidden_state=None): """ Prepare models for training """ @@ -66,10 +65,6 @@ def initialize_training(self, training_info: TrainingInfo, model_state=None, hid else: self.policy.reset_weights() - self.algo.initialize( - training_info=training_info, model=self.policy, environment=self.environment, device=self.device - ) - def train_epoch(self, epoch_info: EpochInfo, interactive=True) -> None: """ Train model for a single epoch """ epoch_info.on_epoch_begin() @@ -96,14 +91,14 @@ def train_batch(self, batch_info: BatchInfo) -> None: For this reinforforcer, that involves: 1. Roll out environment and store out experience in the buffer - 2. Sample the buffer and train the algo on sample batch + 2. Sample the buffer and train the policy on sample batch """ # For each reinforcer batch: # 1. Roll out environment and store out experience in the buffer self.roll_out_and_store(batch_info) - # 2. Sample the buffer and train the algo on sample batch + # 2. 
Sample the buffer and train the policy on sample batch self.train_on_replay_memory(batch_info) def roll_out_and_store(self, batch_info): @@ -111,7 +106,7 @@ def roll_out_and_store(self, batch_info): self.policy.train() if self.env_roller.is_ready_for_sampling(): - rollout = self.env_roller.rollout(batch_info, self.policy, self.settings.rollout_steps) + rollout = self.env_roller.rollout(batch_info, self.settings.rollout_steps) rollout = rollout.to_device(self.device) # Store some information about the rollout, no training phase @@ -123,7 +118,7 @@ def roll_out_and_store(self, batch_info): with tqdm.tqdm(desc="Populating memory", total=self.env_roller.initial_memory_size_hint()) as pbar: while not self.env_roller.is_ready_for_sampling(): - rollout = self.env_roller.rollout(batch_info, self.policy, self.settings.rollout_steps) + rollout = self.env_roller.rollout(batch_info, self.settings.rollout_steps) rollout = rollout.to_device(self.device) new_frames = rollout.frames() @@ -144,12 +139,10 @@ def train_on_replay_memory(self, batch_info): batch_info['sub_batch_data'] = [] for i in range(self.settings.training_rounds): - sampled_rollout = self.env_roller.sample(batch_info, self.policy, self.settings.training_steps) + sampled_rollout = self.env_roller.sample(batch_info, self.settings.training_steps) - batch_result = self.algo.optimize( + batch_result = self.policy.optimize( batch_info=batch_info, - device=self.device, - model=self.policy, rollout=sampled_rollout.to_device(self.device) ) @@ -164,32 +157,30 @@ class BufferedOffPolicyIterationReinforcerFactory(ReinforcerFactory): """ Factory class for the DQN reinforcer """ def __init__(self, settings, env_factory: VecEnvFactory, model_factory: ModelFactory, - algo: AlgoBase, env_roller_factory: ReplayEnvRollerFactoryBase, parallel_envs: int, seed: int): + env_roller_factory: ReplayEnvRollerFactoryBase, parallel_envs: int, seed: int): self.settings = settings self.env_factory = env_factory self.model_factory = model_factory - self.algo = algo self.env_roller_factory = env_roller_factory self.parallel_envs = parallel_envs self.seed = seed def instantiate(self, device: torch.device) -> BufferedOffPolicyIterationReinforcer: env = self.env_factory.instantiate(parallel_envs=self.parallel_envs, seed=self.seed) - env_roller = self.env_roller_factory.instantiate(env, device) - model = self.model_factory.instantiate(action_space=env.action_space) + policy = self.model_factory.instantiate(action_space=env.action_space) + env_roller = self.env_roller_factory.instantiate(environment=env, policy=policy, device=device) return BufferedOffPolicyIterationReinforcer( device=device, settings=self.settings, environment=env, - model=model, - algo=self.algo, + policy=policy, env_roller=env_roller ) -def create(model_config, vec_env, model, algo, env_roller, parallel_envs: int, +def create(model_config, vec_env, model, env_roller, parallel_envs: int, rollout_steps: int, training_steps: int, training_rounds: int = 1): """ Vel factory function """ settings = BufferedOffPolicyIterationReinforcerSettings( @@ -202,7 +193,6 @@ def create(model_config, vec_env, model, algo, env_roller, parallel_envs: int, settings=settings, env_factory=vec_env, model_factory=model, - algo=algo, env_roller_factory=env_roller, parallel_envs=parallel_envs, seed=model_config.seed From aa2905e4177db8fe565e722a0a241ad36220320d Mon Sep 17 00:00:00 2001 From: Million Integrals Date: Wed, 2 Oct 2019 21:57:43 -0700 Subject: [PATCH 106/162] Revived the Rainbow. 
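Brings the Rainbow setup back from the purgatory/rainbow-param directories:
the config moves up to examples-configs/rl/atari/atari_rainbow.yaml, the
algorithm is reimplemented as vel.rl.policy.rainbow on top of the RlPolicy
API (distributional double DQN with a noisy dueling head and n-step
prioritized replay), and double_noisy_nature_cnn becomes a regular modular
network layer. Rough launch sketch, assuming the standard `vel` launcher
entry point:

    vel examples-configs/rl/atari/atari_rainbow.yaml train --device cuda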
--- examples-configs/rl/atari/atari_ddqn.yaml | 2 +- .../atari_rainbow.yaml | 51 ++-- examples-configs/rl/atari/dqn/atari_ddqn.yaml | 78 ------ .../atari/dqn/atari_dqn_distributional.yaml | 90 ------- .../rl/atari/dqn/atari_dqn_raw.yaml | 86 ------- .../rl/atari/dqn/atari_dueling_ddqn.yaml | 79 ------ .../atari_rp_dqn_distributional.yaml | 93 ------- .../atari_rp_dqn_noisynet.yaml | 86 ------- .../dqn_rainbow_param/atari_rp_dqn_nstep.yaml | 91 ------- .../dqn_rainbow_param/atari_rp_dqn_raw.yaml | 88 ------- .../rl/atari/purgatory/atari_ddqn.yaml | 0 .../rl/atari/purgatory/atari_rainbow.yaml | 0 vel/rl/env_roller/step_env_roller.py | 1 + .../trajectory_replay_env_roller.py | 1 + .../transition_replay_env_roller.py | 31 +-- vel/rl/layer/double_nature_cnn.py | 2 +- .../double_noisy_nature_cnn.py | 60 +++-- .../q_distributional_noisy_dueling_head.py | 7 +- vel/rl/module/noisy_linear.py | 2 +- vel/rl/module/rainbow_policy.py | 67 +++++ vel/rl/policy/acer.py | 1 - vel/rl/policy/dqn.py | 1 - vel/rl/policy/purgatory/distributional_dqn.py | 191 -------------- vel/rl/policy/rainbow.py | 240 ++++++++++++++++++ ...fered_mixed_policy_iteration_reinforcer.py | 6 +- ...uffered_off_policy_iteration_reinforcer.py | 2 - .../on_policy_iteration_reinforcer.py | 5 +- vel/rl/util/actor.py | 10 + vel/rl/xpolicy/purgatory/q_rainbow_model.py | 110 -------- 29 files changed, 405 insertions(+), 1076 deletions(-) rename examples-configs/rl/atari/{dqn_rainbow_param => }/atari_rainbow.yaml (64%) delete mode 100644 examples-configs/rl/atari/dqn/atari_ddqn.yaml delete mode 100644 examples-configs/rl/atari/dqn/atari_dqn_distributional.yaml delete mode 100644 examples-configs/rl/atari/dqn/atari_dqn_raw.yaml delete mode 100644 examples-configs/rl/atari/dqn/atari_dueling_ddqn.yaml delete mode 100644 examples-configs/rl/atari/dqn_rainbow_param/atari_rp_dqn_distributional.yaml delete mode 100644 examples-configs/rl/atari/dqn_rainbow_param/atari_rp_dqn_noisynet.yaml delete mode 100644 examples-configs/rl/atari/dqn_rainbow_param/atari_rp_dqn_nstep.yaml delete mode 100644 examples-configs/rl/atari/dqn_rainbow_param/atari_rp_dqn_raw.yaml delete mode 100644 examples-configs/rl/atari/purgatory/atari_ddqn.yaml delete mode 100644 examples-configs/rl/atari/purgatory/atari_rainbow.yaml rename vel/rl/layer/{purgatory => }/double_noisy_nature_cnn.py (64%) create mode 100644 vel/rl/module/rainbow_policy.py delete mode 100644 vel/rl/policy/purgatory/distributional_dqn.py create mode 100644 vel/rl/policy/rainbow.py delete mode 100644 vel/rl/xpolicy/purgatory/q_rainbow_model.py diff --git a/examples-configs/rl/atari/atari_ddqn.yaml b/examples-configs/rl/atari/atari_ddqn.yaml index ecbf6544..d4b7430b 100644 --- a/examples-configs/rl/atari/atari_ddqn.yaml +++ b/examples-configs/rl/atari/atari_ddqn.yaml @@ -17,7 +17,6 @@ model: double_dqn: true dueling_dqn: true target_update_frequency: 10_000 # After how many batches to update the target network - max_grad_norm: 0.5 discount_factor: 0.99 @@ -71,6 +70,7 @@ optimizer: alpha: 0.95 momentum: 0.95 epsilon: 1.0e-1 + max_grad_norm: 0.5 commands: diff --git a/examples-configs/rl/atari/dqn_rainbow_param/atari_rainbow.yaml b/examples-configs/rl/atari/atari_rainbow.yaml similarity index 64% rename from examples-configs/rl/atari/dqn_rainbow_param/atari_rainbow.yaml rename to examples-configs/rl/atari/atari_rainbow.yaml index 9e8a92cb..a11c3afc 100644 --- a/examples-configs/rl/atari/dqn_rainbow_param/atari_rainbow.yaml +++ b/examples-configs/rl/atari/atari_rainbow.yaml @@ -14,7 +14,11 @@ vec_env: model: - 
name: vel.rl.models.q_rainbow_model + name: vel.rl.policy.rainbow + + target_update_frequency: 32_000 # After how many batches to update the target network + + discount_factor: 0.99 atoms: 51 # 51 bins for Distributional DQN vmin: -10.0 @@ -23,31 +27,28 @@ model: initial_std_dev: 0.5 factorized_noise: true - input_block: - name: vel.modules.input.image_to_tensor - - backbone: - name: vel.rl.models.backbone.double_noisy_nature_cnn - input_width: 84 - input_height: 84 - input_channels: 4 # The same as frame_history - - initial_std_dev: 0.5 - factorized_noise: true + net: + name: vel.net.modular + layers: + - name: vel.net.layer.input.image_to_tensor + size: [84, 84, 4] # Number of channels is frame history + - name: vel.rl.layer.double_noisy_nature_cnn + # TODO(this should ideally be brough from level up) + initial_std_dev: 0.5 + factorized_noise: true reinforcer: - name: vel.rl.reinforcers.buffered_off_policy_iteration_reinforcer + name: vel.rl.reinforcer.buffered_off_policy_iteration_reinforcer env_roller: name: vel.rl.env_roller.transition_replay_env_roller # N-Step Q-Learning forward_steps: 3 - discount_factor: 0.99 replay_buffer: - name: vel.rl.buffers.prioritized_circular_replay_buffer + name: vel.rl.buffer.prioritized_circular_replay_buffer buffer_initial_size: 80_000 # How many samples we need in the buffer before we start using replay buffer buffer_capacity: 1_000_000 @@ -58,45 +59,37 @@ reinforcer: priority_exponent: 0.5 priority_weight: - name: vel.schedules.linear + name: vel.function.linear initial_value: 0.4 final_value: 1.0 priority_epsilon: 1.0e-6 - algo: - name: vel.rl.algo.distributional_dqn - double_dqn: true - - target_update_frequency: 32_000 # After how many batches to update the target network - max_grad_norm: 0.5 - - discount_factor: 0.99 - rollout_steps: 4 # How many environment steps (per env) to perform per batch of training training_steps: 32 # How many environment steps (per env) to perform per training round parallel_envs: 1 # Roll out only one env in parallel, just like in DeepMind paper optimizer: - name: vel.optimizers.adam + name: vel.optimizer.adam lr: 6.25e-05 epsilon: 1.5e-4 + max_grad_norm: 0.5 commands: train: - name: vel.rl.commands.rl_train_command + name: vel.rl.command.rl_train_command total_frames: 1.1e7 # 11M batches_per_epoch: 2500 record: - name: vel.rl.commands.record_movie_command + name: vel.rl.command.record_movie_command takes: 10 videoname: 'atari_rainbow_vid_{:04}.avi' fps: 15 evaluate: - name: vel.rl.commands.evaluate_env_command + name: vel.rl.command.evaluate_env_command parallel_envs: 12 takes: 20 diff --git a/examples-configs/rl/atari/dqn/atari_ddqn.yaml b/examples-configs/rl/atari/dqn/atari_ddqn.yaml deleted file mode 100644 index 667ce429..00000000 --- a/examples-configs/rl/atari/dqn/atari_ddqn.yaml +++ /dev/null @@ -1,78 +0,0 @@ -name: 'atari_ddqn' - - -env: - name: vel.rl.env.classic_atari - game: !param game = 'BreakoutNoFrameskip-v4' - - -vec_env: - name: vel.rl.vecenv.dummy - frame_history: 4 # How many stacked frames go into a single observation - - -model: - name: vel.rl.models.q_model - - input_block: - name: vel.modules.input.image_to_tensor - - backbone: - name: vel.rl.models.backbone.nature_cnn - input_width: 84 - input_height: 84 - input_channels: 4 # The same as frame_history - - -reinforcer: - name: vel.rl.reinforcers.buffered_off_policy_iteration_reinforcer - - env_roller: - name: vel.rl.env_roller.transition_replay_env_roller - - replay_buffer: - name: vel.rl.buffers.circular_replay_buffer - - buffer_initial_size: 
30_000 # How many samples we need in the buffer before we start using replay buffer - buffer_capacity: 250_000 - - # Because env has a framestack already built-in, save memory by encoding only last frames in the replay buffer - frame_stack_compensation: true - frame_history: 4 # How many stacked frames go into a single observation - - action_noise: - name: vel.rl.modules.noise.eps_greedy - - epsilon: - name: vel.schedules.linear_and_constant - end_of_interpolation: 0.1 - initial_value: 1.0 - final_value: 0.1 - - algo: - name: vel.rl.algo.dqn - - double_dqn: true - target_update_frequency: 10_000 # After how many batches to update the target network - max_grad_norm: 0.5 - - discount_factor: 0.99 - - rollout_steps: 4 # How many environment steps (per env) to perform per batch of training - training_steps: 32 # How many environment steps (per env) to perform per training round - parallel_envs: 1 # Roll out only one env in parallel, just like in DeepMind paper - - -optimizer: - name: vel.optimizers.rmsprop - lr: 2.5e-4 - alpha: 0.95 - momentum: 0.95 - epsilon: 1.0e-1 - - -commands: - train: - name: vel.rl.commands.rl_train_command - total_frames: 1.1e7 # 11M - batches_per_epoch: 2500 diff --git a/examples-configs/rl/atari/dqn/atari_dqn_distributional.yaml b/examples-configs/rl/atari/dqn/atari_dqn_distributional.yaml deleted file mode 100644 index dd5b62f3..00000000 --- a/examples-configs/rl/atari/dqn/atari_dqn_distributional.yaml +++ /dev/null @@ -1,90 +0,0 @@ -name: 'atari_dqn_distributional' - - -env: - name: vel.rl.env.classic_atari - game: !param game = 'BreakoutNoFrameskip-v4' - - -vec_env: - name: vel.rl.vecenv.dummy - frame_history: 4 # How many stacked frames go into a single observation - - -model: - name: vel.rl.models.distributional_q_model - - atoms: 51 # 51 bins for Distributional DQN - vmin: -10.0 - vmax: 10.0 - - input_block: - name: vel.modules.input.image_to_tensor - - backbone: - name: vel.rl.models.backbone.nature_cnn - input_width: 84 - input_height: 84 - input_channels: 4 # The same as frame_history - - -reinforcer: - name: vel.rl.reinforcers.buffered_off_policy_iteration_reinforcer - - env_roller: - name: vel.rl.env_roller.transition_replay_env_roller - - replay_buffer: - name: vel.rl.buffers.circular_replay_buffer - - buffer_initial_size: 30_000 # How many samples we need in the buffer before we start using replay buffer - buffer_capacity: 250_000 - - # Because env has a framestack already built-in, save memory by encoding only last frames in the replay buffer - frame_stack_compensation: true - frame_history: 4 # How many stacked frames go into a single observation - - action_noise: - name: vel.rl.modules.noise.eps_greedy - - epsilon: - name: vel.schedules.linear_and_constant - end_of_interpolation: 0.1 - initial_value: 1.0 - final_value: 0.1 - - algo: - name: vel.rl.algo.distributional_dqn - - target_update_frequency: 10_000 # After how many batches to update the target network - max_grad_norm: 0.5 - - discount_factor: 0.99 - - rollout_steps: 4 # How many environment steps (per env) to perform per batch of training - training_steps: 32 # How many environment steps (per env) to perform per training round - parallel_envs: 1 # Roll out only one env in parallel, just like in DeepMind paper - - -optimizer: - name: vel.optimizers.rmsprop - lr: 2.5e-4 - alpha: 0.95 - momentum: 0.95 - epsilon: 1.0e-1 - - -commands: - train: - name: vel.rl.commands.rl_train_command - total_frames: 1.1e7 # 11M - batches_per_epoch: 2500 - - record: - name: vel.rl.commands.record_movie_command - 
takes: 10 - videoname: 'atari_vid_{:04}.avi' - - evaluate: - name: vel.rl.commands.evaluate_env_command - takes: 100 diff --git a/examples-configs/rl/atari/dqn/atari_dqn_raw.yaml b/examples-configs/rl/atari/dqn/atari_dqn_raw.yaml deleted file mode 100644 index a32427bd..00000000 --- a/examples-configs/rl/atari/dqn/atari_dqn_raw.yaml +++ /dev/null @@ -1,86 +0,0 @@ -name: 'atari_dqn_raw' - - -env: - name: vel.rl.env.classic_atari - game: !param game = 'BreakoutNoFrameskip-v4' - - -vec_env: - name: vel.rl.vecenv.dummy - frame_history: 4 # How many stacked frames go into a single observation - - -model: - name: vel.rl.models.q_model - - input_block: - name: vel.modules.input.image_to_tensor - - backbone: - name: vel.rl.models.backbone.nature_cnn - input_width: 84 - input_height: 84 - input_channels: 4 # The same as frame_history - - -reinforcer: - name: vel.rl.reinforcers.buffered_off_policy_iteration_reinforcer - - env_roller: - name: vel.rl.env_roller.transition_replay_env_roller - - replay_buffer: - name: vel.rl.buffers.circular_replay_buffer - - buffer_initial_size: 30_000 # How many samples we need in the buffer before we start using replay buffer - buffer_capacity: 250_000 - - # Because env has a framestack already built-in, save memory by encoding only last frames in the replay buffer - frame_stack_compensation: true - frame_history: 4 # How many stacked frames go into a single observation - - action_noise: - name: vel.rl.modules.noise.eps_greedy - - epsilon: - name: vel.schedules.linear_and_constant - end_of_interpolation: 0.1 - initial_value: 1.0 - final_value: 0.1 - - algo: - name: vel.rl.algo.dqn - - target_update_frequency: 10_000 # After how many batches to update the target network - max_grad_norm: 0.5 - - discount_factor: 0.99 - - rollout_steps: 4 # How many environment steps (per env) to perform per batch of training - training_steps: 32 # How many environment steps (per env) to perform per training round - parallel_envs: 1 # Roll out only one env in parallel, just like in DeepMind paper - - -optimizer: - name: vel.optimizers.rmsprop - lr: 2.5e-4 - alpha: 0.95 - momentum: 0.95 - epsilon: 1.0e-1 - - -commands: - train: - name: vel.rl.commands.rl_train_command - total_frames: 1.1e7 # 11M - batches_per_epoch: 2500 - - record: - name: vel.rl.commands.record_movie_command - takes: 10 - videoname: 'atari_vid_{:04}.avi' - - evaluate: - name: vel.rl.commands.evaluate_env_command - takes: 100 diff --git a/examples-configs/rl/atari/dqn/atari_dueling_ddqn.yaml b/examples-configs/rl/atari/dqn/atari_dueling_ddqn.yaml deleted file mode 100644 index a5a225a9..00000000 --- a/examples-configs/rl/atari/dqn/atari_dueling_ddqn.yaml +++ /dev/null @@ -1,79 +0,0 @@ -name: 'atari_dueling_ddqn' - - -env: - name: vel.rl.env.classic_atari - game: !param game = 'BreakoutNoFrameskip-v4' - - -vec_env: - name: vel.rl.vecenv.dummy - frame_history: 4 # How many stacked frames go into a single observation - - -model: - name: vel.rl.models.q_dueling_model - - input_block: - name: vel.modules.input.image_to_tensor - - backbone: - name: vel.rl.models.backbone.double_nature_cnn - input_width: 84 - input_height: 84 - input_channels: 4 # The same as frame_history - - -reinforcer: - name: vel.rl.reinforcers.buffered_off_policy_iteration_reinforcer - - env_roller: - name: vel.rl.env_roller.transition_replay_env_roller - - replay_buffer: - name: vel.rl.buffers.circular_replay_buffer - - buffer_initial_size: 30_000 # How many samples we need in the buffer before we start using replay buffer - buffer_capacity: 250_000 - - 
# Because env has a framestack already built-in, save memory by encoding only last frames in the replay buffer - frame_stack_compensation: true - frame_history: 4 # How many stacked frames go into a single observation - - action_noise: - name: vel.rl.modules.noise.eps_greedy - - epsilon: - name: vel.schedules.linear_and_constant - end_of_interpolation: 0.1 - initial_value: 1.0 - final_value: 0.1 - - algo: - name: vel.rl.algo.dqn - - double_dqn: true - target_update_frequency: 10_000 # After how many batches to update the target network - max_grad_norm: 0.5 - - discount_factor: 0.99 - - rollout_steps: 4 # How many environment steps to perform per batch of training - training_steps: 32 # How many environment steps (per env) to perform per training round - parallel_envs: 1 # Roll out only one env in parallel, just like in DeepMind paper - - - -optimizer: - name: vel.optimizers.rmsprop - lr: 2.5e-4 - alpha: 0.95 - momentum: 0.95 - epsilon: 1.0e-1 - - -commands: - train: - name: vel.rl.commands.rl_train_command - total_frames: 1.1e7 # 11M - batches_per_epoch: 2500 diff --git a/examples-configs/rl/atari/dqn_rainbow_param/atari_rp_dqn_distributional.yaml b/examples-configs/rl/atari/dqn_rainbow_param/atari_rp_dqn_distributional.yaml deleted file mode 100644 index 20fabbd6..00000000 --- a/examples-configs/rl/atari/dqn_rainbow_param/atari_rp_dqn_distributional.yaml +++ /dev/null @@ -1,93 +0,0 @@ -name: 'atari_dqn_distributional' - - -env: - name: vel.rl.env.classic_atari - game: !param game = 'BreakoutNoFrameskip-v4' - settings: - max_episode_frames: 108_000 - - -vec_env: - name: vel.rl.vecenv.dummy - frame_history: 4 # How many stacked frames go into a single observation - - -model: - name: vel.rl.models.q_distributional_model - - atoms: 51 # 51 bins for Distributional DQN - vmin: -10.0 - vmax: 10.0 - - input_block: - name: vel.modules.input.image_to_tensor - - backbone: - name: vel.rl.models.backbone.nature_cnn - input_width: 84 - input_height: 84 - input_channels: 4 # The same as frame_history - - -reinforcer: - name: vel.rl.reinforcers.buffered_off_policy_iteration_reinforcer - - env_roller: - name: vel.rl.env_roller.transition_replay_env_roller - - replay_buffer: - name: vel.rl.buffers.circular_replay_buffer - - buffer_initial_size: 80_000 # How many samples we need in the buffer before we start using replay buffer - buffer_capacity: 1_000_000 - - # Because env has a framestack already built-in, save memory by encoding only last frames in the replay buffer - frame_stack_compensation: true - frame_history: 4 # How many stacked frames go into a single observation - - action_noise: - name: vel.rl.modules.noise.eps_greedy - - epsilon: - name: vel.schedules.linear_and_constant - end_of_interpolation: 0.1 - initial_value: 1.0 - final_value: 0.1 - - algo: - name: vel.rl.algo.distributional_dqn - - target_update_frequency: 10_000 # After how many batches to update the target network - max_grad_norm: 0.5 - - discount_factor: 0.99 - - rollout_steps: 4 # How many environment steps (per env) to perform per batch of training - training_steps: 32 # How many environment steps (per env) to perform per training round - parallel_envs: 1 # Roll out only one env in parallel, just like in DeepMind paper - - -optimizer: - name: vel.optimizers.adam - lr: 6.25e-05 - epsilon: 1.5e-4 - - -commands: - train: - name: vel.rl.commands.rl_train_command - total_frames: 5.0e7 # 50M - batches_per_epoch: 2500 - - record: - name: vel.rl.commands.record_movie_command - takes: 10 - fps: 15 - videoname: 'atari_vid_{:04}.avi' - - 
evaluate: - name: vel.rl.commands.evaluate_env_command - parallel_envs: 12 - takes: 20 - diff --git a/examples-configs/rl/atari/dqn_rainbow_param/atari_rp_dqn_noisynet.yaml b/examples-configs/rl/atari/dqn_rainbow_param/atari_rp_dqn_noisynet.yaml deleted file mode 100644 index 822e3085..00000000 --- a/examples-configs/rl/atari/dqn_rainbow_param/atari_rp_dqn_noisynet.yaml +++ /dev/null @@ -1,86 +0,0 @@ -name: 'atari_rp_dqn_noisynet' - - -env: - name: vel.rl.env.classic_atari - game: !param game = 'BreakoutNoFrameskip-v4' - settings: - max_episode_frames: 108_000 - - -vec_env: - name: vel.rl.vecenv.dummy - frame_history: 4 # How many stacked frames go into a single observation - - -model: - name: vel.rl.models.noisy_q_model - - initial_std_dev: 0.5 - factorized_noise: true - - input_block: - name: vel.modules.input.image_to_tensor - - backbone: - name: vel.rl.models.backbone.noisy_nature_cnn - - input_width: 84 - input_height: 84 - input_channels: 4 # The same as frame_history - - initial_std_dev: 0.5 - factorized_noise: true - - -reinforcer: - name: vel.rl.reinforcers.buffered_off_policy_iteration_reinforcer - - env_roller: - name: vel.rl.env_roller.transition_replay_env_roller - - replay_buffer: - name: vel.rl.buffers.circular_replay_buffer - - buffer_initial_size: 80_000 # How many samples we need in the buffer before we start using replay buffer - buffer_capacity: 1_000_000 - - # Because env has a framestack already built-in, save memory by encoding only last frames in the replay buffer - frame_stack_compensation: true - frame_history: 4 # How many stacked frames go into a single observation - - algo: - name: vel.rl.algo.dqn - - target_update_frequency: 32_000 # After how many batches to update the target network - max_grad_norm: 0.5 - - discount_factor: 0.99 - - rollout_steps: 4 # How many environment steps (per env) to perform per batch of training - training_steps: 32 # How many environment steps (per env) to perform per training round - parallel_envs: 1 # Roll out only one env in parallel, just like in DeepMind paper - - -optimizer: - name: vel.optimizers.adam - lr: 6.25e-05 - epsilon: 1.5e-4 - - -commands: - train: - name: vel.rl.commands.rl_train_command - total_frames: 5.0e7 # 50M - batches_per_epoch: 2500 - - record: - name: vel.rl.commands.record_movie_command - takes: 10 - videoname: 'atari_vid_{:04}.avi' - fps: 15 - - evaluate: - name: vel.rl.commands.evaluate_env_command - parallel_envs: 12 - takes: 20 diff --git a/examples-configs/rl/atari/dqn_rainbow_param/atari_rp_dqn_nstep.yaml b/examples-configs/rl/atari/dqn_rainbow_param/atari_rp_dqn_nstep.yaml deleted file mode 100644 index af118e3d..00000000 --- a/examples-configs/rl/atari/dqn_rainbow_param/atari_rp_dqn_nstep.yaml +++ /dev/null @@ -1,91 +0,0 @@ -name: 'atari_rp_dqn_nstep' - - -env: - name: vel.rl.env.classic_atari - game: !param game = 'BreakoutNoFrameskip-v4' - settings: - max_episode_frames: 108_000 - - -vec_env: - name: vel.rl.vecenv.dummy - frame_history: 4 # How many stacked frames go into a single observation - - -model: - name: vel.rl.models.q_model - - input_block: - name: vel.modules.input.image_to_tensor - - backbone: - name: vel.rl.models.backbone.nature_cnn - input_width: 84 - input_height: 84 - input_channels: 4 # The same as frame_history - - -reinforcer: - name: vel.rl.reinforcers.buffered_off_policy_iteration_reinforcer - - env_roller: - name: vel.rl.env_roller.transition_replay_env_roller - - # N-Step Q-Learning - forward_steps: 3 - discount_factor: 0.99 - - replay_buffer: - name: 
vel.rl.buffers.circular_replay_buffer - - buffer_initial_size: 80_000 # How many samples we need in the buffer before we start using replay buffer - buffer_capacity: 1_000_000 - - # Because env has a framestack already built-in, save memory by encoding only last frames in the replay buffer - frame_stack_compensation: true - frame_history: 4 # How many stacked frames go into a single observation - - action_noise: - name: vel.rl.modules.noise.eps_greedy - - epsilon: - name: vel.schedules.linear_and_constant - end_of_interpolation: 0.1 - initial_value: 1.0 - final_value: 0.1 - - algo: - name: vel.rl.algo.dqn - - target_update_frequency: 32_000 # After how many batches to update the target network - max_grad_norm: 0.5 - - discount_factor: 0.99 - - rollout_steps: 4 # How many environment steps (per env) to perform per batch of training - training_steps: 32 # How many environment steps (per env) to perform per training round - parallel_envs: 1 # Roll out only one env in parallel, just like in DeepMind paper - - -optimizer: - name: vel.optimizers.adam - lr: 6.25e-05 - epsilon: 1.5e-4 - - -commands: - train: - name: vel.rl.commands.rl_train_command - total_frames: 5.0e7 # 50M - batches_per_epoch: 2500 - - record: - name: vel.rl.commands.record_movie_command - takes: 10 - videoname: 'atari_vid_{:04}.avi' - - evaluate: - name: vel.rl.commands.evaluate_env_command - parallel_envs: 12 - takes: 100 diff --git a/examples-configs/rl/atari/dqn_rainbow_param/atari_rp_dqn_raw.yaml b/examples-configs/rl/atari/dqn_rainbow_param/atari_rp_dqn_raw.yaml deleted file mode 100644 index 8e7272b8..00000000 --- a/examples-configs/rl/atari/dqn_rainbow_param/atari_rp_dqn_raw.yaml +++ /dev/null @@ -1,88 +0,0 @@ -name: 'atari_rp_dqn_raw' - - -env: - name: vel.rl.env.classic_atari - game: !param game = 'BreakoutNoFrameskip-v4' - settings: - max_episode_frames: 108_000 - - -vec_env: - name: vel.rl.vecenv.dummy - frame_history: 4 # How many stacked frames go into a single observation - - -model: - name: vel.rl.models.q_model - - input_block: - name: vel.modules.input.image_to_tensor - - backbone: - name: vel.rl.models.backbone.nature_cnn - input_width: 84 - input_height: 84 - input_channels: 4 # The same as frame_history - - -reinforcer: - name: vel.rl.reinforcers.buffered_off_policy_iteration_reinforcer - - env_roller: - name: vel.rl.env_roller.transition_replay_env_roller - - replay_buffer: - name: vel.rl.buffers.circular_replay_buffer - - buffer_initial_size: 80_000 # How many samples we need in the buffer before we start using replay buffer - buffer_capacity: 1_000_000 - - # Because env has a framestack already built-in, save memory by encoding only last frames in the replay buffer - frame_stack_compensation: true - frame_history: 4 # How many stacked frames go into a single observation - - action_noise: - name: vel.rl.modules.noise.eps_greedy - - epsilon: - name: vel.schedules.linear_and_constant - end_of_interpolation: 0.1 - initial_value: 1.0 - final_value: 0.1 - - algo: - name: vel.rl.algo.dqn - - target_update_frequency: 32_000 # After how many batches to update the target network - max_grad_norm: 0.5 - - discount_factor: 0.99 - - rollout_steps: 4 # How many environment steps (per env) to perform per batch of training - training_steps: 32 # How many environment steps (per env) to perform per training round - parallel_envs: 1 # Roll out only one env in parallel, just like in DeepMind paper - - -optimizer: - name: vel.optimizers.adam - lr: 6.25e-05 - epsilon: 1.5e-4 - - -commands: - train: - name: 
vel.rl.commands.rl_train_command - total_frames: 5.0e7 # 50M - batches_per_epoch: 2500 - - record: - name: vel.rl.commands.record_movie_command - takes: 10 - videoname: 'atari_vid_{:04}.avi' - fps: 15 - - evaluate: - name: vel.rl.commands.evaluate_env_command - parallel_envs: 12 - takes: 20 diff --git a/examples-configs/rl/atari/purgatory/atari_ddqn.yaml b/examples-configs/rl/atari/purgatory/atari_ddqn.yaml deleted file mode 100644 index e69de29b..00000000 diff --git a/examples-configs/rl/atari/purgatory/atari_rainbow.yaml b/examples-configs/rl/atari/purgatory/atari_rainbow.yaml deleted file mode 100644 index e69de29b..00000000 diff --git a/vel/rl/env_roller/step_env_roller.py b/vel/rl/env_roller/step_env_roller.py index 2b959f6e..bb70e1c5 100644 --- a/vel/rl/env_roller/step_env_roller.py +++ b/vel/rl/env_roller/step_env_roller.py @@ -30,6 +30,7 @@ def environment(self): @torch.no_grad() def rollout(self, batch_info: BatchInfo, number_of_steps: int) -> Rollout: """ Calculate env rollout """ + self.actor.train() accumulator = TensorAccumulator() episode_information = [] # List of dictionaries with episode information diff --git a/vel/rl/env_roller/trajectory_replay_env_roller.py b/vel/rl/env_roller/trajectory_replay_env_roller.py index 7e347edb..f294f4e0 100644 --- a/vel/rl/env_roller/trajectory_replay_env_roller.py +++ b/vel/rl/env_roller/trajectory_replay_env_roller.py @@ -37,6 +37,7 @@ def environment(self): @torch.no_grad() def rollout(self, batch_info: BatchInfo, number_of_steps: int) -> Rollout: """ Calculate env rollout """ + self.actor.train() accumulator = TensorAccumulator() episode_information = [] # List of dictionaries with episode information diff --git a/vel/rl/env_roller/transition_replay_env_roller.py b/vel/rl/env_roller/transition_replay_env_roller.py index 5cf7a738..dc25b676 100644 --- a/vel/rl/env_roller/transition_replay_env_roller.py +++ b/vel/rl/env_roller/transition_replay_env_roller.py @@ -1,9 +1,8 @@ import torch -import torch.nn as nn import typing import numpy as np -from vel.api import BatchInfo, ModelFactory +from vel.api import BatchInfo from vel.openai.baselines.common.vec_env import VecEnv from vel.openai.baselines.common.running_mean_std import RunningMeanStd from vel.rl.api import ( @@ -21,26 +20,16 @@ class TransitionReplayEnvRoller(ReplayEnvRollerBase): """ def __init__(self, environment: VecEnv, policy: RlPolicy, device: torch.device, replay_buffer: ReplayBuffer, - discount_factor: typing.Optional[float] = None, normalize_returns: bool = False, - forward_steps: int = 1): + normalize_returns: bool = False, forward_steps: int = 1): self._environment = environment self.device = device self.replay_buffer = replay_buffer self.normalize_returns = normalize_returns self.forward_steps = forward_steps - self.discount_factor = discount_factor self.actor = PolicyActor(self.environment.num_envs, policy, device) assert not self.actor.is_stateful, "Does not support stateful policies" - if self.normalize_returns: - assert self.discount_factor is not None, \ - "TransitionReplayEnvRoller must have a discount factor defined if normalize_returns is turned on" - - if self.forward_steps > 1: - assert self.discount_factor is not None, \ - "TransitionReplayEnvRoller must have a discount factor defined if forward_steps is larger than one" - self.ret_rms = RunningMeanStd(shape=()) if normalize_returns else None # Initial observation @@ -59,6 +48,8 @@ def environment(self): @torch.no_grad() def rollout(self, batch_info: BatchInfo, number_of_steps: int) -> Rollout: """ Calculate 
env rollout """ + self.actor.train() + accumulator = TensorAccumulator() episode_information = [] # List of dictionaries with episode information @@ -90,7 +81,7 @@ def rollout(self, batch_info: BatchInfo, number_of_steps: int) -> Rollout: ) if self.ret_rms is not None: - self.accumulated_returns = new_rewards + self.discount_factor * self.accumulated_returns + self.accumulated_returns = new_rewards + self.actor.discount_factor * self.accumulated_returns self.ret_rms.update(self.accumulated_returns) # Done is flagged true when the episode has ended AND the frame we see is already a first frame from the @@ -125,7 +116,7 @@ def sample(self, batch_info: BatchInfo, number_of_steps: int) -> Rollout: if self.forward_steps > 1: transitions = self.replay_buffer.sample_forward_transitions( batch_size=number_of_steps, batch_info=batch_info, forward_steps=self.forward_steps, - discount_factor=self.discount_factor + discount_factor=self.actor.discount_factor ) else: transitions = self.replay_buffer.sample_transitions(batch_size=number_of_steps, batch_info=batch_info) @@ -153,12 +144,11 @@ def update(self, rollout, batch_info): class TransitionReplayEnvRollerFactory(ReplayEnvRollerFactoryBase): """ Factory for the ReplayEnvRoller """ - def __init__(self, replay_buffer_factory: ReplayBufferFactory, discount_factor: typing.Optional[float] = None, - normalize_returns: bool = False, forward_steps: int = 1): + def __init__(self, replay_buffer_factory: ReplayBufferFactory, normalize_returns: bool = False, + forward_steps: int = 1): self.replay_buffer_factory = replay_buffer_factory self.normalize_returns = normalize_returns self.forward_steps = forward_steps - self.discount_factor = discount_factor def instantiate(self, environment, policy, device): replay_buffer = self.replay_buffer_factory.instantiate(environment) @@ -168,18 +158,15 @@ def instantiate(self, environment, policy, device): policy=policy, device=device, replay_buffer=replay_buffer, - discount_factor=self.discount_factor, normalize_returns=self.normalize_returns, forward_steps=self.forward_steps ) -def create(replay_buffer, discount_factor: typing.Optional[float] = None, normalize_returns: bool = False, - forward_steps: int = 1): +def create(replay_buffer, normalize_returns: bool = False, forward_steps: int = 1): """ Vel factory function """ return TransitionReplayEnvRollerFactory( replay_buffer_factory=replay_buffer, - discount_factor=discount_factor, forward_steps=forward_steps, normalize_returns=normalize_returns ) diff --git a/vel/rl/layer/double_nature_cnn.py b/vel/rl/layer/double_nature_cnn.py index 54599e9e..2e269783 100644 --- a/vel/rl/layer/double_nature_cnn.py +++ b/vel/rl/layer/double_nature_cnn.py @@ -108,7 +108,7 @@ def __init__(self, output_dim: int = 512): @property def name_base(self) -> str: """ Base of layer name """ - return "nature_cnn" + return "double_nature_cnn" def instantiate(self, name: str, direct_input: SizeHints, context: dict) -> Layer: (b, c, w, h) = direct_input.assert_single(4) diff --git a/vel/rl/layer/purgatory/double_noisy_nature_cnn.py b/vel/rl/layer/double_noisy_nature_cnn.py similarity index 64% rename from vel/rl/layer/purgatory/double_noisy_nature_cnn.py rename to vel/rl/layer/double_noisy_nature_cnn.py index 7c31e719..25299baf 100644 --- a/vel/rl/layer/purgatory/double_noisy_nature_cnn.py +++ b/vel/rl/layer/double_noisy_nature_cnn.py @@ -12,20 +12,22 @@ import vel.util.network as net_util -from vel.api import LinearBackboneModel, ModelFactory +from vel.api import SizeHints, SizeHint + +from 
vel.net.layer_base import Layer, LayerFactory from vel.rl.module.noisy_linear import NoisyLinear -class DoubleNoisyNatureCnn(LinearBackboneModel): +class DoubleNoisyNatureCnn(Layer): """ Neural network as defined in the paper 'Human-level control through deep reinforcement learning' but with two separate heads and "noisy" linear layer. """ - def __init__(self, input_width, input_height, input_channels, output_dim=512, initial_std_dev=0.4, + def __init__(self, name: str, input_width, input_height, input_channels, output_dim=512, initial_std_dev=0.4, factorized_noise=True): - super().__init__() + super().__init__(name) - self._output_dim = output_dim + self.output_dim = output_dim self.conv1 = nn.Conv2d( in_channels=input_channels, @@ -76,10 +78,11 @@ def __init__(self, input_width, input_height, input_channels, output_dim=512, in factorized_noise=factorized_noise ) - @property - def output_dim(self) -> int: - """ Final dimension of model output """ - return self._output_dim + def size_hints(self) -> SizeHints: + return SizeHints(( + SizeHint(None, self.output_dim), + SizeHint(None, self.output_dim) + )) def reset_weights(self): for m in self.modules(): @@ -94,7 +97,7 @@ def reset_weights(self): elif isinstance(m, NoisyLinear): m.reset_weights() - def forward(self, image): + def forward(self, image, state: dict = None, context: dict = None): result = image result = F.relu(self.conv1(result)) result = F.relu(self.conv2(result)) @@ -107,12 +110,37 @@ def forward(self, image): return output_one, output_two -def create(input_width, input_height, input_channels=1, output_dim=512, initial_std_dev=0.4, factorized_noise=True): - """ Vel factory function """ - def instantiate(**_): +class DoubleNoisyNatureCnnFactory(LayerFactory): + """ Nature Cnn Network Factory """ + + def __init__(self, initial_std_dev: float = 0.4, factorized_noise: bool = True, output_dim: int = 512): + self.initial_std_dev = initial_std_dev + self.factorized_noise = factorized_noise + self.output_dim = output_dim + + @property + def name_base(self) -> str: + """ Base of layer name """ + return "double_noisy_nature_cnn" + + def instantiate(self, name: str, direct_input: SizeHints, context: dict) -> Layer: + (b, c, w, h) = direct_input.assert_single(4) + return DoubleNoisyNatureCnn( - input_width=input_width, input_height=input_height, input_channels=input_channels, - output_dim=output_dim, initial_std_dev=initial_std_dev, factorized_noise=factorized_noise + name=name, + input_width=w, + input_height=h, + input_channels=c, + output_dim=self.output_dim, + initial_std_dev=self.initial_std_dev, + factorized_noise=self.factorized_noise ) - return ModelFactory.generic(instantiate) + +def create(initial_std_dev: float = 0.4, factorized_noise: bool = True, output_dim: int = 512): + """ Vel factory function """ + return DoubleNoisyNatureCnnFactory( + output_dim=output_dim, + initial_std_dev=initial_std_dev, + factorized_noise=factorized_noise + ) diff --git a/vel/rl/module/head/q_distributional_noisy_dueling_head.py b/vel/rl/module/head/q_distributional_noisy_dueling_head.py index 3e0f2794..59a22ac8 100644 --- a/vel/rl/module/head/q_distributional_noisy_dueling_head.py +++ b/vel/rl/module/head/q_distributional_noisy_dueling_head.py @@ -10,7 +10,7 @@ class QDistributionalNoisyDuelingHead(nn.Module): """ Network head calculating Q-function value for each (discrete) action. 
""" - def __init__(self, input_dim, action_space, vmin: float, vmax: float, atoms: int = 1, + def __init__(self, val_input_dim, adv_input_dim, action_space, vmin: float, vmax: float, atoms: int = 1, initial_std_dev: float = 0.4, factorized_noise: bool = True): super().__init__() @@ -28,11 +28,12 @@ def __init__(self, input_dim, action_space, vmin: float, vmax: float, atoms: int self.atom_delta = (self.vmax - self.vmin) / (self.atoms - 1) self.linear_layer_advantage = NoisyLinear( - input_dim, self.action_size * self.atoms, initial_std_dev=initial_std_dev, factorized_noise=factorized_noise + adv_input_dim, self.action_size * self.atoms, initial_std_dev=initial_std_dev, + factorized_noise=factorized_noise ) self.linear_layer_value = NoisyLinear( - input_dim, self.atoms, initial_std_dev=initial_std_dev, factorized_noise=factorized_noise + val_input_dim, self.atoms, initial_std_dev=initial_std_dev, factorized_noise=factorized_noise ) self.register_buffer('support_atoms', torch.linspace(self.vmin, self.vmax, self.atoms)) diff --git a/vel/rl/module/noisy_linear.py b/vel/rl/module/noisy_linear.py index 1fdea082..2b94c90c 100644 --- a/vel/rl/module/noisy_linear.py +++ b/vel/rl/module/noisy_linear.py @@ -82,5 +82,5 @@ def extra_repr(self): """ return ( f'{self.in_features}, {self.out_features}, initial_std_dev={self.initial_std_dev}, ' - 'factorized_noise={self.factorized_noise} ' + f'factorized_noise={self.factorized_noise} ' ) diff --git a/vel/rl/module/rainbow_policy.py b/vel/rl/module/rainbow_policy.py new file mode 100644 index 00000000..a61ef126 --- /dev/null +++ b/vel/rl/module/rainbow_policy.py @@ -0,0 +1,67 @@ +import gym +import torch + +from vel.api import Network, BackboneNetwork +from vel.rl.module.head.q_distributional_noisy_dueling_head import QDistributionalNoisyDuelingHead + + +class RainbowPolicy(Network): + """ + A deterministic greedy action-value model. 
+ Includes following commonly known modifications: + - Distributional Q-Learning + - Dueling architecture + - Noisy Nets + """ + + def __init__(self, net: BackboneNetwork, action_space: gym.Space, vmin: float, vmax: float, + atoms: int = 1, initial_std_dev: float = 0.4, factorized_noise: bool = True): + super().__init__() + + self.net = net + + self.action_space = action_space + + (value_size, adv_size) = self.net.size_hints().assert_tuple(2) + + self.q_head = QDistributionalNoisyDuelingHead( + val_input_dim=value_size.last(), + adv_input_dim=adv_size.last(), + action_space=action_space, + vmin=vmin, vmax=vmax, atoms=atoms, + initial_std_dev=initial_std_dev, factorized_noise=factorized_noise + ) + + @property + def atom_delta(self) -> float: + return self.q_head.atom_delta + + @property + def support_atoms(self) -> torch.Tensor: + return self.q_head.support_atoms + + def reset_weights(self): + """ Initialize weights to reasonable defaults """ + self.net.reset_weights() + self.q_head.reset_weights() + + def forward(self, observations): + """ Model forward pass """ + advantage_features, value_features = self.net(observations) + log_histogram = self.q_head(advantage_features, value_features) + return log_histogram + + def histogram_info(self): + """ Return extra information about histogram """ + return self.q_head.histogram_info() + + # def step(self, observations): + # """ Sample action from an action space for given state """ + # log_histogram = self(observations) + # actions = self.q_head.sample(log_histogram) + # + # return { + # 'actions': actions, + # 'log_histogram': log_histogram + # } + diff --git a/vel/rl/policy/acer.py b/vel/rl/policy/acer.py index c320a25f..049c6842 100644 --- a/vel/rl/policy/acer.py +++ b/vel/rl/policy/acer.py @@ -152,7 +152,6 @@ def calculate_gradient(self, batch_info: BatchInfo, rollout: Rollout) -> dict: if self.trust_region: with torch.no_grad(): - self.target_policy.eval() target_logprobs = self.target_policy(observations)[0] actor_loss = policy_loss - self.entropy_coefficient * policy_entropy diff --git a/vel/rl/policy/dqn.py b/vel/rl/policy/dqn.py index e2460fe7..9ef180ba 100644 --- a/vel/rl/policy/dqn.py +++ b/vel/rl/policy/dqn.py @@ -72,7 +72,6 @@ def calculate_gradient(self, batch_info: BatchInfo, rollout: Rollout) -> dict: q = self.model(observations) with torch.no_grad(): - self.target_model.eval() target_q = self.target_model(observations_next) if self.double_dqn: diff --git a/vel/rl/policy/purgatory/distributional_dqn.py b/vel/rl/policy/purgatory/distributional_dqn.py deleted file mode 100644 index adbee949..00000000 --- a/vel/rl/policy/purgatory/distributional_dqn.py +++ /dev/null @@ -1,191 +0,0 @@ -import torch -import torch.nn.utils - -from vel.api import ModelFactory -from vel.metric import AveragingNamedMetric -from vel.rl.api import OptimizerAlgoBase - - -class DistributionalDeepQLearning(OptimizerAlgoBase): - """ Deep Q-Learning algorithm """ - - def __init__(self, model_factory: ModelFactory, discount_factor: float, double_dqn: bool, - target_update_frequency: int): - super().__init__(max_grad_norm) - - self.model_factory = model_factory - self.discount_factor = discount_factor - - self.double_dqn = double_dqn - self.target_update_frequency = target_update_frequency - - self.target_model = None - - self.vmin = None - self.vmax = None - self.num_atoms = None - self.support_atoms = None - self.atom_delta = None - - def initialize(self, training_info, model, environment, device): - """ Initialize policy gradient from reinforcer settings """ - 
self.target_model = self.model_factory.instantiate(action_space=environment.action_space).to(device) - self.target_model.load_state_dict(model.state_dict()) - self.target_model.eval() - - histogram_info = model.histogram_info() - - self.vmin = histogram_info['vmin'] - self.vmax = histogram_info['vmax'] - - self.num_atoms = histogram_info['num_atoms'] - - self.support_atoms = histogram_info['support_atoms'] - self.atom_delta = histogram_info['atom_delta'] - - def calculate_gradient(self, batch_info, device, model, rollout): - """ Calculate loss of the supplied rollout """ - evaluator = model.evaluate(rollout) - batch_size = rollout.frames() - - dones_tensor = evaluator.get('rollout:dones') - rewards_tensor = evaluator.get('rollout:rewards') - - assert dones_tensor.dtype == torch.float32 - - with torch.no_grad(): - target_evaluator = self.target_model.evaluate(rollout) - - if self.double_dqn: - # DOUBLE DQN - # Histogram gets returned as logits initially, we need to exp it before projection - target_value_histogram_for_all_actions = target_evaluator.get('model:q_dist_next').exp() - model_value_histogram_for_all_actions = evaluator.get('model:q_dist_next').exp() - - atoms_aligned = self.support_atoms.view(1, 1, self.num_atoms) - - selected_action_indices = ( - (atoms_aligned * model_value_histogram_for_all_actions).sum(dim=-1).argmax(dim=1) - ) - - # Select largest 'target' value based on action that 'model' selects - next_value_histograms = ( - target_value_histogram_for_all_actions[range(batch_size), selected_action_indices] - ) - else: - # REGULAR DQN - # Histogram gets returned as logits initially, we need to exp it before projection - target_value_histogram_for_all_actions = target_evaluator.get('model:q_dist_next').exp() - - atoms_aligned = self.support_atoms.view(1, 1, self.num_atoms) - - selected_action_indices = ( - (atoms_aligned * target_value_histogram_for_all_actions).sum(dim=-1).argmax(dim=1) - ) - - next_value_histograms = ( - target_value_histogram_for_all_actions[range(batch_size), selected_action_indices] - ) - - # HISTOGRAM PROJECTION CODE - forward_steps = rollout.extra_data.get('forward_steps', 1) - - atoms_projected = ( - rewards_tensor.unsqueeze(1) + - (self.discount_factor ** forward_steps) * - (1 - dones_tensor).unsqueeze(1) * self.support_atoms.unsqueeze(0) - ) - - atoms_projected = atoms_projected.clamp(min=self.vmin, max=self.vmax) - projection_indices = (atoms_projected - self.vmin) / self.atom_delta - - index_floor = projection_indices.floor().long() - index_ceil = projection_indices.ceil().long() - - # Fix corner case when index_floor == index_ceil - index_floor[(index_ceil > 0) * (index_floor == index_ceil)] -= 1 - index_ceil[(index_floor < (self.num_atoms - 1)) * (index_floor == index_ceil)] += 1 - - value_histogram_projected = torch.zeros_like(next_value_histograms) - - # Following part will be a bit convoluted, in an effort to fully vectorize projection operation - - # Special offset index tensor - offsets = ( - torch.arange(0, batch_size * self.num_atoms, self.num_atoms) - .unsqueeze(1) - .expand(batch_size, self.num_atoms) - .contiguous().view(-1).to(device) - ) - - # Linearize all the buffers - value_histogram_projected = value_histogram_projected.view(-1) - index_ceil = index_ceil.view(-1) - index_floor = index_floor.view(-1) - projection_indices = projection_indices.view(-1) - - value_histogram_projected.index_add_( - 0, - index_floor+offsets, - (next_value_histograms.view(-1) * (index_ceil.float() - projection_indices)) - ) - - 
value_histogram_projected.index_add_( - 0, - index_ceil+offsets, - (next_value_histograms.view(-1) * (projection_indices - index_floor.float())) - ) - - value_histogram_projected = value_histogram_projected.reshape(next_value_histograms.shape) - - q_log_histogram_selected = evaluator.get('model:action:q_dist') - - # Cross-entropy loss as usual - original_losses = -(value_histogram_projected * q_log_histogram_selected).sum(dim=1) - - if evaluator.is_provided('rollout:weights'): - weights = evaluator.get('rollout:weights') - else: - weights = torch.ones_like(rewards_tensor) - - loss_value = torch.mean(weights * original_losses) - loss_value.backward() - - with torch.no_grad(): - mean_q_model = (self.support_atoms.unsqueeze(0) * torch.exp(q_log_histogram_selected)).sum(dim=1).mean() - mean_q_target = (self.support_atoms.unsqueeze(0) * value_histogram_projected).sum(dim=1).mean() - - return { - 'loss': loss_value.item(), - # We need it to update priorities in the replay buffer: - 'errors': original_losses.detach().cpu().numpy(), - 'average_q_selected': mean_q_model.item(), - 'average_q_target': mean_q_target.item() - } - - def post_optimization_step(self, batch_info, device, model, rollout): - """ Steps to take after optimization has been done""" - if batch_info.aggregate_batch_number % self.target_update_frequency == 0: - self.target_model.load_state_dict(model.state_dict()) - self.target_model.eval() - - def metrics(self) -> list: - """ List of metrics to track for this learning process """ - return [ - AveragingNamedMetric("loss"), - AveragingNamedMetric("average_q_selected"), - AveragingNamedMetric("average_q_target"), - AveragingNamedMetric("grad_norm"), - ] - - -def create(model: ModelFactory, discount_factor: float, target_update_frequency: int, - max_grad_norm: float, double_dqn: bool = False): - """ Vel factory function """ - return DistributionalDeepQLearning( - model_factory=model, - discount_factor=discount_factor, - double_dqn=double_dqn, - target_update_frequency=target_update_frequency, - max_grad_norm=max_grad_norm - ) diff --git a/vel/rl/policy/rainbow.py b/vel/rl/policy/rainbow.py new file mode 100644 index 00000000..846f032c --- /dev/null +++ b/vel/rl/policy/rainbow.py @@ -0,0 +1,240 @@ +import gym +import torch +import torch.nn.utils + +from vel.api import ModelFactory, BackboneNetwork, BatchInfo +from vel.metric import AveragingNamedMetric +from vel.rl.api import RlPolicy, Rollout +from vel.rl.module.rainbow_policy import RainbowPolicy + + +class Rainbow(RlPolicy): + """ Deep Q-Learning algorithm """ + + # def __init__(self, model_factory: ModelFactory, discount_factor: float, double_dqn: bool, + + def __init__(self, net: BackboneNetwork, net_factory: ModelFactory, action_space: gym.Space, + discount_factor: float, target_update_frequency: int, + vmin: float, vmax: float, atoms: int = 1, initial_std_dev: float = 0.4, factorized_noise: bool = True): + super().__init__(discount_factor) + + self.model = RainbowPolicy( + net=net, + action_space=action_space, + vmin=vmin, + vmax=vmax, + atoms=atoms, + initial_std_dev=initial_std_dev, + factorized_noise=factorized_noise + ) + + self.target_model = RainbowPolicy( + net=net_factory.instantiate(), + action_space=action_space, + vmin=vmin, + vmax=vmax, + atoms=atoms, + initial_std_dev=initial_std_dev, + factorized_noise=factorized_noise + ) + + self.discount_factor = discount_factor + self.target_update_frequency = target_update_frequency + + self.vmin = vmin + self.vmax = vmax + self.num_atoms = atoms + + # self.support_atoms = 
self.model.q + # self.atom_delta = histogram_info['atom_delta'] + self.register_buffer('support_atoms', self.model.support_atoms.clone()) + self.atom_delta = self.model.atom_delta + + def reset_weights(self): + """ Initialize properly model weights """ + self.model.reset_weights() + self.target_model.load_state_dict(self.model.state_dict()) + + def forward(self, observation, state=None): + """ Calculate model outputs """ + return self.model(observation) + + def act(self, observation, state=None, deterministic=False): + """ Select actions based on model's output """ + self.train(mode=not deterministic) + + q_values = self.model(observation) + actions = self.model.q_head.sample(q_values) + + return { + 'actions': actions, + 'q': q_values + } + + def calculate_gradient(self, batch_info: BatchInfo, rollout: Rollout) -> dict: + """ Calculate loss of the supplied rollout """ + batch_size = rollout.frames() + + observations = rollout.batch_tensor('observations') + observations_next = rollout.batch_tensor('observations_next') + + actions = rollout.batch_tensor('actions') + dones_tensor = rollout.batch_tensor('dones') + rewards_tensor = rollout.batch_tensor('rewards') + + assert dones_tensor.dtype == torch.float32 + + q = self.model(observations) + + with torch.no_grad(): + # DOUBLE DQN + # Histogram gets returned as logits initially, we need to exp it before projection + target_value_histogram_for_all_actions = self.target_model(observations_next).exp() + model_value_histogram_for_all_actions = self.model(observations_next).exp() + + atoms_aligned = self.support_atoms.view(1, 1, self.num_atoms) + + selected_action_indices = ( + (atoms_aligned * model_value_histogram_for_all_actions).sum(dim=-1).argmax(dim=1) + ) + + # Select largest 'target' value based on action that 'model' selects + next_value_histograms = ( + target_value_histogram_for_all_actions[range(batch_size), selected_action_indices] + ) + + # HISTOGRAM PROJECTION CODE + forward_steps = rollout.extra_data.get('forward_steps', 1) + + atoms_projected = ( + rewards_tensor.unsqueeze(1) + + (self.discount_factor ** forward_steps) * + (1 - dones_tensor).unsqueeze(1) * self.support_atoms.unsqueeze(0) + ) + + atoms_projected = atoms_projected.clamp(min=self.vmin, max=self.vmax) + projection_indices = (atoms_projected - self.vmin) / self.atom_delta + + index_floor = projection_indices.floor().long() + index_ceil = projection_indices.ceil().long() + + # Fix corner case when index_floor == index_ceil + index_floor[(index_ceil > 0) * (index_floor == index_ceil)] -= 1 + index_ceil[(index_floor < (self.num_atoms - 1)) * (index_floor == index_ceil)] += 1 + + value_histogram_projected = torch.zeros_like(next_value_histograms) + + # Following part will be a bit convoluted, in an effort to fully vectorize projection operation + + # Special offset index tensor + offsets = ( + torch.arange(0, batch_size * self.num_atoms, self.num_atoms) + .unsqueeze(1) + .expand(batch_size, self.num_atoms) + .contiguous().view(-1).to(value_histogram_projected.device) + ) + + # Linearize all the buffers + value_histogram_projected = value_histogram_projected.view(-1) + index_ceil = index_ceil.view(-1) + index_floor = index_floor.view(-1) + projection_indices = projection_indices.view(-1) + + value_histogram_projected.index_add_( + 0, + index_floor+offsets, + (next_value_histograms.view(-1) * (index_ceil.float() - projection_indices)) + ) + + value_histogram_projected.index_add_( + 0, + index_ceil+offsets, + (next_value_histograms.view(-1) * (projection_indices - 
index_floor.float())) + ) + + value_histogram_projected = value_histogram_projected.reshape(next_value_histograms.shape) + + q_log_histogram_selected = q[range(q.size(0)), actions] + + # Cross-entropy loss as usual + original_losses = -(value_histogram_projected * q_log_histogram_selected).sum(dim=1) + + if rollout.has_tensor('weights'): + weights = rollout.batch_tensor('weights') + else: + weights = torch.ones_like(rewards_tensor) + + loss_value = torch.mean(weights * original_losses) + loss_value.backward() + + with torch.no_grad(): + mean_q_model = (self.support_atoms.unsqueeze(0) * torch.exp(q_log_histogram_selected)).sum(dim=1).mean() + mean_q_target = (self.support_atoms.unsqueeze(0) * value_histogram_projected).sum(dim=1).mean() + + return { + 'loss': loss_value.item(), + # We need it to update priorities in the replay buffer: + 'errors': original_losses.detach().cpu().numpy(), + 'average_q_selected': mean_q_model.item(), + 'average_q_target': mean_q_target.item() + } + + def post_optimization_step(self, batch_info, rollout): + """ Steps to take after optimization has been done""" + if batch_info.aggregate_batch_number % self.target_update_frequency == 0: + self.target_model.load_state_dict(self.model.state_dict()) + + def metrics(self) -> list: + """ List of metrics to track for this learning process """ + return [ + AveragingNamedMetric("loss"), + AveragingNamedMetric("average_q_selected"), + AveragingNamedMetric("average_q_target") + ] + + +class RainbowFactory(ModelFactory): + def __init__(self, net_factory: ModelFactory, discount_factor: float, target_update_frequency: int, + vmin: float, vmax: float, atoms: int = 1, initial_std_dev: float = 0.4, factorized_noise: bool = True): + self.net_factory = net_factory + self.discount_factor = discount_factor + self.target_update_frequency = target_update_frequency + self.vmin = vmin + self.vmax = vmax + self.atoms = atoms + self.initial_std_dev = initial_std_dev + self.factorized_noise = factorized_noise + + def instantiate(self, **extra_args): + """ Instantiate the model """ + action_space = extra_args.pop('action_space') + # TODO(jerry): Push noisy net parameters down the stack here + net = self.net_factory.instantiate(**extra_args) + + return Rainbow( + net=net, + net_factory=self.net_factory, + action_space=action_space, + discount_factor=self.discount_factor, + target_update_frequency=self.target_update_frequency, + vmin=self.vmin, + vmax=self.vmax, + atoms=self.atoms, + initial_std_dev=self.initial_std_dev, + factorized_noise=self.factorized_noise + ) + + +def create(net: ModelFactory, discount_factor: float, target_update_frequency: int, + vmin: float, vmax: float, atoms: int = 1, initial_std_dev: float = 0.4, factorized_noise: bool = True): + """ Vel factory function """ + return RainbowFactory( + net_factory=net, + discount_factor=discount_factor, + target_update_frequency=target_update_frequency, + vmin=vmin, + vmax=vmax, + atoms=atoms, + initial_std_dev=initial_std_dev, + factorized_noise=factorized_noise + ) diff --git a/vel/rl/reinforcer/buffered_mixed_policy_iteration_reinforcer.py b/vel/rl/reinforcer/buffered_mixed_policy_iteration_reinforcer.py index 2a6b42e4..d53225a9 100644 --- a/vel/rl/reinforcer/buffered_mixed_policy_iteration_reinforcer.py +++ b/vel/rl/reinforcer/buffered_mixed_policy_iteration_reinforcer.py @@ -106,13 +106,12 @@ def train_batch(self, batch_info: BatchInfo) -> None: def on_policy_train_batch(self, batch_info: BatchInfo): """ Perform an 'on-policy' training step of evaluating an env and a single 
backpropagation step """ - self.policy.train() - rollout = self.env_roller.rollout(batch_info, self.settings.number_of_steps).to_device(self.device) # Preprocessing of the rollout for this policy rollout = self.policy.process_rollout(rollout) + self.policy.train() batch_result = self.policy.optimize( batch_info=batch_info, rollout=rollout @@ -124,10 +123,9 @@ def on_policy_train_batch(self, batch_info: BatchInfo): def off_policy_train_batch(self, batch_info: BatchInfo): """ Perform an 'off-policy' training step of sampling the replay buffer and gradient descent """ - self.policy.train() - rollout = self.env_roller.sample(batch_info, self.settings.number_of_steps).to_device(self.device) + self.policy.train() batch_result = self.policy.optimize( batch_info=batch_info, rollout=rollout diff --git a/vel/rl/reinforcer/buffered_off_policy_iteration_reinforcer.py b/vel/rl/reinforcer/buffered_off_policy_iteration_reinforcer.py index b5baaad5..d55b3bf0 100644 --- a/vel/rl/reinforcer/buffered_off_policy_iteration_reinforcer.py +++ b/vel/rl/reinforcer/buffered_off_policy_iteration_reinforcer.py @@ -103,8 +103,6 @@ def train_batch(self, batch_info: BatchInfo) -> None: def roll_out_and_store(self, batch_info): """ Roll out environment and store result in the replay buffer """ - self.policy.train() - if self.env_roller.is_ready_for_sampling(): rollout = self.env_roller.rollout(batch_info, self.settings.rollout_steps) rollout = rollout.to_device(self.device) diff --git a/vel/rl/reinforcer/on_policy_iteration_reinforcer.py b/vel/rl/reinforcer/on_policy_iteration_reinforcer.py index 03b53e28..d2d0a50d 100644 --- a/vel/rl/reinforcer/on_policy_iteration_reinforcer.py +++ b/vel/rl/reinforcer/on_policy_iteration_reinforcer.py @@ -96,9 +96,6 @@ def train_batch(self, batch_info: BatchInfo) -> None: 1. Roll out the environmnent using current policy 2. 
Use that rollout to train the policy """ - # Calculate environment rollout on the evaluation version of the model - self.policy.train() - rollout = self.env_roller.rollout(batch_info, self.settings.number_of_steps) # Preprocessing of the rollout for this algorithm @@ -117,6 +114,8 @@ def train_batch(self, batch_info: BatchInfo) -> None: else: experience_replay_count = self.settings.experience_replay + self.policy.train() + # Repeat the experience N times for i in range(experience_replay_count): # We may potentially need to split rollout into multiple batches diff --git a/vel/rl/util/actor.py b/vel/rl/util/actor.py index 76fa9d94..55b3950b 100644 --- a/vel/rl/util/actor.py +++ b/vel/rl/util/actor.py @@ -13,6 +13,10 @@ def __init__(self, num_envs: int, policy: RlPolicy, device: torch.device): self.device = device self.state = to_device(self.policy.zero_state(num_envs), self.device) + @property + def discount_factor(self) -> float: + return self.policy.discount_factor + def act(self, observation, advance_state=True, deterministic=False): """ Return result of a policy on a given input """ result = self.policy.act(observation, state=self.state, deterministic=deterministic) @@ -39,3 +43,9 @@ def value(self, observation): def is_stateful(self) -> bool: """ If the model has a state that needs to be fed between individual observations """ return self.policy.is_stateful + + def eval(self): + self.policy.eval() + + def train(self): + self.policy.train() diff --git a/vel/rl/xpolicy/purgatory/q_rainbow_model.py b/vel/rl/xpolicy/purgatory/q_rainbow_model.py deleted file mode 100644 index d9b9dfbf..00000000 --- a/vel/rl/xpolicy/purgatory/q_rainbow_model.py +++ /dev/null @@ -1,110 +0,0 @@ -import gym -import typing - -from vel.api import LinearBackboneModel, Model, ModelFactory, BackboneModel -from vel.module.input.identity import IdentityFactory -from vel.rl.api import Rollout, Evaluator -from vel.rl.model.q_distributional_model import QDistributionalModelEvaluator -from vel.rl.module.q_distributional_noisy_dueling_head import QDistributionalNoisyDuelingHead - - -class QRainbowModel(Model): - """ - A deterministic greedy action-value model. 
- Includes following commonly known modifications: - - Distributional Q-Learning - - Dueling architecture - - Noisy Nets - """ - - def __init__(self, input_block: BackboneModel, backbone: LinearBackboneModel, action_space: gym.Space, vmin: float, - vmax: float, atoms: int = 1, initial_std_dev: float = 0.4, factorized_noise: bool = True): - super().__init__() - - self.action_space = action_space - - self.input_block = input_block - self.backbone = backbone - - self.q_head = QDistributionalNoisyDuelingHead( - input_dim=backbone.output_dim, - action_space=action_space, - vmin=vmin, vmax=vmax, atoms=atoms, - initial_std_dev=initial_std_dev, factorized_noise=factorized_noise - ) - - def reset_weights(self): - """ Initialize weights to reasonable defaults """ - self.input_block.reset_weights() - self.backbone.reset_weights() - self.q_head.reset_weights() - - def forward(self, observations): - """ Model forward pass """ - input_data = self.input_block(observations) - advantage_features, value_features = self.backbone(input_data) - log_histogram = self.q_head(advantage_features, value_features) - return log_histogram - - def histogram_info(self): - """ Return extra information about histogram """ - return self.q_head.histogram_info() - - def step(self, observations): - """ Sample action from an action space for given state """ - log_histogram = self(observations) - actions = self.q_head.sample(log_histogram) - - return { - 'actions': actions, - 'log_histogram': log_histogram - } - - def evaluate(self, rollout: Rollout) -> Evaluator: - """ Evaluate model on a rollout """ - return QDistributionalModelEvaluator(self, rollout) - - -class QDistributionalModelFactory(ModelFactory): - """ Factory class for q-learning models """ - def __init__(self, input_block: ModelFactory, backbone: ModelFactory, vmin: float, vmax: float, atoms: int, - initial_std_dev: float = 0.4, factorized_noise: bool = True): - self.input_block = input_block - self.backbone = backbone - self.vmin = vmin - self.vmax = vmax - self.atoms = atoms - self.initial_std_dev = initial_std_dev - self.factorized_noise = factorized_noise - - def instantiate(self, **extra_args): - """ Instantiate the model """ - input_block = self.input_block.instantiate() - backbone = self.backbone.instantiate(**extra_args) - - return QRainbowModel( - input_block=input_block, - backbone=backbone, - action_space=extra_args['action_space'], - vmin=self.vmin, - vmax=self.vmax, - atoms=self.atoms, - initial_std_dev=self.initial_std_dev, - factorized_noise=self.factorized_noise - ) - - -def create(backbone: ModelFactory, vmin: float, vmax: float, atoms: int, initial_std_dev: float = 0.4, - factorized_noise: bool = True, input_block: typing.Optional[ModelFactory] = None): - """ Vel factory function """ - if input_block is None: - input_block = IdentityFactory() - - return QDistributionalModelFactory( - input_block=input_block, backbone=backbone, - vmin=vmin, - vmax=vmax, - atoms=atoms, - initial_std_dev=initial_std_dev, - factorized_noise=factorized_noise - ) From 936b2b92f2c301eebf83ff80646b6d123e8c0a3a Mon Sep 17 00:00:00 2001 From: Million Integrals Date: Thu, 3 Oct 2019 09:14:08 -0700 Subject: [PATCH 107/162] A2C yaml works now. 
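The config change below wires the MuJoCo A2C model through the modular network stack: observations pass through a vel.net.layer.input.normalize layer (backed by NormalizeObservations, whose forward pass appears further down), then an MLP backbone, then a repeat-tensor layer feeding the action and value heads. As a rough illustration of what running-statistics observation normalization does, here is a minimal sketch; the class name, momentum update and epsilon handling are assumptions for illustration only, not the NormalizeObservations code in this patch:

    import torch
    import torch.nn as nn

    class RunningNormalizer(nn.Module):
        """ Minimal sketch of running-statistics observation normalization (illustrative only) """

        def __init__(self, shape, epsilon=1e-6, momentum=0.01):
            super().__init__()
            self.epsilon = epsilon
            self.momentum = momentum
            # Buffers travel with the module but are not updated by the optimizer
            self.register_buffer('running_mean', torch.zeros(shape))
            self.register_buffer('running_var', torch.ones(shape))

        def forward(self, observations):
            if self.training:
                with torch.no_grad():
                    self.running_mean.mul_(1 - self.momentum).add_(self.momentum * observations.mean(dim=0))
                    self.running_var.mul_(1 - self.momentum).add_(self.momentum * observations.var(dim=0, unbiased=False))
            # Same form of normalization as the forward pass shown in the diff below
            return (observations - self.running_mean) / torch.sqrt(self.running_var + self.epsilon)

The same idea applied to rewards is what the new normalize_returns flag on the dummy vec env enables via VecNormalize(ob=False, ret=True).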
--- examples-configs/rl/mujoco/mujoco_a2c.yaml | 28 +++++------ vel/module/input/normalize_observations.py | 17 +------ vel/net/layer/input/image_to_tensor.py | 4 +- vel/net/layer/input/normalize.py | 42 ++++++++++++++++ vel/{rl/layer/purgatory => net/layer}/mlp.py | 53 ++++++++++++-------- vel/net/layer/util/repeat_tensor.py | 4 +- vel/net/modular.py | 7 ++- vel/rl/vecenv/dummy.py | 11 ++-- 8 files changed, 102 insertions(+), 64 deletions(-) create mode 100644 vel/net/layer/input/normalize.py rename vel/{rl/layer/purgatory => net/layer}/mlp.py (51%) diff --git a/examples-configs/rl/mujoco/mujoco_a2c.yaml b/examples-configs/rl/mujoco/mujoco_a2c.yaml index 266f7353..2e33f23b 100644 --- a/examples-configs/rl/mujoco/mujoco_a2c.yaml +++ b/examples-configs/rl/mujoco/mujoco_a2c.yaml @@ -4,33 +4,31 @@ name: 'mujoco_a2c' env: name: vel.rl.env.mujoco game: !param game = 'Reacher-v2' - normalize_returns: true vec_env: name: vel.rl.vecenv.dummy + normalize_returns: true model: - name: vel.rl.algo.a2c + name: vel.rl.policy.a2c entropy_coefficient: 0.0 value_coefficient: 0.5 gae_lambda: 0.95 # Generalized Advantage Estimator Lambda parameter discount_factor: 0.99 # Discount factor for the rewards - policy: - name: vel.rl.policy.stochastic_policy - - input_block: - name: vel.module.input.normalize_observations - input_shape: 11 - - backbone: - name: vel.rl.backbone.mlp - input_length: 11 - hidden_layers: [64, 64] - activation: 'tanh' + net: + name: vel.net.modular + layers: + - name: vel.net.layer.input.normalize + shape: 11 + - name: vel.net.layer.mlp + hidden_layers: [64, 64] + activation: 'tanh' + - name: vel.net.layer.util.repeat_tensor + times: 2 # Need to repeat output twice, for action and value heads reinforcer: @@ -61,5 +59,3 @@ commands: name: vel.rl.command.record_movie_command takes: 10 videoname: 'reacher_vid_{:04}.avi' - sample_args: - argmax_sampling: true diff --git a/vel/module/input/normalize_observations.py b/vel/module/input/normalize_observations.py index d3013238..52dc8de9 100644 --- a/vel/module/input/normalize_observations.py +++ b/vel/module/input/normalize_observations.py @@ -1,10 +1,9 @@ import torch -import numbers -from vel.api import BackboneModel, ModelFactory +from vel.api import Network -class NormalizeObservations(BackboneModel): +class NormalizeObservations(Network): """ Normalize a vector of observations """ def __init__(self, input_shape, epsilon=1e-6): @@ -46,15 +45,3 @@ def forward(self, input_vector): return (input_vector - self.running_mean.unsqueeze(0)) / torch.sqrt(self.running_var.unsqueeze(0)) - -def create(input_shape): - """ Vel factory function """ - if isinstance(input_shape, numbers.Number): - input_shape = (input_shape,) - elif not isinstance(input_shape, tuple): - input_shape = tuple(input_shape) - - def instantiate(**_): - return NormalizeObservations(input_shape) - - return ModelFactory.generic(instantiate) diff --git a/vel/net/layer/input/image_to_tensor.py b/vel/net/layer/input/image_to_tensor.py index cd034320..3019e933 100644 --- a/vel/net/layer/input/image_to_tensor.py +++ b/vel/net/layer/input/image_to_tensor.py @@ -1,8 +1,6 @@ -import typing - from vel.api import SizeHints, SizeHint -from vel.net.modular import LayerFactory, Layer from vel.module.input.image_to_tensor import image_to_tensor +from vel.net.layer_base import LayerFactory, Layer class ImageToTensorLayer(Layer): diff --git a/vel/net/layer/input/normalize.py b/vel/net/layer/input/normalize.py new file mode 100644 index 00000000..e2ac6a03 --- /dev/null +++ 
b/vel/net/layer/input/normalize.py @@ -0,0 +1,42 @@ +import collections.abc as abc + +from vel.api import SizeHints, SizeHint +from vel.module.input.normalize_observations import NormalizeObservations +from vel.net.layer_base import LayerFactory, Layer + + +class NormalizeLayer(Layer): + def __init__(self, name: str, shape): + super().__init__(name) + if not isinstance(shape, abc.Sequence): + self.shape = (shape,) + else: + self.shape = shape + + self.normalize = NormalizeObservations(input_shape=shape) + + def forward(self, direct, state: dict = None, context: dict = None): + return self.normalize(direct) + + def size_hints(self) -> SizeHints: + return SizeHints(SizeHint(*([None] + list(self.shape)))) + + +class NormalizeLayerFactory(LayerFactory): + def __init__(self, shape=None): + self.shape = shape + + @property + def name_base(self) -> str: + """ Base of layer name """ + return "image_to_tensor" + + def instantiate(self, name: str, direct_input: SizeHints, context: dict) -> Layer: + """ Create a given layer object """ + # Potential improvement here is to use either direct input or size parameter + return NormalizeLayer(name=name, shape=self.shape) + + +def create(shape=None): + """ Vel factory function """ + return NormalizeLayerFactory(shape=shape) diff --git a/vel/rl/layer/purgatory/mlp.py b/vel/net/layer/mlp.py similarity index 51% rename from vel/rl/layer/purgatory/mlp.py rename to vel/net/layer/mlp.py index 65560553..1be0d57b 100644 --- a/vel/rl/layer/purgatory/mlp.py +++ b/vel/net/layer/mlp.py @@ -11,15 +11,16 @@ import torch.nn.init as init import vel.util.network as net_util +from vel.api import SizeHints, SizeHint -from vel.api import LinearBackboneModel, ModelFactory +from vel.net.layer_base import LayerFactory, Layer -class MLP(LinearBackboneModel): +class MLP(Layer): """ Simple Multi-Layer-Perceptron network """ - def __init__(self, input_length: int, hidden_layers: typing.List[int], activation: str = 'tanh', + def __init__(self, name: str, input_length: int, hidden_layers: typing.List[int], activation: str = 'tanh', normalization: typing.Optional[str] = None): - super().__init__() + super().__init__(name) self.input_length = input_length self.hidden_layers = hidden_layers @@ -40,11 +41,6 @@ def __init__(self, input_length: int, hidden_layers: typing.List[int], activatio self.model = nn.Sequential(*layer_objects) self.hidden_units = hidden_layers[-1] if hidden_layers else input_length - @property - def output_dim(self) -> int: - """ Final dimension of model output """ - return self.hidden_units - def reset_weights(self): for m in self.modules(): if isinstance(m, nn.Linear): @@ -52,19 +48,36 @@ def reset_weights(self): init.orthogonal_(m.weight, gain=np.sqrt(2)) init.constant_(m.bias, 0.0) - def forward(self, input_data): - input_data = input_data.float() - return self.model(input_data) + def forward(self, direct, state: dict = None, context: dict = None): + return self.model(direct.float()) + def size_hints(self) -> SizeHints: + return SizeHints(SizeHint(None, self.hidden_units)) -def create(input_length, hidden_layers, activation='tanh', normalization=None): - """ Vel factory function """ - def instantiate(**_): + +class MLPFactory(LayerFactory): + def __init__(self, hidden_layers: typing.List[int], activation: str = 'tanh', + normalization: typing.Optional[str] = None): + self.hidden_layers = hidden_layers + self.activation = activation + self.normalization = normalization + + @property + def name_base(self) -> str: + """ Base of layer name """ + return "mlp" + + def 
instantiate(self, name: str, direct_input: SizeHints, context: dict) -> Layer: + """ Create a given layer object """ return MLP( - input_length=input_length, - hidden_layers=hidden_layers, - activation=activation, - normalization=normalization + name=name, + input_length=direct_input.assert_single().last(), + hidden_layers=self.hidden_layers, + activation=self.activation, + normalization=self.normalization ) - return ModelFactory.generic(instantiate) + +def create(hidden_layers, activation='tanh', normalization=None): + """ Vel factory function """ + return MLPFactory(hidden_layers=hidden_layers, activation=activation, normalization=normalization) diff --git a/vel/net/layer/util/repeat_tensor.py b/vel/net/layer/util/repeat_tensor.py index 58ea5dc1..32ca7ede 100644 --- a/vel/net/layer/util/repeat_tensor.py +++ b/vel/net/layer/util/repeat_tensor.py @@ -1,7 +1,5 @@ -import typing - from vel.api import SizeHints, SizeHint -from vel.net.modular import LayerFactory, Layer +from vel.net.layer_base import LayerFactory, Layer class RepeatTensor(Layer): diff --git a/vel/net/modular.py b/vel/net/modular.py index e3147c3f..774689fd 100644 --- a/vel/net/modular.py +++ b/vel/net/modular.py @@ -1,10 +1,9 @@ -import typing import collections -import torch.nn as nn -from vel.api import Network, BackboneNetwork, ModelFactory, SizeHints, SizeHint +import torch.nn as nn -from .layer_base import Layer, LayerFactory +from vel.api import BackboneNetwork, ModelFactory, SizeHints +from .layer_base import LayerFactory def instantiate_layers(layers: [LayerFactory]) -> nn.Module: diff --git a/vel/rl/vecenv/dummy.py b/vel/rl/vecenv/dummy.py index b37f6e27..29b405e0 100644 --- a/vel/rl/vecenv/dummy.py +++ b/vel/rl/vecenv/dummy.py @@ -2,6 +2,7 @@ from vel.openai.baselines.common.atari_wrappers import FrameStack from vel.openai.baselines.common.vec_env.dummy_vec_env import DummyVecEnv from vel.openai.baselines.common.vec_env.vec_frame_stack import VecFrameStack +from vel.openai.baselines.common.vec_env.vec_normalize import VecNormalize from vel.rl.api import VecEnvFactory @@ -9,9 +10,10 @@ class DummyVecEnvWrapper(VecEnvFactory): """ Wraps a single-threaded environment into a one-element vector environment """ - def __init__(self, env, frame_history=None): + def __init__(self, env, frame_history=None, normalize_returns=False): self.env = env self.frame_history = frame_history + self.normalize_returns = normalize_returns def instantiate(self, parallel_envs, seed=0, preset='default') -> VecEnv: """ Create vectorized environments """ @@ -20,6 +22,9 @@ def instantiate(self, parallel_envs, seed=0, preset='default') -> VecEnv: if self.frame_history is not None: envs = VecFrameStack(envs, self.frame_history) + if self.normalize_returns: + envs = VecNormalize(envs, ob=False, ret=True) + return envs def instantiate_single(self, seed=0, preset='default'): @@ -36,6 +41,6 @@ def _creation_function(self, idx, seed, preset): return lambda: self.env.instantiate(seed=seed, serial_id=idx, preset=preset) -def create(env, frame_history=None): +def create(env, frame_history=None, normalize_returns=False): """ Vel factory function """ - return DummyVecEnvWrapper(env, frame_history=frame_history) + return DummyVecEnvWrapper(env, frame_history=frame_history, normalize_returns=normalize_returns) From c3b8c991193ca2efb9f1571dd0ae5b4543d9fbf3 Mon Sep 17 00:00:00 2001 From: Million Integrals Date: Thu, 3 Oct 2019 09:29:25 -0700 Subject: [PATCH 108/162] Revived MuJoCo A2C --- vel/net/layer/input/normalize.py | 2 ++ 
vel/rl/env_roller/step_env_roller.py | 4 ++-- vel/rl/env_roller/trajectory_replay_env_roller.py | 4 ++-- vel/rl/env_roller/transition_replay_env_roller.py | 4 ++-- vel/rl/layer/double_noisy_nature_cnn.py | 9 +++++++-- .../head/q_distributional_noisy_dueling_head.py | 11 ++++++++--- vel/rl/module/noisy_linear.py | 4 ++-- vel/rl/module/rainbow_policy.py | 15 ++------------- vel/rl/policy/acer.py | 7 +++++++ vel/rl/policy/dqn.py | 5 +++++ vel/rl/policy/rainbow.py | 11 +++++++---- .../buffered_mixed_policy_iteration_reinforcer.py | 4 ++-- .../buffered_off_policy_iteration_reinforcer.py | 5 ++--- 13 files changed, 50 insertions(+), 35 deletions(-) diff --git a/vel/net/layer/input/normalize.py b/vel/net/layer/input/normalize.py index e2ac6a03..f8a8dcb5 100644 --- a/vel/net/layer/input/normalize.py +++ b/vel/net/layer/input/normalize.py @@ -6,6 +6,8 @@ class NormalizeLayer(Layer): + """ Layer that normalizes the inputs """ + def __init__(self, name: str, shape): super().__init__(name) if not isinstance(shape, abc.Sequence): diff --git a/vel/rl/env_roller/step_env_roller.py b/vel/rl/env_roller/step_env_roller.py index bb70e1c5..b3b701bf 100644 --- a/vel/rl/env_roller/step_env_roller.py +++ b/vel/rl/env_roller/step_env_roller.py @@ -30,12 +30,12 @@ def environment(self): @torch.no_grad() def rollout(self, batch_info: BatchInfo, number_of_steps: int) -> Rollout: """ Calculate env rollout """ - self.actor.train() + self.actor.eval() accumulator = TensorAccumulator() episode_information = [] # List of dictionaries with episode information for step_idx in range(number_of_steps): - step = self.actor.act(self.last_observation.to(self.device)) + step = self.actor.act(self.last_observation.to(self.device), deterministic=False) # Add step to the tensor accumulator for name, tensor in step.items(): diff --git a/vel/rl/env_roller/trajectory_replay_env_roller.py b/vel/rl/env_roller/trajectory_replay_env_roller.py index f294f4e0..259a7497 100644 --- a/vel/rl/env_roller/trajectory_replay_env_roller.py +++ b/vel/rl/env_roller/trajectory_replay_env_roller.py @@ -37,12 +37,12 @@ def environment(self): @torch.no_grad() def rollout(self, batch_info: BatchInfo, number_of_steps: int) -> Rollout: """ Calculate env rollout """ - self.actor.train() + self.actor.eval() accumulator = TensorAccumulator() episode_information = [] # List of dictionaries with episode information for step_idx in range(number_of_steps): - step = self.actor.act(self.last_observation) + step = self.actor.act(self.last_observation, deterministic=False) replay_extra_information = {} diff --git a/vel/rl/env_roller/transition_replay_env_roller.py b/vel/rl/env_roller/transition_replay_env_roller.py index dc25b676..1bf96acc 100644 --- a/vel/rl/env_roller/transition_replay_env_roller.py +++ b/vel/rl/env_roller/transition_replay_env_roller.py @@ -48,13 +48,13 @@ def environment(self): @torch.no_grad() def rollout(self, batch_info: BatchInfo, number_of_steps: int) -> Rollout: """ Calculate env rollout """ - self.actor.train() + self.actor.eval() accumulator = TensorAccumulator() episode_information = [] # List of dictionaries with episode information for step_idx in range(number_of_steps): - step = self.actor.act(self.last_observation) + step = self.actor.act(self.last_observation, deterministic=False) replay_extra_information = {} diff --git a/vel/rl/layer/double_noisy_nature_cnn.py b/vel/rl/layer/double_noisy_nature_cnn.py index 25299baf..acade064 100644 --- a/vel/rl/layer/double_noisy_nature_cnn.py +++ b/vel/rl/layer/double_noisy_nature_cnn.py @@ -98,14 
+98,19 @@ def reset_weights(self): m.reset_weights() def forward(self, image, state: dict = None, context: dict = None): + if context is not None: + deterministic = context.get('deterministic', False) + else: + deterministic = False + result = image result = F.relu(self.conv1(result)) result = F.relu(self.conv2(result)) result = F.relu(self.conv3(result)) flattened = result.view(result.size(0), -1) - output_one = F.relu(self.linear_layer_one(flattened)) - output_two = F.relu(self.linear_layer_two(flattened)) + output_one = F.relu(self.linear_layer_one(flattened, deterministic=deterministic)) + output_two = F.relu(self.linear_layer_two(flattened, deterministic=deterministic)) return output_one, output_two diff --git a/vel/rl/module/head/q_distributional_noisy_dueling_head.py b/vel/rl/module/head/q_distributional_noisy_dueling_head.py index 59a22ac8..8cc39aa9 100644 --- a/vel/rl/module/head/q_distributional_noisy_dueling_head.py +++ b/vel/rl/module/head/q_distributional_noisy_dueling_head.py @@ -52,9 +52,14 @@ def reset_weights(self): self.linear_layer_advantage.reset_weights() self.linear_layer_value.reset_weights() - def forward(self, advantage_features, value_features): - adv = self.linear_layer_advantage(advantage_features).view(-1, self.action_size, self.atoms) - val = self.linear_layer_value(value_features).view(-1, 1, self.atoms) + def forward(self, advantage_features, value_features, deterministic=False): + adv = self.linear_layer_advantage( + advantage_features, deterministic=deterministic + ).view(-1, self.action_size, self.atoms) + + val = self.linear_layer_value( + value_features, deterministic=deterministic + ).view(-1, 1, self.atoms) # I'm quite unsure if this is the right way to combine these, but this is what paper seems to be suggesting # and I don't know any better way. 
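The combination the comment above worries about is, in the standard dueling formulation (Wang et al., 2016), Q(s, a) = V(s) + A(s, a) - mean_a A(s, a), and in the distributional case the same aggregation is applied independently per support atom before a log-softmax over atoms. A short sketch of that aggregation, assuming advantage logits shaped (batch, actions, atoms) and value logits shaped (batch, 1, atoms); this mirrors common Rainbow implementations and may differ in detail from the head in this repository:

    import torch.nn.functional as F

    def dueling_log_histogram(adv, val):
        # Per-atom dueling aggregation: Q = V + A - mean_a(A)
        q_logits = val + adv - adv.mean(dim=1, keepdim=True)
        # Log-softmax over atoms so every action gets a log-probability histogram
        return F.log_softmax(q_logits, dim=-1)

For a plain (non-distributional) dueling head the same formula applies with a single atom.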
diff --git a/vel/rl/module/noisy_linear.py b/vel/rl/module/noisy_linear.py index 2b94c90c..9a56fa3a 100644 --- a/vel/rl/module/noisy_linear.py +++ b/vel/rl/module/noisy_linear.py @@ -54,8 +54,8 @@ def reset_weights(self): self.weight_sigma.data.fill_(self.initial_std_dev / math.sqrt(self.in_features)) self.bias_sigma.data.fill_(self.initial_std_dev / math.sqrt(self.out_features)) - def forward(self, input_data): - if self.training: + def forward(self, input_data, deterministic=False): + if not deterministic: if self.factorized_noise: weight_epsilon, bias_epsilon = factorized_gaussian_noise( self.in_features, self.out_features, device=input_data.device diff --git a/vel/rl/module/rainbow_policy.py b/vel/rl/module/rainbow_policy.py index a61ef126..8e709758 100644 --- a/vel/rl/module/rainbow_policy.py +++ b/vel/rl/module/rainbow_policy.py @@ -45,23 +45,12 @@ def reset_weights(self): self.net.reset_weights() self.q_head.reset_weights() - def forward(self, observations): + def forward(self, observations, deterministic=False): """ Model forward pass """ - advantage_features, value_features = self.net(observations) + advantage_features, value_features = self.net(observations, context={'deterministic': deterministic}) log_histogram = self.q_head(advantage_features, value_features) return log_histogram def histogram_info(self): """ Return extra information about histogram """ return self.q_head.histogram_info() - - # def step(self, observations): - # """ Sample action from an action space for given state """ - # log_histogram = self(observations) - # actions = self.q_head.sample(log_histogram) - # - # return { - # 'actions': actions, - # 'log_histogram': log_histogram - # } - diff --git a/vel/rl/policy/acer.py b/vel/rl/policy/acer.py index 049c6842..f85136a7 100644 --- a/vel/rl/policy/acer.py +++ b/vel/rl/policy/acer.py @@ -41,6 +41,13 @@ def __init__(self, net: BackboneNetwork, net_factory: ModelFactory, action_space else: self.target_policy = None + def train(self, mode=True): + """ Override train to make sure target model is always in eval mode """ + self.policy.train(mode) + + if self.trust_region: + self.target_policy.train(False) + def reset_weights(self): """ Initialize properly model weights """ self.policy.reset_weights() diff --git a/vel/rl/policy/dqn.py b/vel/rl/policy/dqn.py index 9ef180ba..b1b4ac16 100644 --- a/vel/rl/policy/dqn.py +++ b/vel/rl/policy/dqn.py @@ -38,6 +38,11 @@ def __init__(self, net: BackboneNetwork, net_factory: ModelFactory, action_space self.target_model = QPolicy(net=net_factory.instantiate(), action_space=action_space, dueling_dqn=dueling_dqn) + def train(self, mode=True): + """ Override train to make sure target model is always in eval mode """ + self.model.train(mode) + self.target_model.train(False) + def reset_weights(self): """ Initialize properly model weights """ self.model.reset_weights() diff --git a/vel/rl/policy/rainbow.py b/vel/rl/policy/rainbow.py index 846f032c..57b34fbf 100644 --- a/vel/rl/policy/rainbow.py +++ b/vel/rl/policy/rainbow.py @@ -11,11 +11,9 @@ class Rainbow(RlPolicy): """ Deep Q-Learning algorithm """ - # def __init__(self, model_factory: ModelFactory, discount_factor: float, double_dqn: bool, - def __init__(self, net: BackboneNetwork, net_factory: ModelFactory, action_space: gym.Space, - discount_factor: float, target_update_frequency: int, - vmin: float, vmax: float, atoms: int = 1, initial_std_dev: float = 0.4, factorized_noise: bool = True): + discount_factor: float, target_update_frequency: int, vmin: float, vmax: float, atoms: int 
= 1,
+                 initial_std_dev: float = 0.4, factorized_noise: bool = True):
         super().__init__(discount_factor)
 
         self.model = RainbowPolicy(
@@ -50,6 +48,11 @@ def __init__(self, net: BackboneNetwork, net_factory: ModelFactory, action_space
         self.register_buffer('support_atoms', self.model.support_atoms.clone())
         self.atom_delta = self.model.atom_delta
 
+    def train(self, mode=True):
+        """ Override train to make sure target model is always in eval mode """
+        self.model.train(mode)
+        self.target_model.train(False)
+
     def reset_weights(self):
         """ Initialize properly model weights """
         self.model.reset_weights()
diff --git a/vel/rl/reinforcer/buffered_mixed_policy_iteration_reinforcer.py b/vel/rl/reinforcer/buffered_mixed_policy_iteration_reinforcer.py
index d53225a9..f48183be 100644
--- a/vel/rl/reinforcer/buffered_mixed_policy_iteration_reinforcer.py
+++ b/vel/rl/reinforcer/buffered_mixed_policy_iteration_reinforcer.py
@@ -1,10 +1,10 @@
+import sys
 import attr
 import numpy as np
-import sys
 import torch
 import tqdm
 
-from vel.api import TrainingInfo, EpochInfo, BatchInfo, Model, ModelFactory
+from vel.api import TrainingInfo, EpochInfo, BatchInfo, ModelFactory
 from vel.openai.baselines.common.vec_env import VecEnv
 from vel.rl.api import (
     Reinforcer, ReinforcerFactory, VecEnvFactory, ReplayEnvRollerBase, ReplayEnvRollerFactoryBase,
diff --git a/vel/rl/reinforcer/buffered_off_policy_iteration_reinforcer.py b/vel/rl/reinforcer/buffered_off_policy_iteration_reinforcer.py
index d55b3bf0..c42751dc 100644
--- a/vel/rl/reinforcer/buffered_off_policy_iteration_reinforcer.py
+++ b/vel/rl/reinforcer/buffered_off_policy_iteration_reinforcer.py
@@ -1,8 +1,7 @@
-import attr
 import sys
-import tqdm
-
+import attr
 import torch
+import tqdm
 
 from vel.api import TrainingInfo, EpochInfo, BatchInfo, Model, ModelFactory
 from vel.openai.baselines.common.vec_env import VecEnv

From 0ce852fed3c47fec61de43501d33a16960c1b01e Mon Sep 17 00:00:00 2001
From: Million Integrals
Date: Thu, 3 Oct 2019 10:47:57 -0700
Subject: [PATCH 109/162] Revived MuJoCo PPO.
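The PPO config below now builds its backbone from modular layers: a normalize layer, a repeat-tensor layer that duplicates its input, and a new parallel layer holding two MLP towers, one for the policy head and one for the value head. Roughly, the resulting forward pass behaves like the sketch below (sizes and names are illustrative, normalization is omitted, and the real layers also thread state and context through each call):

    import torch
    import torch.nn as nn

    class TwoTowerBackbone(nn.Module):
        """ Sketch of repeat x2 followed by parallel(policy_mlp, value_mlp) """

        def __init__(self, obs_dim=11, hidden=64):
            super().__init__()
            self.towers = nn.ModuleList([
                nn.Sequential(nn.Linear(obs_dim, hidden), nn.Tanh(), nn.Linear(hidden, hidden), nn.Tanh())
                for _ in range(2)
            ])

        def forward(self, observations):
            repeated = (observations, observations)  # what the repeat_tensor layer does
            # The parallel layer routes one copy of the input to each tower
            return tuple(tower(x) for tower, x in zip(self.towers, repeated))

    # Batch of 8 Reacher-v2 observations -> (policy_features, value_features)
    policy_features, value_features = TwoTowerBackbone()(torch.randn(8, 11))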
--- examples-configs/rl/mujoco/mujoco_ppo.yaml | 37 +++++++-------- vel/api/size_hint.py | 4 ++ vel/net/layer/arch/__init__.py | 0 vel/net/layer/arch/parallel.py | 55 ++++++++++++++++++++++ 4 files changed, 76 insertions(+), 20 deletions(-) create mode 100644 vel/net/layer/arch/__init__.py create mode 100644 vel/net/layer/arch/parallel.py diff --git a/examples-configs/rl/mujoco/mujoco_ppo.yaml b/examples-configs/rl/mujoco/mujoco_ppo.yaml index a1cc2113..780cc882 100644 --- a/examples-configs/rl/mujoco/mujoco_ppo.yaml +++ b/examples-configs/rl/mujoco/mujoco_ppo.yaml @@ -4,15 +4,15 @@ name: 'mujoco_ppo' env: name: vel.rl.env.mujoco game: !param game = 'Reacher-v2' - normalize_returns: true vec_env: name: vel.rl.vecenv.dummy + normalize_returns: true model: - name: vel.rl.algo.ppo + name: vel.rl.policy.ppo entropy_coefficient: 0.0 value_coefficient: 0.5 @@ -22,24 +22,21 @@ model: discount_factor: 0.99 # Discount factor for the rewards gae_lambda: 0.95 # Generalized Advantage Estimator Lambda parameter - policy: - name: vel.rl.policy.stochastic_policy_separate - - input_block: - name: vel.module.input.normalize_observations - input_shape: 11 - - policy_backbone: - name: vel.rl.backbone.mlp - input_length: 11 - hidden_layers: [64, 64] - activation: 'tanh' - - value_backbone: - name: vel.rl.backbone.mlp - input_length: 11 - hidden_layers: [64, 64] - activation: 'tanh' + net: + name: vel.net.modular + layers: + - name: vel.net.layer.input.normalize + shape: 11 + - name: vel.net.layer.util.repeat_tensor + times: 2 # Need to repeat output twice, to consume by the 'parallel' layers + - name: vel.net.layer.arch.parallel + layers: + - name: vel.net.layer.mlp + hidden_layers: [64, 64] + activation: 'tanh' + - name: vel.net.layer.mlp + hidden_layers: [64, 64] + activation: 'tanh' reinforcer: diff --git a/vel/api/size_hint.py b/vel/api/size_hint.py index d6a3879b..063810ac 100644 --- a/vel/api/size_hint.py +++ b/vel/api/size_hint.py @@ -68,5 +68,9 @@ def assert_single(self, length: typing.Optional[int] = None) -> SizeHint: return self.size_hints + def unwrap(self): + """ Return the underlying data """ + return self.size_hints + def __repr__(self): return repr(self.size_hints) diff --git a/vel/net/layer/arch/__init__.py b/vel/net/layer/arch/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/vel/net/layer/arch/parallel.py b/vel/net/layer/arch/parallel.py new file mode 100644 index 00000000..de592f2a --- /dev/null +++ b/vel/net/layer/arch/parallel.py @@ -0,0 +1,55 @@ +import torch.nn as nn + +from vel.api import SizeHints +from vel.net.layer_base import LayerFactory, Layer + + +class ParallelLayer(Layer): + """ Network that consists of parallel "towers" """ + + def __init__(self, name: str, layers: [Layer]): + super().__init__(name) + + self.layers = nn.ModuleList(layers) + self._size_hints = SizeHints(tuple(layer.size_hints().unwrap() for layer in self.layers)) + + def size_hints(self) -> SizeHints: + """ Size hints for this network """ + return self._size_hints + + def forward(self, direct, state: dict = None, context: dict = None): + """ Forward propagation of a single layer """ + results = [layer(x, state, context) for layer, x in zip(self.layers, direct)] + return tuple(results) + + +class ParallelLayerFactory(LayerFactory): + """ Factory for Parallel layer """ + + def __init__(self, layers: [LayerFactory]): + self.layers = layers + + @property + def name_base(self) -> str: + """ Base of layer name """ + return "parallel" + + def instantiate(self, name: str, direct_input: SizeHints, 
context: dict) -> Layer: + hints = direct_input.assert_tuple(len(self.layers)) + + layers = [] + + for idx, (size_hint, layer_factory) in enumerate(zip(hints, self.layers)): + counter = idx + 1 + local_name = "{}_{:04d}".format(layer_factory.name_base, counter) + global_name = f"{name}/{local_name}" + + layer = layer_factory.instantiate(name=global_name, direct_input=SizeHints(size_hint), context=context) + layers.append(layer) + + return ParallelLayer(name, layers) + + +def create(layers: [LayerFactory]): + """ Vel factory function """ + return ParallelLayerFactory(layers=layers) From 3c47758ca36434ba8f8c5fa82b502ca751cf742f Mon Sep 17 00:00:00 2001 From: Million Integrals Date: Thu, 3 Oct 2019 11:58:10 -0700 Subject: [PATCH 110/162] Sizing network input according to the environment size. --- examples-configs/rl/atari/atari_a2c.yaml | 1 - .../rl/atari/atari_a2c_tf_rmsprop.yaml | 1 - examples-configs/rl/atari/atari_acer.yaml | 1 - examples-configs/rl/atari/atari_ddqn.yaml | 1 - examples-configs/rl/atari/atari_dqn.yaml | 1 - examples-configs/rl/atari/atari_ppo.yaml | 1 - examples-configs/rl/atari/atari_rainbow.yaml | 3 +- examples-configs/rl/atari/atari_trpo.yaml | 1 - examples-configs/rl/mujoco/mujoco_a2c.yaml | 1 - examples-configs/rl/mujoco/mujoco_ppo.yaml | 1 - examples-configs/rl/mujoco/mujoco_trpo.yaml | 37 +++++---- vel/api/size_hint.py | 4 + vel/net/layer/input/image_to_tensor.py | 24 +++--- vel/net/layer/input/normalize.py | 7 +- vel/net/modular.py | 10 ++- vel/rl/env_roller/step_env_roller.py | 2 +- .../trajectory_replay_env_roller.py | 2 +- .../transition_replay_env_roller.py | 2 +- vel/rl/layer/nature_cnn.py | 3 +- vel/rl/policy/a2c.py | 13 +++- vel/rl/policy/acer.py | 19 ++++- vel/rl/policy/dqn.py | 16 ++-- vel/rl/policy/ppo.py | 13 +++- vel/rl/policy/rainbow.py | 14 +++- vel/rl/policy/trpo.py | 78 +++++++++++++------ ...fered_mixed_policy_iteration_reinforcer.py | 2 +- ...uffered_off_policy_iteration_reinforcer.py | 2 +- .../on_policy_iteration_reinforcer.py | 2 +- vel/util/situational.py | 17 ++++ vel/util/tensor_accumulator.py | 16 ---- vel/util/tensor_util.py | 14 ++++ 31 files changed, 196 insertions(+), 113 deletions(-) delete mode 100644 vel/util/tensor_accumulator.py diff --git a/examples-configs/rl/atari/atari_a2c.yaml b/examples-configs/rl/atari/atari_a2c.yaml index cbe9dc46..fa5fdcab 100644 --- a/examples-configs/rl/atari/atari_a2c.yaml +++ b/examples-configs/rl/atari/atari_a2c.yaml @@ -22,7 +22,6 @@ model: name: vel.net.modular layers: - name: vel.net.layer.input.image_to_tensor - size: [84, 84, 4] # Number of channels is frame history - name: vel.rl.layer.nature_cnn - name: vel.net.layer.util.repeat_tensor times: 2 # Need to repeat output twice, for action and value heads diff --git a/examples-configs/rl/atari/atari_a2c_tf_rmsprop.yaml b/examples-configs/rl/atari/atari_a2c_tf_rmsprop.yaml index 13de0fef..fc398e5a 100644 --- a/examples-configs/rl/atari/atari_a2c_tf_rmsprop.yaml +++ b/examples-configs/rl/atari/atari_a2c_tf_rmsprop.yaml @@ -22,7 +22,6 @@ model: name: vel.net.modular layers: - name: vel.net.layer.input.image_to_tensor - size: [84, 84, 4] # Number of channels is frame history - name: vel.rl.layer.nature_cnn - name: vel.net.layer.util.repeat_tensor times: 2 # Need to repeat output twice, for action and value heads diff --git a/examples-configs/rl/atari/atari_acer.yaml b/examples-configs/rl/atari/atari_acer.yaml index 256b09bc..e0bff147 100644 --- a/examples-configs/rl/atari/atari_acer.yaml +++ b/examples-configs/rl/atari/atari_acer.yaml @@ -29,7 +29,6 @@ 
model: name: vel.net.modular layers: - name: vel.net.layer.input.image_to_tensor - size: [84, 84, 4] # Number of channels is frame history - name: vel.rl.layer.nature_cnn - name: vel.net.layer.util.repeat_tensor times: 2 # Need to repeat output twice, for action and value heads diff --git a/examples-configs/rl/atari/atari_ddqn.yaml b/examples-configs/rl/atari/atari_ddqn.yaml index d4b7430b..9decd793 100644 --- a/examples-configs/rl/atari/atari_ddqn.yaml +++ b/examples-configs/rl/atari/atari_ddqn.yaml @@ -30,7 +30,6 @@ model: name: vel.net.modular layers: - name: vel.net.layer.input.image_to_tensor - size: [84, 84, 4] # Number of channels is frame history - name: vel.rl.layer.double_nature_cnn diff --git a/examples-configs/rl/atari/atari_dqn.yaml b/examples-configs/rl/atari/atari_dqn.yaml index 9851ecba..c2fd5cde 100644 --- a/examples-configs/rl/atari/atari_dqn.yaml +++ b/examples-configs/rl/atari/atari_dqn.yaml @@ -27,7 +27,6 @@ model: name: vel.net.modular layers: - name: vel.net.layer.input.image_to_tensor - size: [84, 84, 4] # Number of channels is frame history - name: vel.rl.layer.nature_cnn diff --git a/examples-configs/rl/atari/atari_ppo.yaml b/examples-configs/rl/atari/atari_ppo.yaml index 12d043e0..882926a9 100644 --- a/examples-configs/rl/atari/atari_ppo.yaml +++ b/examples-configs/rl/atari/atari_ppo.yaml @@ -29,7 +29,6 @@ model: name: vel.net.modular layers: - name: vel.net.layer.input.image_to_tensor - size: [84, 84, 4] # Number of channels is frame history - name: vel.rl.layer.nature_cnn - name: vel.net.layer.util.repeat_tensor times: 2 # Need to repeat output twice, for action and value heads diff --git a/examples-configs/rl/atari/atari_rainbow.yaml b/examples-configs/rl/atari/atari_rainbow.yaml index a11c3afc..78525aea 100644 --- a/examples-configs/rl/atari/atari_rainbow.yaml +++ b/examples-configs/rl/atari/atari_rainbow.yaml @@ -31,9 +31,8 @@ model: name: vel.net.modular layers: - name: vel.net.layer.input.image_to_tensor - size: [84, 84, 4] # Number of channels is frame history - name: vel.rl.layer.double_noisy_nature_cnn - # TODO(this should ideally be brough from level up) + # TODO(this should ideally be brought from level up) initial_std_dev: 0.5 factorized_noise: true diff --git a/examples-configs/rl/atari/atari_trpo.yaml b/examples-configs/rl/atari/atari_trpo.yaml index 56af94db..adc7850a 100644 --- a/examples-configs/rl/atari/atari_trpo.yaml +++ b/examples-configs/rl/atari/atari_trpo.yaml @@ -29,7 +29,6 @@ model: name: vel.net.modular layers: - name: vel.net.layer.input.image_to_tensor - size: [84, 84, 4] # Number of channels is frame history - name: vel.rl.layer.nature_cnn_small value_net: diff --git a/examples-configs/rl/mujoco/mujoco_a2c.yaml b/examples-configs/rl/mujoco/mujoco_a2c.yaml index 2e33f23b..5c49c725 100644 --- a/examples-configs/rl/mujoco/mujoco_a2c.yaml +++ b/examples-configs/rl/mujoco/mujoco_a2c.yaml @@ -23,7 +23,6 @@ model: name: vel.net.modular layers: - name: vel.net.layer.input.normalize - shape: 11 - name: vel.net.layer.mlp hidden_layers: [64, 64] activation: 'tanh' diff --git a/examples-configs/rl/mujoco/mujoco_ppo.yaml b/examples-configs/rl/mujoco/mujoco_ppo.yaml index 780cc882..df6db42c 100644 --- a/examples-configs/rl/mujoco/mujoco_ppo.yaml +++ b/examples-configs/rl/mujoco/mujoco_ppo.yaml @@ -26,7 +26,6 @@ model: name: vel.net.modular layers: - name: vel.net.layer.input.normalize - shape: 11 - name: vel.net.layer.util.repeat_tensor times: 2 # Need to repeat output twice, to consume by the 'parallel' layers - name: vel.net.layer.arch.parallel 
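The explicit shape and size entries disappear from the configs above because each policy factory now asks the environment for its observation space, converts it to a size hint, and passes that hint to net_factory.instantiate(size_hint=...), letting input layers such as normalize and image_to_tensor size themselves. The helper vel.util.situational.observation_space_to_size_hint is referenced but its body is not shown in this patch; a plausible sketch of its behaviour for a gym Box space, offered purely as an assumption, would be:

    import gym.spaces

    from vel.api import SizeHint, SizeHints

    def observation_space_to_size_hint(space):
        """ Assumed behaviour: prepend a free batch dimension to the observation shape """
        if isinstance(space, gym.spaces.Box):
            return SizeHints(SizeHint(*([None] + list(space.shape))))
        raise NotImplementedError("Unsupported observation space: {}".format(space))

This mirrors how the normalize layer builds its own size hints, SizeHints(SizeHint(*([None] + list(self.shape)))), elsewhere in this series.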
diff --git a/examples-configs/rl/mujoco/mujoco_trpo.yaml b/examples-configs/rl/mujoco/mujoco_trpo.yaml index 47356e0d..743877f1 100644 --- a/examples-configs/rl/mujoco/mujoco_trpo.yaml +++ b/examples-configs/rl/mujoco/mujoco_trpo.yaml @@ -3,15 +3,15 @@ name: 'mujoco_trpo' env: name: vel.rl.env.mujoco game: !param game = 'Reacher-v2' - normalize_returns: true vec_env: name: vel.rl.vecenv.dummy + normalize_returns: true model: - name: vel.rl.algo.trpo + name: vel.rl.policy.trpo discount_factor: 0.99 # Discount factor for the rewards gae_lambda: 0.98 # Generalized Advantage Estimator Lambda parameter @@ -24,21 +24,24 @@ model: vf_iters: 5 entropy_coefficient: 0.0 - input_block: - name: vel.module.input.normalize_observations - input_shape: 11 - - policy_backbone: - name: vel.rl.backbone.mlp - input_length: 11 - hidden_layers: [32, 32] - activation: 'tanh' - - value_backbone: - name: vel.rl.backbone.mlp - input_length: 11 - hidden_layers: [32, 32] - activation: 'tanh' + input_net: + name: vel.net.modular + layers: + - name: vel.net.layer.input.normalize + + policy_net: + name: vel.net.modular + layers: + - name: vel.net.layer.mlp + hidden_layers: [32, 32] + activation: 'tanh' + + value_net: + name: vel.net.modular + layers: + - name: vel.net.layer.mlp + hidden_layers: [32, 32] + activation: 'tanh' reinforcer: diff --git a/vel/api/size_hint.py b/vel/api/size_hint.py index 063810ac..78851126 100644 --- a/vel/api/size_hint.py +++ b/vel/api/size_hint.py @@ -15,6 +15,10 @@ def last(self) -> int: assert self[-1] is not None, "Size hint shouldn't be None" return self[-1] + def shape(self, idx=1) -> typing.Tuple[int]: + """ Get shape of size hint, except for a number of dimensions (batch dimensions """ + return self[idx:] + def __repr__(self): internal = ", ".join([self._inner_repr(s) for s in self]) return f"{self.__class__.__name__}({internal})" diff --git a/vel/net/layer/input/image_to_tensor.py b/vel/net/layer/input/image_to_tensor.py index 3019e933..4924c33f 100644 --- a/vel/net/layer/input/image_to_tensor.py +++ b/vel/net/layer/input/image_to_tensor.py @@ -9,12 +9,12 @@ class ImageToTensorLayer(Layer): Flip channels to a [C, W, H] order and potentially convert 8-bit color values to floats """ - def __init__(self, name: str, size: tuple = None): + def __init__(self, name: str, shape: tuple = None): super().__init__(name) - if size is not None: - assert len(size) == 3, "Images must have three dimensions" - self.w, self.h, self.c = size + if shape is not None: + assert len(shape) == 3, "Images must have three dimensions" + self.w, self.h, self.c = shape else: self.w, self.h, self.c = (None, None, None) @@ -26,8 +26,8 @@ def size_hints(self) -> SizeHints: class ImageToTensorLayerFactory(LayerFactory): - def __init__(self, size: tuple = None): - self.size = size + def __init__(self, shape: tuple = None): + self.shape = shape @property def name_base(self) -> str: @@ -36,10 +36,14 @@ def name_base(self) -> str: def instantiate(self, name: str, direct_input: SizeHints, context: dict) -> Layer: """ Create a given layer object """ - # Potential improvement here is to use either direct input or size parameter - return ImageToTensorLayer(name=name, size=self.size) + if self.shape is None: + shape = direct_input.assert_single().shape() + else: + shape = self.shape + + return ImageToTensorLayer(name=name, shape=shape) -def create(size: tuple = None): +def create(shape: tuple = None): """ Vel factory function """ - return ImageToTensorLayerFactory(size=size) + return ImageToTensorLayerFactory(shape=shape) diff 
--git a/vel/net/layer/input/normalize.py b/vel/net/layer/input/normalize.py index f8a8dcb5..8da64fa7 100644 --- a/vel/net/layer/input/normalize.py +++ b/vel/net/layer/input/normalize.py @@ -36,7 +36,12 @@ def name_base(self) -> str: def instantiate(self, name: str, direct_input: SizeHints, context: dict) -> Layer: """ Create a given layer object """ # Potential improvement here is to use either direct input or size parameter - return NormalizeLayer(name=name, shape=self.shape) + if self.shape is None: + shape = direct_input.assert_single().shape() + else: + shape = self.shape + + return NormalizeLayer(name=name, shape=shape) def create(shape=None): diff --git a/vel/net/modular.py b/vel/net/modular.py index 774689fd..9416992a 100644 --- a/vel/net/modular.py +++ b/vel/net/modular.py @@ -6,9 +6,8 @@ from .layer_base import LayerFactory -def instantiate_layers(layers: [LayerFactory]) -> nn.Module: +def instantiate_layers(layers: [LayerFactory], size_hint: SizeHints) -> nn.Module: """ Instantiate list of layer factories into PyTorch Module """ - size_hint = SizeHints() # Empty input at first module_dict = collections.OrderedDict() context = {} @@ -96,9 +95,12 @@ class ModularNetworkFactory(ModelFactory): def __init__(self, layers: [LayerFactory]): self.layers = layers - def instantiate(self, **extra_args) -> BackboneNetwork: + def instantiate(self, size_hint=None, **extra_args) -> BackboneNetwork: """ Create either stateful or not modular network instance """ - layers = instantiate_layers(self.layers) + if size_hint is None: + size_hint = SizeHints() + + layers = instantiate_layers(self.layers, size_hint=size_hint) is_stateful = any(l.is_stateful for l in layers) if is_stateful: diff --git a/vel/rl/env_roller/step_env_roller.py b/vel/rl/env_roller/step_env_roller.py index b3b701bf..1a6c22a5 100644 --- a/vel/rl/env_roller/step_env_roller.py +++ b/vel/rl/env_roller/step_env_roller.py @@ -5,7 +5,7 @@ from vel.openai.baselines.common.vec_env import VecEnv from vel.rl.api import Trajectories, Rollout, EnvRollerBase, EnvRollerFactoryBase, RlPolicy from vel.rl.util.actor import PolicyActor -from vel.util.tensor_accumulator import TensorAccumulator +from vel.util.tensor_util import TensorAccumulator class StepEnvRoller(EnvRollerBase): diff --git a/vel/rl/env_roller/trajectory_replay_env_roller.py b/vel/rl/env_roller/trajectory_replay_env_roller.py index 259a7497..b2bd9092 100644 --- a/vel/rl/env_roller/trajectory_replay_env_roller.py +++ b/vel/rl/env_roller/trajectory_replay_env_roller.py @@ -7,7 +7,7 @@ Trajectories, Rollout, ReplayEnvRollerBase, ReplayEnvRollerFactoryBase, ReplayBuffer, ReplayBufferFactory, RlPolicy ) from vel.rl.util.actor import PolicyActor -from vel.util.tensor_accumulator import TensorAccumulator +from vel.util.tensor_util import TensorAccumulator class TrajectoryReplayEnvRoller(ReplayEnvRollerBase): diff --git a/vel/rl/env_roller/transition_replay_env_roller.py b/vel/rl/env_roller/transition_replay_env_roller.py index 1bf96acc..64e48f02 100644 --- a/vel/rl/env_roller/transition_replay_env_roller.py +++ b/vel/rl/env_roller/transition_replay_env_roller.py @@ -9,7 +9,7 @@ Trajectories, Rollout, ReplayEnvRollerBase, ReplayEnvRollerFactoryBase, ReplayBuffer, ReplayBufferFactory, RlPolicy ) from vel.rl.util.actor import PolicyActor -from vel.util.tensor_accumulator import TensorAccumulator +from vel.util.tensor_util import TensorAccumulator class TransitionReplayEnvRoller(ReplayEnvRollerBase): diff --git a/vel/rl/layer/nature_cnn.py b/vel/rl/layer/nature_cnn.py index 
16cfed1b..f2503a62 100644 --- a/vel/rl/layer/nature_cnn.py +++ b/vel/rl/layer/nature_cnn.py @@ -13,7 +13,7 @@ import vel.util.network as net_util from vel.api import SizeHint, SizeHints -from vel.net.modular import Layer, LayerFactory +from vel.net.layer_base import Layer, LayerFactory class NatureCnn(Layer): @@ -109,4 +109,3 @@ def instantiate(self, name: str, direct_input: SizeHints, context: dict) -> Laye def create(output_dim=512): """ Vel factory function """ return NatureCnnFactory(output_dim=output_dim) - diff --git a/vel/rl/policy/a2c.py b/vel/rl/policy/a2c.py index cd03eead..7b23d33d 100644 --- a/vel/rl/policy/a2c.py +++ b/vel/rl/policy/a2c.py @@ -3,6 +3,7 @@ import torch.nn.functional as F from vel.metric.base import AveragingNamedMetric +from vel.util.situational import observation_space_to_size_hint from vel.util.stats import explained_variance from vel.api import ModelFactory, BatchInfo, BackboneNetwork @@ -113,8 +114,8 @@ def metrics(self) -> list: class A2CFactory(ModelFactory): """ Factory class for policy gradient models """ - def __init__(self, net, entropy_coefficient, value_coefficient, discount_factor, gae_lambda=1.0): - self.net = net + def __init__(self, net_factory, entropy_coefficient, value_coefficient, discount_factor, gae_lambda=1.0): + self.net_factory = net_factory self.entropy_coefficient = entropy_coefficient self.value_coefficient = value_coefficient self.discount_factor = discount_factor @@ -123,7 +124,11 @@ def __init__(self, net, entropy_coefficient, value_coefficient, discount_factor, def instantiate(self, **extra_args): """ Instantiate the model """ action_space = extra_args.pop('action_space') - net = self.net.instantiate(**extra_args) + observation_space = extra_args.pop('observation_space') + + size_hint = observation_space_to_size_hint(observation_space) + + net = self.net_factory.instantiate(size_hint=size_hint, **extra_args) return A2C( net=net, @@ -138,7 +143,7 @@ def instantiate(self, **extra_args): def create(net: ModelFactory, entropy_coefficient, value_coefficient, discount_factor, gae_lambda=1.0): """ Vel factory function """ return A2CFactory( - net=net, + net_factory=net, entropy_coefficient=entropy_coefficient, value_coefficient=value_coefficient, discount_factor=discount_factor, diff --git a/vel/rl/policy/acer.py b/vel/rl/policy/acer.py index f85136a7..b468eb30 100644 --- a/vel/rl/policy/acer.py +++ b/vel/rl/policy/acer.py @@ -1,3 +1,4 @@ +import typing import gym import torch import torch.nn.functional as F @@ -6,6 +7,7 @@ from vel.metric.base import AveragingNamedMetric from vel.rl.api import Trajectories, RlPolicy, Rollout from vel.rl.module.q_stochastic_policy import QStochasticPolicy +from vel.util.situational import observation_space_to_size_hint def select_indices(tensor, indices): @@ -16,7 +18,7 @@ def select_indices(tensor, indices): class ACER(RlPolicy): """ Actor-Critic with Experience Replay - policy gradient calculations """ - def __init__(self, net: BackboneNetwork, net_factory: ModelFactory, action_space: gym.Space, + def __init__(self, net: BackboneNetwork, target_net: typing.Optional[BackboneNetwork], action_space: gym.Space, discount_factor: float, trust_region: bool = True, entropy_coefficient: float = 0.01, q_coefficient: float = 0.5, rho_cap: float = 10.0, retrace_rho_cap: float = 1.0, average_model_alpha: float = 0.99, trust_region_delta: float = 1.0): @@ -37,7 +39,7 @@ def __init__(self, net: BackboneNetwork, net_factory: ModelFactory, action_space self.policy = QStochasticPolicy(net, action_space) if 
self.trust_region: - self.target_policy = QStochasticPolicy(net_factory.instantiate(), action_space) + self.target_policy = QStochasticPolicy(target_net, action_space) else: self.target_policy = None @@ -257,11 +259,20 @@ def __init__(self, net_factory, trust_region: bool, entropy_coefficient: float, def instantiate(self, **extra_args): """ Instantiate the model """ action_space = extra_args.pop('action_space') - net = self.net_factory.instantiate(**extra_args) + observation_space = extra_args.pop('observation_space') + + size_hint = observation_space_to_size_hint(observation_space) + + net = self.net_factory.instantiate(size_hint=size_hint, **extra_args) + + if self.trust_region: + target_net = self.net_factory.instantiate(size_hint=size_hint, **extra_args) + else: + target_net = None return ACER( net=net, - net_factory=self.net_factory, + target_net=target_net, action_space=action_space, trust_region=self.trust_region, entropy_coefficient=self.entropy_coefficient, diff --git a/vel/rl/policy/dqn.py b/vel/rl/policy/dqn.py index b1b4ac16..cb563e5d 100644 --- a/vel/rl/policy/dqn.py +++ b/vel/rl/policy/dqn.py @@ -12,17 +12,19 @@ from vel.rl.api import RlPolicy, Rollout from vel.rl.module.q_policy import QPolicy from vel.rl.module.noise.eps_greedy import EpsGreedy +from vel.util.situational import observation_space_to_size_hint class DQN(RlPolicy): """ Deep Q-Learning algorithm """ - def __init__(self, net: BackboneNetwork, net_factory: ModelFactory, action_space: gym.Space, + def __init__(self, net: BackboneNetwork, target_net: BackboneNetwork, action_space: gym.Space, epsilon: typing.Union[float, Schedule], discount_factor: float, double_dqn: bool, dueling_dqn: bool, target_update_frequency: int): super().__init__(discount_factor) self.model = QPolicy(net=net, action_space=action_space, dueling_dqn=dueling_dqn) + self.target_model = QPolicy(net=target_net, action_space=action_space, dueling_dqn=dueling_dqn) self.double_dqn = double_dqn self.target_update_frequency = target_update_frequency @@ -33,11 +35,8 @@ def __init__(self, net: BackboneNetwork, net_factory: ModelFactory, action_space self.epsilon_schedule = epsilon self.epsilon_value = self.epsilon_schedule.value(0.0) - self.action_noise = EpsGreedy(action_space=action_space) - self.target_model = QPolicy(net=net_factory.instantiate(), action_space=action_space, dueling_dqn=dueling_dqn) - def train(self, mode=True): """ Override train to make sure target model is always in eval mode """ self.model.train(mode) @@ -141,11 +140,16 @@ def __init__(self, net_factory: ModelFactory, epsilon: typing.Union[float, Sched def instantiate(self, **extra_args): """ Instantiate the model """ action_space = extra_args.pop('action_space') - net = self.net_factory.instantiate(**extra_args) + observation_space = extra_args.pop('observation_space') + + size_hint = observation_space_to_size_hint(observation_space) + + net = self.net_factory.instantiate(size_hint=size_hint, **extra_args) + target_net = self.net_factory.instantiate(size_hint=size_hint, **extra_args) return DQN( net=net, - net_factory=self.net_factory, + target_net=target_net, action_space=action_space, epsilon=self.epsilon, discount_factor=self.discount_factor, diff --git a/vel/rl/policy/ppo.py b/vel/rl/policy/ppo.py index 6230020d..ea09a29c 100644 --- a/vel/rl/policy/ppo.py +++ b/vel/rl/policy/ppo.py @@ -4,6 +4,7 @@ import numbers from vel.api import BatchInfo, ModelFactory, BackboneNetwork +from vel.util.situational import observation_space_to_size_hint from vel.util.stats import 
explained_variance from vel.function.constant import ConstantSchedule from vel.metric.base import AveragingNamedMetric @@ -153,9 +154,9 @@ def metrics(self) -> list: class PPOFactory(ModelFactory): """ Factory class for policy gradient models """ - def __init__(self, net, entropy_coefficient, value_coefficient, cliprange, discount_factor: float, + def __init__(self, net_factory, entropy_coefficient, value_coefficient, cliprange, discount_factor: float, normalize_advantage: bool = True, gae_lambda: float = 1.0): - self.net = net + self.net_factory = net_factory self.entropy_coefficient = entropy_coefficient self.value_coefficient = value_coefficient self.cliprange = cliprange @@ -166,7 +167,11 @@ def __init__(self, net, entropy_coefficient, value_coefficient, cliprange, disco def instantiate(self, **extra_args): """ Instantiate the model """ action_space = extra_args.pop('action_space') - net = self.net.instantiate(**extra_args) + observation_space = extra_args.pop('observation_space') + + size_hint = observation_space_to_size_hint(observation_space) + + net = self.net_factory.instantiate(size_hint=size_hint, **extra_args) return PPO( net=net, @@ -184,7 +189,7 @@ def create(net: ModelFactory, entropy_coefficient, value_coefficient, cliprange, normalize_advantage: bool = True, gae_lambda: float = 1.0): """ Vel factory function """ return PPOFactory( - net=net, + net_factory=net, entropy_coefficient=entropy_coefficient, value_coefficient=value_coefficient, cliprange=cliprange, diff --git a/vel/rl/policy/rainbow.py b/vel/rl/policy/rainbow.py index 57b34fbf..c28c3b29 100644 --- a/vel/rl/policy/rainbow.py +++ b/vel/rl/policy/rainbow.py @@ -6,12 +6,13 @@ from vel.metric import AveragingNamedMetric from vel.rl.api import RlPolicy, Rollout from vel.rl.module.rainbow_policy import RainbowPolicy +from vel.util.situational import observation_space_to_size_hint class Rainbow(RlPolicy): """ Deep Q-Learning algorithm """ - def __init__(self, net: BackboneNetwork, net_factory: ModelFactory, action_space: gym.Space, + def __init__(self, net: BackboneNetwork, target_net: BackboneNetwork, action_space: gym.Space, discount_factor: float, target_update_frequency: int, vmin: float, vmax: float, atoms: int = 1, initial_std_dev: float = 0.4, factorized_noise: bool = True): super().__init__(discount_factor) @@ -27,7 +28,7 @@ def __init__(self, net: BackboneNetwork, net_factory: ModelFactory, action_space ) self.target_model = RainbowPolicy( - net=net_factory.instantiate(), + net=target_net, action_space=action_space, vmin=vmin, vmax=vmax, @@ -211,12 +212,17 @@ def __init__(self, net_factory: ModelFactory, discount_factor: float, target_upd def instantiate(self, **extra_args): """ Instantiate the model """ action_space = extra_args.pop('action_space') + observation_space = extra_args.pop('observation_space') + + size_hint = observation_space_to_size_hint(observation_space) + # TODO(jerry): Push noisy net parameters down the stack here - net = self.net_factory.instantiate(**extra_args) + net = self.net_factory.instantiate(size_hint=size_hint, **extra_args) + target_net = self.net_factory.instantiate(size_hint=size_hint, **extra_args) return Rainbow( net=net, - net_factory=self.net_factory, + target_net=target_net, action_space=action_space, discount_factor=self.discount_factor, target_update_frequency=self.target_update_frequency, diff --git a/vel/rl/policy/trpo.py b/vel/rl/policy/trpo.py index 4d614cd8..8c041c6f 100644 --- a/vel/rl/policy/trpo.py +++ b/vel/rl/policy/trpo.py @@ -1,6 +1,7 @@ import gym import numpy 
as np import itertools as it +import typing import torch import torch.autograd as autograd @@ -13,8 +14,9 @@ from vel.rl.api import Rollout, Trajectories, RlPolicy from vel.rl.discount_bootstrap import discount_bootstrap_gae -from vel.rl.module.stochastic_action_head import StochasticActionHead -from vel.rl.module.value_head import ValueHead +from vel.rl.module.head.stochastic_action_head import make_stockastic_action_head +from vel.rl.module.head.value_head import ValueHead +from vel.util.situational import observation_space_to_size_hint def p2v(params): @@ -57,11 +59,11 @@ def conjugate_gradient_method(matrix_vector_operator, loss_gradient, nsteps, rdo class TRPO(RlPolicy): """ Trust Region Policy Optimization - https://arxiv.org/abs/1502.05477 """ - def __init__(self, - policy_net: BackboneNetwork, value_net: BackboneNetwork, - action_space: gym.Space, + def __init__(self, policy_net: BackboneNetwork, value_net: BackboneNetwork, action_space: gym.Space, max_kl, cg_iters, line_search_iters, cg_damping, entropy_coefficient, vf_iters, - discount_factor, gae_lambda, improvement_acceptance_ratio): + discount_factor, gae_lambda, improvement_acceptance_ratio, + input_net: typing.Optional[BackboneNetwork] = None, + ): super().__init__(discount_factor) self.mak_kl = max_kl @@ -73,10 +75,11 @@ def __init__(self, self.gae_lambda = gae_lambda self.improvement_acceptance_ratio = improvement_acceptance_ratio + self.input_net = input_net self.policy_net = policy_net self.value_net = value_net - self.action_head = StochasticActionHead( + self.action_head = make_stockastic_action_head( action_space=action_space, input_dim=self.policy_net.size_hints().assert_single(2).last() ) @@ -87,6 +90,9 @@ def __init__(self, def reset_weights(self): """ Initialize properly model weights """ + if self.input_net: + self.input_net.reset_weights() + self.policy_net.reset_weights() self.value_net.reset_weights() @@ -95,23 +101,28 @@ def reset_weights(self): def forward(self, observations): """ Calculate model outputs """ - policy_base_output = self.policy_net(observations) - value_base_output = self.value_net(observations) + if self.input_net is not None: + normalized_observations = self.input_net(observations) + else: + normalized_observations = observations + + policy_base_output = self.policy_net(normalized_observations) + value_base_output = self.value_net(normalized_observations) action_output = self.action_head(policy_base_output) value_output = self.value_head(value_base_output) return action_output, value_output - def value(self, observations, state=None): + def _value(self, normalized_observations): """ Calculate only value head for given state """ - base_output = self.value_net(observations) + base_output = self.value_net(normalized_observations) value_output = self.value_head(base_output) return value_output - def policy(self, observations): + def _policy(self, normalized_observations): """ Calculate only action head for given state """ - policy_base_output = self.policy_net(observations) + policy_base_output = self.policy_net(normalized_observations) policy_params = self.action_head(policy_base_output) return policy_params @@ -174,10 +185,16 @@ def optimize(self, batch_info: BatchInfo, rollout: Rollout) -> dict: rollout = rollout.to_transitions() observations = rollout.batch_tensor('observations') + + if self.input_net is not None: + normalized_observations = self.input_net(observations) + else: + normalized_observations = observations + returns = rollout.batch_tensor('returns') # Evaluate model on the 
observations - action_pd_params = self.policy(observations) + action_pd_params = self._policy(normalized_observations) policy_entropy = torch.mean(self.action_head.entropy(action_pd_params)) policy_loss = self.calc_policy_loss(action_pd_params, policy_entropy, rollout) @@ -205,7 +222,8 @@ def optimize(self, batch_info: BatchInfo, rollout: Rollout) -> dict: (policy_optimization_success, ratio, policy_loss_improvement, new_policy_loss, kl_divergence_step) = ( self.line_search( - rollout, policy_loss, action_pd_params, original_parameter_vec, full_step, expected_improvement + normalized_observations, rollout, policy_loss, action_pd_params, original_parameter_vec, + full_step, expected_improvement ) ) @@ -213,7 +231,7 @@ def optimize(self, batch_info: BatchInfo, rollout: Rollout) -> dict: for i in range(self.vf_iters): batch_info.optimizer.zero_grad() - value_loss = self.value_loss(observations, returns) + value_loss = self._value_loss(normalized_observations, returns) value_loss.backward() @@ -238,7 +256,7 @@ def optimize(self, batch_info: BatchInfo, rollout: Rollout) -> dict: 'explained_variance': explained_variance(returns, rollout.batch_tensor('values')) } - def line_search(self, rollout, original_policy_loss, original_policy_params, original_parameter_vec, + def line_search(self, normalized_observations, rollout, original_policy_loss, original_policy_params, original_parameter_vec, full_step, expected_improvement_full): """ Find the right stepsize to make sure policy improves """ current_parameter_vec = original_parameter_vec.clone() @@ -253,7 +271,7 @@ def line_search(self, rollout, original_policy_loss, original_policy_params, ori # Calculate new loss with torch.no_grad(): - policy_params = self.policy(rollout.batch_tensor('observations')) + policy_params = self._policy(normalized_observations) policy_entropy = torch.mean(self.action_head.entropy(policy_params)) kl_divergence = torch.mean(self.action_head.kl_divergence(original_policy_params, policy_params)) @@ -289,9 +307,9 @@ def fisher_vector_product(self, vector, kl_divergence_gradient): return fvp + vector * self.cg_damping - def value_loss(self, observations, returns): + def _value_loss(self, normalized_observations, returns): """ Loss of value function head """ - value_outputs = self.value(observations) + value_outputs = self._value(normalized_observations) value_loss = 0.5 * F.mse_loss(value_outputs, returns) return value_loss @@ -337,9 +355,10 @@ class TRPOFactory(ModelFactory): """ Factory class for policy gradient models """ def __init__(self, policy_net: ModelFactory, value_net: ModelFactory, max_kl, cg_iters, line_search_iters, cg_damping, entropy_coefficient, vf_iters, - discount_factor, gae_lambda, improvement_acceptance_ratio): + discount_factor, gae_lambda, improvement_acceptance_ratio, input_net: typing.Optional[ModelFactory]): self.policy_net = policy_net self.value_net = value_net + self.input_net = input_net self.entropy_coefficient = entropy_coefficient self.mak_kl = max_kl @@ -354,13 +373,23 @@ def __init__(self, policy_net: ModelFactory, value_net: ModelFactory, def instantiate(self, **extra_args): """ Instantiate the model """ action_space = extra_args.pop('action_space') + observation_space = extra_args.pop('observation_space') + + size_hint = observation_space_to_size_hint(observation_space) + + if self.input_net is None: + input_net = None + else: + input_net = self.input_net.instantiate(size_hint=size_hint, **extra_args) + size_hint = input_net.size_hints() - policy_net = 
self.policy_net.instantiate(**extra_args) - value_net = self.value_net.instantiate(**extra_args) + policy_net = self.policy_net.instantiate(size_hint=size_hint, **extra_args) + value_net = self.value_net.instantiate(size_hint=size_hint, **extra_args) return TRPO( policy_net=policy_net, value_net=value_net, + input_net=input_net, action_space=action_space, max_kl=self.mak_kl, cg_iters=self.cg_iters, @@ -376,12 +405,13 @@ def instantiate(self, **extra_args): def create(policy_net: ModelFactory, value_net: ModelFactory, max_kl, cg_iters, line_search_iters, cg_damping, entropy_coefficient, vf_iters, - discount_factor, gae_lambda, improvement_acceptance_ratio): + discount_factor, gae_lambda, improvement_acceptance_ratio, input_net: typing.Optional[ModelFactory]=None): """ Vel factory function """ return TRPOFactory( policy_net=policy_net, value_net=value_net, + input_net=input_net, max_kl=max_kl, cg_iters=cg_iters, line_search_iters=line_search_iters, diff --git a/vel/rl/reinforcer/buffered_mixed_policy_iteration_reinforcer.py b/vel/rl/reinforcer/buffered_mixed_policy_iteration_reinforcer.py index f48183be..92c30a5d 100644 --- a/vel/rl/reinforcer/buffered_mixed_policy_iteration_reinforcer.py +++ b/vel/rl/reinforcer/buffered_mixed_policy_iteration_reinforcer.py @@ -148,7 +148,7 @@ def __init__(self, settings, env_factory: VecEnvFactory, model_factory: ModelFac def instantiate(self, device: torch.device) -> Reinforcer: env = self.env_factory.instantiate(parallel_envs=self.parallel_envs, seed=self.seed) - policy = self.model_factory.instantiate(action_space=env.action_space) + policy = self.model_factory.instantiate(action_space=env.action_space, observation_space=env.observation_space) env_roller = self.env_roller_factory.instantiate(environment=env, policy=policy, device=device) return BufferedMixedPolicyIterationReinforcer(device, self.settings, env, policy, env_roller) diff --git a/vel/rl/reinforcer/buffered_off_policy_iteration_reinforcer.py b/vel/rl/reinforcer/buffered_off_policy_iteration_reinforcer.py index c42751dc..9deeb210 100644 --- a/vel/rl/reinforcer/buffered_off_policy_iteration_reinforcer.py +++ b/vel/rl/reinforcer/buffered_off_policy_iteration_reinforcer.py @@ -165,7 +165,7 @@ def __init__(self, settings, env_factory: VecEnvFactory, model_factory: ModelFac def instantiate(self, device: torch.device) -> BufferedOffPolicyIterationReinforcer: env = self.env_factory.instantiate(parallel_envs=self.parallel_envs, seed=self.seed) - policy = self.model_factory.instantiate(action_space=env.action_space) + policy = self.model_factory.instantiate(action_space=env.action_space, observation_space=env.observation_space) env_roller = self.env_roller_factory.instantiate(environment=env, policy=policy, device=device) return BufferedOffPolicyIterationReinforcer( diff --git a/vel/rl/reinforcer/on_policy_iteration_reinforcer.py b/vel/rl/reinforcer/on_policy_iteration_reinforcer.py index d2d0a50d..64af89e5 100644 --- a/vel/rl/reinforcer/on_policy_iteration_reinforcer.py +++ b/vel/rl/reinforcer/on_policy_iteration_reinforcer.py @@ -158,7 +158,7 @@ def __init__(self, settings, parallel_envs: int, env_factory: VecEnvFactory, mod def instantiate(self, device: torch.device) -> Reinforcer: env = self.env_factory.instantiate(parallel_envs=self.parallel_envs, seed=self.seed) - policy = self.model_factory.instantiate(action_space=env.action_space) + policy = self.model_factory.instantiate(action_space=env.action_space, observation_space=env.observation_space) env_roller = 
self.env_roller_factory.instantiate(environment=env, policy=policy, device=device) return OnPolicyIterationReinforcer(device, self.settings, policy, env_roller) diff --git a/vel/util/situational.py b/vel/util/situational.py index 56bb25a8..c0f9c55a 100644 --- a/vel/util/situational.py +++ b/vel/util/situational.py @@ -1,5 +1,8 @@ +import gym import typing +from vel.api import SizeHints, SizeHint + def process_environment_settings(default_dictionary: dict, settings: typing.Optional[dict] = None, presets: typing.Optional[dict] = None): @@ -25,3 +28,17 @@ def process_environment_settings(default_dictionary: dict, settings: typing.Opti result_dict[key] = new_dict return result_dict + + +def observation_space_to_size_hint(space: gym.Space) -> SizeHints: + """ Convert Gym observation space to size hints """ + if isinstance(space, gym.spaces.Box): + return size_hint_from_shape(space.shape) + else: + raise NotImplementedError + + +def size_hint_from_shape(shape: typing.Tuple[int]) -> SizeHints: + """ Convert tensor shape (without batch dimension) into a size hint """ + return SizeHints(SizeHint(*([None] + list(shape)))) + diff --git a/vel/util/tensor_accumulator.py b/vel/util/tensor_accumulator.py deleted file mode 100644 index fd13c2d9..00000000 --- a/vel/util/tensor_accumulator.py +++ /dev/null @@ -1,16 +0,0 @@ -import collections - -import torch - - -class TensorAccumulator: - """ Buffer for tensors that will be stacked together """ - def __init__(self): - self.accumulants = collections.defaultdict(list) - - def add(self, name, tensor): - self.accumulants[name].append(tensor) - - def result(self): - """ Concatenate accumulated tensors """ - return {k: torch.stack(v) for k, v in self.accumulants.items()} diff --git a/vel/util/tensor_util.py b/vel/util/tensor_util.py index 554ce2d2..29f41ab3 100644 --- a/vel/util/tensor_util.py +++ b/vel/util/tensor_util.py @@ -1,4 +1,5 @@ import torch +import collections def one_hot_encoding(input_tensor, num_labels): @@ -32,3 +33,16 @@ def to_device(tensor, device: torch.device): return tuple(to_device(v, device) for v in tensor) else: raise NotImplementedError + + +class TensorAccumulator: + """ Buffer for tensors that will be stacked together """ + def __init__(self): + self.accumulants = collections.defaultdict(list) + + def add(self, name, tensor): + self.accumulants[name].append(tensor) + + def result(self): + """ Concatenate accumulated tensors """ + return {k: torch.stack(v) for k, v in self.accumulants.items()} From 6c8a61863fc74f7c3868ca897d9da1ec3400e883 Mon Sep 17 00:00:00 2001 From: Million Integrals Date: Thu, 3 Oct 2019 12:04:28 -0700 Subject: [PATCH 111/162] Disable integration tests for now. 
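The integration tests below still exercise the pre-refactor API (StochasticPolicyFactory with input_block/backbone arguments and the vel.rl.algo.policy_gradient classes), so they are commented out wholesale rather than deleted, presumably to be ported once the new policy/factory stack settles. As an illustration only (not part of this patch), a lighter-weight way to disable the module would be a pytest module-level skip:

    import pytest

    # Skip every test in this module until it is ported to the new policy API.
    pytestmark = pytest.mark.skip(reason="integration tests disabled for now")
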
--- vel/rl/test/test_integration.py | 432 ++++++++++++++++---------------- 1 file changed, 216 insertions(+), 216 deletions(-) diff --git a/vel/rl/test/test_integration.py b/vel/rl/test/test_integration.py index dfac3a61..cc488751 100644 --- a/vel/rl/test/test_integration.py +++ b/vel/rl/test/test_integration.py @@ -1,219 +1,219 @@ -import torch -import torch.optim as optim - -from vel.module.input.image_to_tensor import ImageToTensorFactory -from vel.module.input.normalize_observations import NormalizeObservationsFactory -from vel.rl.buffer.circular_replay_buffer import CircularReplayBuffer -from vel.rl.buffer.prioritized_circular_replay_buffer import PrioritizedCircularReplayBuffer -from vel.rl.command.rl_train_command import FrameTracker -from vel.rl.env_roller.step_env_roller import StepEnvRoller -from vel.rl.env_roller.trajectory_replay_env_roller import TrajectoryReplayEnvRoller -from vel.rl.env_roller.transition_replay_env_roller import TransitionReplayEnvRoller -from vel.rl.metrics import EpisodeRewardMetric -from vel.rl.module.noise.eps_greedy import EpsGreedy -from vel.rl.module.noise.ou_noise import OuNoise -from vel.function.linear import LinearSchedule -from vel.function.linear_and_constant import LinearAndConstantSchedule -from vel.util.random import set_seed - -from vel.rl.env.classic_atari import ClassicAtariEnv -from vel.rl.env.mujoco import MujocoEnv -from vel.rl.vecenv.subproc import SubprocVecEnvWrapper -from vel.rl.vecenv.dummy import DummyVecEnvWrapper - -from vel.rl.policy.stochastic_policy import StochasticPolicyFactory -# from vel.rl.model.q_stochastic_policy_model import QStochasticPolicyModelFactory -# from vel.rl.model.q_model import QModelFactory -# from vel.rl.model.deterministic_policy_model import DeterministicPolicyModelFactory -# from vel.rl.model.stochastic_policy_model_separate import StochasticPolicyModelSeparateFactory - -from vel.rl.backbone.nature_cnn import NatureCnnFactory -from vel.rl.backbone.mlp import MLPFactory - -from vel.rl.reinforcer.on_policy_iteration_reinforcer import ( - OnPolicyIterationReinforcer, OnPolicyIterationReinforcerSettings -) - -from vel.rl.reinforcer.buffered_off_policy_iteration_reinforcer import ( - BufferedOffPolicyIterationReinforcer, BufferedOffPolicyIterationReinforcerSettings -) - -from vel.rl.reinforcer.buffered_mixed_policy_iteration_reinforcer import ( - BufferedMixedPolicyIterationReinforcer, BufferedMixedPolicyIterationReinforcerSettings -) - -from vel.rl.algo.dqn import DeepQLearning -from vel.rl.algo.policy_gradient.a2c import A2CPolicyGradient -from vel.rl.algo.policy_gradient.ppo import PpoPolicyGradient -from vel.rl.algo.policy_gradient.trpo import TrpoPolicyGradient -from vel.rl.algo.policy_gradient.acer import AcerPolicyGradient -from vel.rl.algo.policy_gradient.ddpg import DeepDeterministicPolicyGradient - -from vel.api.info import TrainingInfo, EpochInfo - - -CPU_DEVICE = torch.device('cpu') - - -def test_a2c_breakout(): - """ - Simple 1 iteration of a2c breakout - """ - seed = 1001 - - # Set random seed in python std lib, numpy and pytorch - set_seed(seed) - - # Create 16 environments evaluated in parallel in sub processess with all usual DeepMind wrappers - # These are just helper functions for that - vec_env = SubprocVecEnvWrapper( - ClassicAtariEnv('BreakoutNoFrameskip-v4'), frame_history=4 - ).instantiate(parallel_envs=16, seed=seed) - - # Again, use a helper to create a model - # But because model is owned by the reinforcer, model should not be accessed using this variable - # but from 
reinforcer.model property - policy = StochasticPolicyFactory( - input_block=ImageToTensorFactory(), - backbone=NatureCnnFactory(input_width=84, input_height=84, input_channels=4) - ).instantiate(action_space=vec_env.action_space) - - # Reinforcer - an object managing the learning process - reinforcer = OnPolicyIterationReinforcer( - device=CPU_DEVICE, - settings=OnPolicyIterationReinforcerSettings( - batch_size=256, - number_of_steps=5 - ), - policy=policy, - algo=A2CPolicyGradient( - entropy_coefficient=0.01, - value_coefficient=0.5, - discount_factor=0.99, - max_grad_norm=0.5 - ), - env_roller=StepEnvRoller( - environment=vec_env, - policy=policy, - device=CPU_DEVICE - ) - ) - - # Model optimizer - optimizer = optim.RMSprop(reinforcer.policy.parameters(), lr=7.0e-4, eps=1e-3) - - # Overall information store for training information - training_info = TrainingInfo( - metrics=[ - EpisodeRewardMetric('episode_rewards'), # Calculate average reward from episode - ], - callbacks=[] # Print live metrics every epoch to standard output - ) - - # A bit of training initialization bookkeeping... - training_info.initialize() - reinforcer.initialize_training(training_info) - training_info.on_train_begin() - - # Let's make 100 batches per epoch to average metrics nicely - num_epochs = 1 - - # Normal handrolled training loop - for i in range(1, num_epochs+1): - epoch_info = EpochInfo( - training_info=training_info, - global_epoch_idx=i, - batches_per_epoch=1, - optimizer=optimizer - ) - - reinforcer.train_epoch(epoch_info, interactive=False) - - training_info.on_train_end() - - -def test_ppo_breakout(): - """ - Simple 1 iteration of ppo breakout - """ - device = torch.device('cpu') - seed = 1001 - - # Set random seed in python std lib, numpy and pytorch - set_seed(seed) - - # Create 16 environments evaluated in parallel in sub processess with all usual DeepMind wrappers - # These are just helper functions for that - vec_env = SubprocVecEnvWrapper( - ClassicAtariEnv('BreakoutNoFrameskip-v4'), frame_history=4 - ).instantiate(parallel_envs=8, seed=seed) - - # Again, use a helper to create a model - # But because model is owned by the reinforcer, model should not be accessed using this variable - # but from reinforcer.model property - policy = StochasticPolicyFactory( - input_block=ImageToTensorFactory(), - backbone=NatureCnnFactory(input_width=84, input_height=84, input_channels=4) - ).instantiate(action_space=vec_env.action_space) - - # Reinforcer - an object managing the learning process - reinforcer = OnPolicyIterationReinforcer( - device=device, - settings=OnPolicyIterationReinforcerSettings( - number_of_steps=12, - batch_size=4, - experience_replay=2, - ), - policy=policy, - algo=PpoPolicyGradient( - entropy_coefficient=0.01, - value_coefficient=0.5, - max_grad_norm=0.5, - cliprange=LinearSchedule(0.1, 0.0), - discount_factor=0.99, - normalize_advantage=True - ), - env_roller=StepEnvRoller( - environment=vec_env, - policy=policy, - device=device, - ) - ) - - # Model optimizer - # optimizer = optim.RMSprop(reinforcer.model.parameters(), lr=7.0e-4, eps=1e-3) - optimizer = optim.Adam(reinforcer.policy.parameters(), lr=2.5e-4, eps=1e-5) - - # Overall information store for training information - training_info = TrainingInfo( - metrics=[ - EpisodeRewardMetric('episode_rewards'), # Calculate average reward from episode - ], - callbacks=[ - FrameTracker(100_000) - ] # Print live metrics every epoch to standard output - ) - - # A bit of training initialization bookkeeping... 
- training_info.initialize() - reinforcer.initialize_training(training_info) - training_info.on_train_begin() - - # Let's make 100 batches per epoch to average metrics nicely - num_epochs = 1 - - # Normal handrolled training loop - for i in range(1, num_epochs+1): - epoch_info = EpochInfo( - training_info=training_info, - global_epoch_idx=i, - batches_per_epoch=1, - optimizer=optimizer - ) - - reinforcer.train_epoch(epoch_info, interactive=False) - - training_info.on_train_end() +# import torch +# import torch.optim as optim +# +# from vel.module.input.image_to_tensor import ImageToTensorFactory +# from vel.module.input.normalize_observations import NormalizeObservationsFactory +# from vel.rl.buffer.circular_replay_buffer import CircularReplayBuffer +# from vel.rl.buffer.prioritized_circular_replay_buffer import PrioritizedCircularReplayBuffer +# from vel.rl.command.rl_train_command import FrameTracker +# from vel.rl.env_roller.step_env_roller import StepEnvRoller +# from vel.rl.env_roller.trajectory_replay_env_roller import TrajectoryReplayEnvRoller +# from vel.rl.env_roller.transition_replay_env_roller import TransitionReplayEnvRoller +# from vel.rl.metrics import EpisodeRewardMetric +# from vel.rl.module.noise.eps_greedy import EpsGreedy +# from vel.rl.module.noise.ou_noise import OuNoise +# from vel.function.linear import LinearSchedule +# from vel.function.linear_and_constant import LinearAndConstantSchedule +# from vel.util.random import set_seed +# +# from vel.rl.env.classic_atari import ClassicAtariEnv +# from vel.rl.env.mujoco import MujocoEnv +# from vel.rl.vecenv.subproc import SubprocVecEnvWrapper +# from vel.rl.vecenv.dummy import DummyVecEnvWrapper +# +# from vel.rl.policy.stochastic_policy import StochasticPolicyFactory +# # from vel.rl.model.q_stochastic_policy_model import QStochasticPolicyModelFactory +# # from vel.rl.model.q_model import QModelFactory +# # from vel.rl.model.deterministic_policy_model import DeterministicPolicyModelFactory +# # from vel.rl.model.stochastic_policy_model_separate import StochasticPolicyModelSeparateFactory +# +# from vel.rl.backbone.nature_cnn import NatureCnnFactory +# from vel.rl.backbone.mlp import MLPFactory +# +# from vel.rl.reinforcer.on_policy_iteration_reinforcer import ( +# OnPolicyIterationReinforcer, OnPolicyIterationReinforcerSettings +# ) +# +# from vel.rl.reinforcer.buffered_off_policy_iteration_reinforcer import ( +# BufferedOffPolicyIterationReinforcer, BufferedOffPolicyIterationReinforcerSettings +# ) +# +# from vel.rl.reinforcer.buffered_mixed_policy_iteration_reinforcer import ( +# BufferedMixedPolicyIterationReinforcer, BufferedMixedPolicyIterationReinforcerSettings +# ) +# +# from vel.rl.algo.dqn import DeepQLearning +# from vel.rl.algo.policy_gradient.a2c import A2CPolicyGradient +# from vel.rl.algo.policy_gradient.ppo import PpoPolicyGradient +# from vel.rl.algo.policy_gradient.trpo import TrpoPolicyGradient +# from vel.rl.algo.policy_gradient.acer import AcerPolicyGradient +# from vel.rl.algo.policy_gradient.ddpg import DeepDeterministicPolicyGradient +# +# from vel.api.info import TrainingInfo, EpochInfo +# +# +# CPU_DEVICE = torch.device('cpu') +# +# +# def test_a2c_breakout(): +# """ +# Simple 1 iteration of a2c breakout +# """ +# seed = 1001 +# +# # Set random seed in python std lib, numpy and pytorch +# set_seed(seed) +# +# # Create 16 environments evaluated in parallel in sub processess with all usual DeepMind wrappers +# # These are just helper functions for that +# vec_env = SubprocVecEnvWrapper( +# 
ClassicAtariEnv('BreakoutNoFrameskip-v4'), frame_history=4 +# ).instantiate(parallel_envs=16, seed=seed) +# +# # Again, use a helper to create a model +# # But because model is owned by the reinforcer, model should not be accessed using this variable +# # but from reinforcer.model property +# policy = StochasticPolicyFactory( +# input_block=ImageToTensorFactory(), +# backbone=NatureCnnFactory(input_width=84, input_height=84, input_channels=4) +# ).instantiate(action_space=vec_env.action_space) +# +# # Reinforcer - an object managing the learning process +# reinforcer = OnPolicyIterationReinforcer( +# device=CPU_DEVICE, +# settings=OnPolicyIterationReinforcerSettings( +# batch_size=256, +# number_of_steps=5 +# ), +# policy=policy, +# algo=A2CPolicyGradient( +# entropy_coefficient=0.01, +# value_coefficient=0.5, +# discount_factor=0.99, +# max_grad_norm=0.5 +# ), +# env_roller=StepEnvRoller( +# environment=vec_env, +# policy=policy, +# device=CPU_DEVICE +# ) +# ) +# +# # Model optimizer +# optimizer = optim.RMSprop(reinforcer.policy.parameters(), lr=7.0e-4, eps=1e-3) +# +# # Overall information store for training information +# training_info = TrainingInfo( +# metrics=[ +# EpisodeRewardMetric('episode_rewards'), # Calculate average reward from episode +# ], +# callbacks=[] # Print live metrics every epoch to standard output +# ) +# +# # A bit of training initialization bookkeeping... +# training_info.initialize() +# reinforcer.initialize_training(training_info) +# training_info.on_train_begin() +# +# # Let's make 100 batches per epoch to average metrics nicely +# num_epochs = 1 +# +# # Normal handrolled training loop +# for i in range(1, num_epochs+1): +# epoch_info = EpochInfo( +# training_info=training_info, +# global_epoch_idx=i, +# batches_per_epoch=1, +# optimizer=optimizer +# ) +# +# reinforcer.train_epoch(epoch_info, interactive=False) +# +# training_info.on_train_end() +# +# +# def test_ppo_breakout(): +# """ +# Simple 1 iteration of ppo breakout +# """ +# device = torch.device('cpu') +# seed = 1001 +# +# # Set random seed in python std lib, numpy and pytorch +# set_seed(seed) +# +# # Create 16 environments evaluated in parallel in sub processess with all usual DeepMind wrappers +# # These are just helper functions for that +# vec_env = SubprocVecEnvWrapper( +# ClassicAtariEnv('BreakoutNoFrameskip-v4'), frame_history=4 +# ).instantiate(parallel_envs=8, seed=seed) +# +# # Again, use a helper to create a model +# # But because model is owned by the reinforcer, model should not be accessed using this variable +# # but from reinforcer.model property +# policy = StochasticPolicyFactory( +# input_block=ImageToTensorFactory(), +# backbone=NatureCnnFactory(input_width=84, input_height=84, input_channels=4) +# ).instantiate(action_space=vec_env.action_space) +# +# # Reinforcer - an object managing the learning process +# reinforcer = OnPolicyIterationReinforcer( +# device=device, +# settings=OnPolicyIterationReinforcerSettings( +# number_of_steps=12, +# batch_size=4, +# experience_replay=2, +# ), +# policy=policy, +# algo=PpoPolicyGradient( +# entropy_coefficient=0.01, +# value_coefficient=0.5, +# max_grad_norm=0.5, +# cliprange=LinearSchedule(0.1, 0.0), +# discount_factor=0.99, +# normalize_advantage=True +# ), +# env_roller=StepEnvRoller( +# environment=vec_env, +# policy=policy, +# device=device, +# ) +# ) +# +# # Model optimizer +# # optimizer = optim.RMSprop(reinforcer.model.parameters(), lr=7.0e-4, eps=1e-3) +# optimizer = optim.Adam(reinforcer.policy.parameters(), lr=2.5e-4, eps=1e-5) 
+# +# # Overall information store for training information +# training_info = TrainingInfo( +# metrics=[ +# EpisodeRewardMetric('episode_rewards'), # Calculate average reward from episode +# ], +# callbacks=[ +# FrameTracker(100_000) +# ] # Print live metrics every epoch to standard output +# ) +# +# # A bit of training initialization bookkeeping... +# training_info.initialize() +# reinforcer.initialize_training(training_info) +# training_info.on_train_begin() +# +# # Let's make 100 batches per epoch to average metrics nicely +# num_epochs = 1 +# +# # Normal handrolled training loop +# for i in range(1, num_epochs+1): +# epoch_info = EpochInfo( +# training_info=training_info, +# global_epoch_idx=i, +# batches_per_epoch=1, +# optimizer=optimizer +# ) +# +# reinforcer.train_epoch(epoch_info, interactive=False) +# +# training_info.on_train_end() # def test_dqn_breakout(): From c0a012851ad5a4f13956bec74940211a40188474 Mon Sep 17 00:00:00 2001 From: Million Integrals Date: Thu, 3 Oct 2019 12:09:03 -0700 Subject: [PATCH 112/162] Remove evaluator cache. --- vel/rl/test/test_evaluator_cache.py | 39 ----------------------------- 1 file changed, 39 deletions(-) delete mode 100644 vel/rl/test/test_evaluator_cache.py diff --git a/vel/rl/test/test_evaluator_cache.py b/vel/rl/test/test_evaluator_cache.py deleted file mode 100644 index 1f0b3724..00000000 --- a/vel/rl/test/test_evaluator_cache.py +++ /dev/null @@ -1,39 +0,0 @@ -from vel.rl.api import Evaluator, Rollout - -calls = { - "a": 0, - "b": 0, - "c": 0, -} - -class TestEvaluator(Evaluator): - @Evaluator.provides('test:a') - def test_a(self): - calls["a"] += 1 - - @Evaluator.provides('test:b', cache=False) - def test_b(self): - calls["b"] += 1 - - @Evaluator.provides('test:c') - def test_c(self): - calls["c"] += 1 - - -def test_evaluator(): - e = TestEvaluator(Rollout()) - e.get("test:a") - e.get("test:a") - e.get("test:a") - - e.get("test:b") - e.get("test:b") - e.get("test:b") - - e.get("test:c") - e.get("test:c") - e.get("test:c", cache=False) - - assert calls["a"] == 1 # test:a is cached so just one call - assert calls["b"] == 3 # test:b is never cached so three calls - assert calls["c"] == 2 # test:c is cached but one call is not so two calls \ No newline at end of file From c6f6e727148abb492d6afefb5938658aade8a440 Mon Sep 17 00:00:00 2001 From: Million Integrals Date: Thu, 3 Oct 2019 14:16:54 -0700 Subject: [PATCH 113/162] Brought back DDPG. 
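DDPG returns as vel.rl.policy.ddpg, built from modular actor and critic networks owned by a single optimizer with two parameter groups. The policy routes gradients manually, as explained in the comments inside calculate_gradient below: the value (critic) loss is backpropagated through the critic only, while the policy loss is differentiated with respect to the action and that gradient is then pushed back through the actor alone. A minimal sketch of that gradient-routing idea follows; the modules, shapes, and tensors here are toy stand-ins, not the Vel classes added in this patch:

    import torch
    import torch.nn as nn
    import torch.nn.functional as F

    actor = nn.Linear(4, 2)       # observation -> action
    critic = nn.Linear(4 + 2, 1)  # (observation, action) -> Q-value

    # One optimizer, two parameter groups (actor vs critic)
    optimizer = torch.optim.Adam([
        {'params': actor.parameters(), 'lr': 1.0e-4},
        {'params': critic.parameters(), 'lr': 1.0e-3},
    ])

    obs = torch.randn(8, 4)
    rollout_actions = torch.randn(8, 2)
    target_q = torch.randn(8, 1)

    optimizer.zero_grad()

    # Critic loss: only critic parameters receive gradient here, because the
    # actions come from the rollout rather than from the actor.
    value_loss = F.mse_loss(critic(torch.cat([obs, rollout_actions], dim=1)), target_q)
    value_loss.backward()

    # Actor loss: differentiate -Q with respect to the action, then backpropagate
    # that gradient through the actor only, so the critic gets no actor-loss gradient.
    model_action = actor(obs)
    policy_loss = -critic(torch.cat([obs, model_action], dim=1)).mean()
    action_grad = torch.autograd.grad(policy_loss, model_action)[0]
    model_action.backward(gradient=action_grad)

    optimizer.step()

Keeping both networks under one optimizer with per-group settings is what the new mujoco_ddpg.yaml relies on when it specifies lr: [1.0e-4, 1.0e-3], matching the two groups returned by layer_groups() on ActorCriticPolicy.
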
--- examples-configs/rl/atari/atari_a2c.yaml | 2 +- .../rl/atari/atari_a2c_tf_rmsprop.yaml | 2 +- examples-configs/rl/atari/atari_acer.yaml | 2 +- examples-configs/rl/atari/atari_ppo.yaml | 2 +- .../rl/mujoco/ddpg/half_cheetah_ddpg.yaml | 93 --------- examples-configs/rl/mujoco/mujoco_a2c.yaml | 2 +- examples-configs/rl/mujoco/mujoco_ddpg.yaml | 85 ++++++++ examples-configs/rl/mujoco/mujoco_ppo.yaml | 2 +- vel/api/size_hint.py | 7 +- vel/internal/test/test_provider.py | 2 +- vel/module/input/embedding.py | 4 +- vel/module/input/flatten.py | 11 +- vel/module/input/identity.py | 21 -- vel/net/layer/util/concat.py | 59 ++++++ .../util/{repeat_tensor.py => repeat.py} | 0 vel/rl/api/policy.py | 6 + .../trajectory_replay_env_roller.py | 2 + .../transition_replay_env_roller.py | 2 + vel/rl/module/actor_critic_policy.py | 87 ++++++++ vel/rl/module/noise/ou_noise.py | 7 +- vel/rl/module/test/test_action_head.py | 2 +- vel/rl/policy/a2c.py | 4 +- vel/rl/policy/acer.py | 12 +- vel/rl/policy/ddpg.py | 197 ++++++++++++++++++ vel/rl/policy/dqn.py | 12 +- vel/rl/policy/ppo.py | 4 +- vel/rl/policy/purgatory/ddpg.py | 94 --------- vel/rl/policy/rainbow.py | 12 +- vel/rl/policy/trpo.py | 4 +- vel/rl/util/actor.py | 5 +- .../xpolicy/purgatory/deterministic_policy.py | 164 --------------- vel/util/situational.py | 2 +- 32 files changed, 495 insertions(+), 415 deletions(-) delete mode 100644 examples-configs/rl/mujoco/ddpg/half_cheetah_ddpg.yaml create mode 100644 examples-configs/rl/mujoco/mujoco_ddpg.yaml delete mode 100644 vel/module/input/identity.py create mode 100644 vel/net/layer/util/concat.py rename vel/net/layer/util/{repeat_tensor.py => repeat.py} (100%) create mode 100644 vel/rl/module/actor_critic_policy.py create mode 100644 vel/rl/policy/ddpg.py delete mode 100644 vel/rl/policy/purgatory/ddpg.py delete mode 100644 vel/rl/xpolicy/purgatory/deterministic_policy.py diff --git a/examples-configs/rl/atari/atari_a2c.yaml b/examples-configs/rl/atari/atari_a2c.yaml index fa5fdcab..f4e6e76f 100644 --- a/examples-configs/rl/atari/atari_a2c.yaml +++ b/examples-configs/rl/atari/atari_a2c.yaml @@ -23,7 +23,7 @@ model: layers: - name: vel.net.layer.input.image_to_tensor - name: vel.rl.layer.nature_cnn - - name: vel.net.layer.util.repeat_tensor + - name: vel.net.layer.util.repeat times: 2 # Need to repeat output twice, for action and value heads diff --git a/examples-configs/rl/atari/atari_a2c_tf_rmsprop.yaml b/examples-configs/rl/atari/atari_a2c_tf_rmsprop.yaml index fc398e5a..0b013bed 100644 --- a/examples-configs/rl/atari/atari_a2c_tf_rmsprop.yaml +++ b/examples-configs/rl/atari/atari_a2c_tf_rmsprop.yaml @@ -23,7 +23,7 @@ model: layers: - name: vel.net.layer.input.image_to_tensor - name: vel.rl.layer.nature_cnn - - name: vel.net.layer.util.repeat_tensor + - name: vel.net.layer.util.repeat times: 2 # Need to repeat output twice, for action and value heads diff --git a/examples-configs/rl/atari/atari_acer.yaml b/examples-configs/rl/atari/atari_acer.yaml index e0bff147..cde689a8 100644 --- a/examples-configs/rl/atari/atari_acer.yaml +++ b/examples-configs/rl/atari/atari_acer.yaml @@ -30,7 +30,7 @@ model: layers: - name: vel.net.layer.input.image_to_tensor - name: vel.rl.layer.nature_cnn - - name: vel.net.layer.util.repeat_tensor + - name: vel.net.layer.util.repeat times: 2 # Need to repeat output twice, for action and value heads diff --git a/examples-configs/rl/atari/atari_ppo.yaml b/examples-configs/rl/atari/atari_ppo.yaml index 882926a9..617b8a1f 100644 --- a/examples-configs/rl/atari/atari_ppo.yaml +++ 
b/examples-configs/rl/atari/atari_ppo.yaml @@ -30,7 +30,7 @@ model: layers: - name: vel.net.layer.input.image_to_tensor - name: vel.rl.layer.nature_cnn - - name: vel.net.layer.util.repeat_tensor + - name: vel.net.layer.util.repeat times: 2 # Need to repeat output twice, for action and value heads diff --git a/examples-configs/rl/mujoco/ddpg/half_cheetah_ddpg.yaml b/examples-configs/rl/mujoco/ddpg/half_cheetah_ddpg.yaml deleted file mode 100644 index 978227b5..00000000 --- a/examples-configs/rl/mujoco/ddpg/half_cheetah_ddpg.yaml +++ /dev/null @@ -1,93 +0,0 @@ -name: 'half_cheetah_ddpg' - -env: - name: vel.rl.env.mujoco - game: 'HalfCheetah-v2' -# normalize_returns: true -# normalize_observations: true - - -vec_env: - name: vel.rl.vecenv.dummy - - -model: - name: vel.rl.models.deterministic_policy_model - - input_block: - name: vel.modules.input.normalize_observations - input_shape: 17 - - policy_backbone: - name: vel.rl.models.backbone.mlp - input_length: 17 - hidden_layers: [64, 64] - activation: 'tanh' -# normalization: 'layer' - - value_backbone: - name: vel.rl.models.backbone.mlp - input_length: 23 # Has to be observation size(17) + action size(6) - hidden_layers: [64, 64] - activation: 'tanh' -# normalization: 'layer' - - -reinforcer: - name: vel.rl.reinforcers.buffered_off_policy_iteration_reinforcer - - env_roller: - name: vel.rl.env_roller.transition_replay_env_roller - - replay_buffer: - name: vel.rl.buffers.circular_replay_buffer - - buffer_capacity: 1_000_000 - buffer_initial_size: 2_000 - - normalize_returns: true - discount_factor: 0.99 - - action_noise: - name: vel.rl.modules.noise.ou_noise - std_dev: 0.2 - - algo: - name: vel.rl.algo.policy_gradient.ddpg - - tau: 0.01 - discount_factor: 0.99 - - rollout_steps: 2 - training_steps: 64 - - parallel_envs: 1 - - -optimizer: - name: vel.optimizers.adam - # OpenAI has two different optimizers optimizing each network separately. 
- # As far as I know it should be equivalent to optimizing two separate networks together with a sum of loss functions - lr: [1.0e-4, 1.0e-3, 1.0e-3] - weight_decay: [0.0, 0.0, 0.01] - epsilon: 1.0e-4 - layer_groups: on - - -commands: - train: - name: vel.rl.commands.rl_train_command - total_frames: 1.0e6 - batches_per_epoch: 1000 - -# openai_logging: true - - record: - name: vel.rl.commands.record_movie_command - takes: 10 - videoname: 'half_cheetah_vid_{:04}.avi' - - evaluate: - name: vel.rl.commands.evaluate_env_command - takes: 100 - frame_history: 4 diff --git a/examples-configs/rl/mujoco/mujoco_a2c.yaml b/examples-configs/rl/mujoco/mujoco_a2c.yaml index 5c49c725..504871e1 100644 --- a/examples-configs/rl/mujoco/mujoco_a2c.yaml +++ b/examples-configs/rl/mujoco/mujoco_a2c.yaml @@ -26,7 +26,7 @@ model: - name: vel.net.layer.mlp hidden_layers: [64, 64] activation: 'tanh' - - name: vel.net.layer.util.repeat_tensor + - name: vel.net.layer.util.repeat times: 2 # Need to repeat output twice, for action and value heads diff --git a/examples-configs/rl/mujoco/mujoco_ddpg.yaml b/examples-configs/rl/mujoco/mujoco_ddpg.yaml new file mode 100644 index 00000000..d82f0baf --- /dev/null +++ b/examples-configs/rl/mujoco/mujoco_ddpg.yaml @@ -0,0 +1,85 @@ +name: 'mujoco_ddpg' + +env: + name: vel.rl.env.mujoco + game: !param game = 'Reacher-v2' + + +vec_env: + name: vel.rl.vecenv.dummy + normalize_returns: true + + +model: + name: vel.rl.policy.ddpg + + tau: 0.01 + discount_factor: 0.99 + noise_std_dev: 0.2 + + input_net: + name: vel.net.modular + layers: + - name: vel.net.layer.input.normalize + + actor_net: + name: vel.net.modular + layers: + - name: vel.net.layer.mlp + hidden_layers: [64, 64] + activation: 'tanh' + + critic_net: + name: vel.net.modular + layers: + - name: vel.net.layer.util.concat # Concatenate observation and action + - name: vel.net.layer.mlp + hidden_layers: [64, 64] + activation: 'tanh' + + +reinforcer: + name: vel.rl.reinforcer.buffered_off_policy_iteration_reinforcer + + env_roller: + name: vel.rl.env_roller.transition_replay_env_roller + + replay_buffer: + name: vel.rl.buffer.circular_replay_buffer + + buffer_capacity: 1_000_000 + buffer_initial_size: 2_000 + + normalize_returns: true + discount_factor: 0.99 + + rollout_steps: 2 + training_steps: 64 + + parallel_envs: 1 + + +optimizer: + name: vel.optimizer.adam + # OpenAI has two different optimizers optimizing each network separately. 
+ # As far as I know it should be equivalent to optimizing two separate networks together with a sum of loss functions + lr: [1.0e-4, 1.0e-3] + weight_decay: [0.0, 0.0] + epsilon: 1.0e-4 + + +commands: + train: + name: vel.rl.command.rl_train_command + total_frames: 1.0e6 + batches_per_epoch: 1000 + + record: + name: vel.rl.command.record_movie_command + takes: 10 + videoname: 'half_cheetah_vid_{:04}.avi' + + evaluate: + name: vel.rl.command.evaluate_env_command + takes: 100 + frame_history: 4 diff --git a/examples-configs/rl/mujoco/mujoco_ppo.yaml b/examples-configs/rl/mujoco/mujoco_ppo.yaml index df6db42c..63de365a 100644 --- a/examples-configs/rl/mujoco/mujoco_ppo.yaml +++ b/examples-configs/rl/mujoco/mujoco_ppo.yaml @@ -26,7 +26,7 @@ model: name: vel.net.modular layers: - name: vel.net.layer.input.normalize - - name: vel.net.layer.util.repeat_tensor + - name: vel.net.layer.util.repeat times: 2 # Need to repeat output twice, to consume by the 'parallel' layers - name: vel.net.layer.arch.parallel layers: diff --git a/vel/api/size_hint.py b/vel/api/size_hint.py index 78851126..8263c0a5 100644 --- a/vel/api/size_hint.py +++ b/vel/api/size_hint.py @@ -57,10 +57,13 @@ def __init__(self, size_hints: typing.Union[SizeHint, SizeTuple, SizeDict] = Non else: raise VelException("Invalid size hints: {}".format(self.size_hints)) - def assert_tuple(self, length) -> SizeTuple: + def assert_tuple(self, length : typing.Optional[int] = None) -> SizeTuple: """ Assert given size hints is a tuple """ assert self.type == self.TYPE_TUPLE, "Network needs to return a tuple" - assert len(self.size_hints) == length, "Network must return {} results".format(length) + + if length is not None: + assert len(self.size_hints) == length, "Network must return {} results".format(length) + return self.size_hints def assert_single(self, length: typing.Optional[int] = None) -> SizeHint: diff --git a/vel/internal/test/test_provider.py b/vel/internal/test/test_provider.py index 7428756f..594fa74a 100644 --- a/vel/internal/test/test_provider.py +++ b/vel/internal/test/test_provider.py @@ -139,7 +139,7 @@ def test_render_configuration(): }, parameters={'xxx': 5}) - configuration = provider.render_configuration() + configuration = provider.render_environment() assert configuration == { 'a': 1, diff --git a/vel/module/input/embedding.py b/vel/module/input/embedding.py index b576ddfa..97927ff1 100644 --- a/vel/module/input/embedding.py +++ b/vel/module/input/embedding.py @@ -1,9 +1,9 @@ import torch.nn as nn -from vel.api import LinearBackboneModel, ModelFactory, LanguageSource +from vel.api import Network, LanguageSource, ModelFactory -class EmbeddingInput(LinearBackboneModel): +class EmbeddingInput(Network): """ Learnable Embedding input layer """ def __init__(self, alphabet_size: int, output_dim: int, pretrained: bool = False, frozen: bool = False, diff --git a/vel/module/input/flatten.py b/vel/module/input/flatten.py index faf424df..5e140cc0 100644 --- a/vel/module/input/flatten.py +++ b/vel/module/input/flatten.py @@ -1,10 +1,10 @@ from vel.module.layers import Flatten -from vel.api import ModelFactory, BackboneModel +from vel.api import ModelFactory, BackboneNetwork -class FlattenInput(BackboneModel): +class FlattenInput(BackboneNetwork): """ Sequence input """ def __init__(self): super().__init__() @@ -13,10 +13,3 @@ def __init__(self): def forward(self, input_data): return self.model(input_data) - -def create(): - """ Vel factory function """ - def instantiate(**_): - return Flatten() - - return 
ModelFactory.generic(instantiate) diff --git a/vel/module/input/identity.py b/vel/module/input/identity.py deleted file mode 100644 index 6b9ee547..00000000 --- a/vel/module/input/identity.py +++ /dev/null @@ -1,21 +0,0 @@ -from vel.api import BackboneModel, ModelFactory - - -class Identity(BackboneModel): - """ Identity transformation that doesn't do anything """ - def __init__(self): - super().__init__() - - def forward(self, x): - return x - - def reset_weights(self): - pass - - -def create(): - """ Vel factory function """ - def instantiate(**_): - return Identity() - - return ModelFactory.generic(instantiate) diff --git a/vel/net/layer/util/concat.py b/vel/net/layer/util/concat.py new file mode 100644 index 00000000..d80e2d57 --- /dev/null +++ b/vel/net/layer/util/concat.py @@ -0,0 +1,59 @@ +import torch + +from vel.api import SizeHints, SizeHint +from vel.net.layer_base import LayerFactory, Layer + + +class Concat(Layer): + """ Repeat single tensor multiple times """ + + def __init__(self, name: str, size_hints: SizeHints, axis: int = -1): + super().__init__(name) + + self.axis = axis + self._size_hints = size_hints + + def forward(self, direct, state: dict = None, context: dict = None): + return torch.cat(direct, dim=self.axis) + + def size_hints(self) -> SizeHints: + return self._size_hints + + +class ConcatFactory(LayerFactory): + def __init__(self, axis: int = -1): + self.axis = axis + + @property + def name_base(self) -> str: + """ Base of layer name """ + return "concat" + + def instantiate(self, name: str, direct_input: SizeHints, context: dict) -> Layer: + inputs = direct_input.assert_tuple() + + result = [] + dimension = len(inputs[0]) + + for i in range(dimension): + + if i == (self.axis % dimension): + candidates = [el[i] for el in inputs] + + if None in candidates: + result.append(None) + else: + result.append(sum(candidates)) + else: + result.append(inputs[0][i]) + + return Concat( + name=name, + axis=self.axis, + size_hints=SizeHints(SizeHint(*result)) + ) + + +def create(axis: int = -1): + """ Vel factory function """ + return ConcatFactory(axis=axis) diff --git a/vel/net/layer/util/repeat_tensor.py b/vel/net/layer/util/repeat.py similarity index 100% rename from vel/net/layer/util/repeat_tensor.py rename to vel/net/layer/util/repeat.py diff --git a/vel/rl/api/policy.py b/vel/rl/api/policy.py index 4d1109c2..d9215180 100644 --- a/vel/rl/api/policy.py +++ b/vel/rl/api/policy.py @@ -1,3 +1,5 @@ +import torch + from vel.api import OptimizedModel, VelOptimizer, OptimizerFactory, BatchInfo from vel.rl.api import Rollout @@ -22,6 +24,10 @@ def act(self, observation, state=None, deterministic=False) -> dict: """ raise NotImplementedError + def reset_episodic_state(self, dones: torch.Tensor): + """ Called by the rollout worker, whenever episode is finished """ + pass + def create_optimizer(self, optimizer_factory: OptimizerFactory) -> VelOptimizer: """ Create optimizer for the purpose of optimizing this model """ parameters = filter(lambda p: p.requires_grad, self.parameters()) diff --git a/vel/rl/env_roller/trajectory_replay_env_roller.py b/vel/rl/env_roller/trajectory_replay_env_roller.py index b2bd9092..a9cdc06b 100644 --- a/vel/rl/env_roller/trajectory_replay_env_roller.py +++ b/vel/rl/env_roller/trajectory_replay_env_roller.py @@ -74,6 +74,8 @@ def rollout(self, batch_info: BatchInfo, number_of_steps: int) -> Rollout: dones_tensor = torch.from_numpy(new_dones.astype(np.float32)).clone() accumulator.add('dones', dones_tensor) + self.actor.reset_states(dones_tensor) + 
self.last_observation_cpu = torch.from_numpy(new_obs).clone() self.last_observation = self.last_observation_cpu.to(self.device) accumulator.add('rewards', torch.from_numpy(new_rewards.astype(np.float32)).clone()) diff --git a/vel/rl/env_roller/transition_replay_env_roller.py b/vel/rl/env_roller/transition_replay_env_roller.py index 64e48f02..37c65c2f 100644 --- a/vel/rl/env_roller/transition_replay_env_roller.py +++ b/vel/rl/env_roller/transition_replay_env_roller.py @@ -89,6 +89,8 @@ def rollout(self, batch_info: BatchInfo, number_of_steps: int) -> Rollout: dones_tensor = torch.from_numpy(new_dones.astype(np.float32)).clone() accumulator.add('dones', dones_tensor) + self.actor.reset_states(dones_tensor) + self.accumulated_returns = self.accumulated_returns * (1.0 - new_dones.astype(np.float32)) self.last_observation_cpu = torch.from_numpy(new_obs).clone() diff --git a/vel/rl/module/actor_critic_policy.py b/vel/rl/module/actor_critic_policy.py new file mode 100644 index 00000000..dc6ae23f --- /dev/null +++ b/vel/rl/module/actor_critic_policy.py @@ -0,0 +1,87 @@ +import itertools as it + +import gym +import torch + +from vel.api import Network, BackboneNetwork + + +from vel.rl.module.head.deterministic_action_head import DeterministicActionHead +from vel.rl.module.head.deterministic_critic_head import DeterministicCriticHead + + +class ActorCriticPolicy(Network): + """ Deterministic Policy Gradient - model """ + + def __init__(self, input_net: BackboneNetwork, policy_net: BackboneNetwork, + value_net: BackboneNetwork, action_space: gym.Space): + super().__init__() + + self.input_net = input_net + self.policy_backbone = policy_net + self.value_backbone = value_net + + self.action_head = DeterministicActionHead( + input_dim=self.policy_backbone.size_hints().assert_single().last(), + action_space=action_space + ) + + self.critic_head = DeterministicCriticHead( + input_dim=self.value_backbone.size_hints().assert_single().last() + ) + + def layer_groups(self): + """ Grouped layers for optimization purposes """ + return [ + [self.input_net, self.policy_backbone, self.action_head], + [self.input_net, self.value_backbone, self.critic_head], + ] + + def reset_weights(self): + """ Initialize properly model weights """ + self.input_net.reset_weights() + self.policy_backbone.reset_weights() + self.value_backbone.reset_weights() + self.action_head.reset_weights() + self.critic_head.reset_weights() + + def forward(self, observations, input_actions=None): + """ Calculate model outputs """ + observations = self.input_net(observations) + + if input_actions is not None: + actions = input_actions + + value_hidden = self.value_backbone((observations, actions)) + + values = self.critic_head(value_hidden) + else: + policy_hidden = self.policy_backbone(observations) + actions = self.action_head(policy_hidden) + + # value_input = torch.cat([observations, actions], dim=1) + value_hidden = self.value_backbone((observations, actions)) + + values = self.critic_head(value_hidden) + + return actions, values + + def policy_parameters(self): + """ Parameters of policy """ + return it.chain(self.input_net(), self.policy_backbone.parameters(), self.action_head.parameters()) + + def value_parameters(self): + """ Parameters of policy """ + return it.chain(self.input_net(), self.value_backbone.parameters(), self.critic_head.parameters()) + + def value(self, observation, input_actions=None): + """ Calculate value for given state """ + action, value = self(observation, input_actions) + return value + + def action(self, 
observations): + """ Calculate value for given state """ + observations = self.input_net(observations) + policy_hidden = self.policy_backbone(observations) + action = self.action_head(policy_hidden) + return action diff --git a/vel/rl/module/noise/ou_noise.py b/vel/rl/module/noise/ou_noise.py index edda86f8..721b8772 100644 --- a/vel/rl/module/noise/ou_noise.py +++ b/vel/rl/module/noise/ou_noise.py @@ -1,7 +1,6 @@ import gym import numpy as np import torch -import torch.nn as nn from vel.api import Network from vel.util.process import OrnsteinUhlenbeckNoiseProcess @@ -21,13 +20,13 @@ def __init__(self, std_dev: float, action_space: gym.Space): self.register_buffer('low_tensor', torch.from_numpy(self.action_space.low).unsqueeze(0)) self.register_buffer('high_tensor', torch.from_numpy(self.action_space.high).unsqueeze(0)) - def reset_training_state(self, dones, batch_info): + def reset_episodic_state(self, dones): """ A hook for a model to react when during training episode is finished """ - for idx, done in enumerate(dones): + for idx, done in enumerate(dones.cpu()): if done > 0.5: self.processes[idx].reset() - def forward(self, actions, batch_info): + def forward(self, actions): """ Return model step after applying noise """ while len(self.processes) < actions.shape[0]: len_action_space = self.action_space.shape[-1] diff --git a/vel/rl/module/test/test_action_head.py b/vel/rl/module/test/test_action_head.py index b0364e5c..8d80e3cc 100644 --- a/vel/rl/module/test/test_action_head.py +++ b/vel/rl/module/test/test_action_head.py @@ -7,7 +7,7 @@ import torch.nn.functional as F import torch.distributions as d -from vel.rl.module.stochastic_action_head import DiagGaussianActionHead, CategoricalActionHead +from vel.rl.module.head.stochastic_action_head import DiagGaussianActionHead, CategoricalActionHead def test_sample_diag_gaussian(): diff --git a/vel/rl/policy/a2c.py b/vel/rl/policy/a2c.py index 7b23d33d..299e8c2b 100644 --- a/vel/rl/policy/a2c.py +++ b/vel/rl/policy/a2c.py @@ -3,7 +3,7 @@ import torch.nn.functional as F from vel.metric.base import AveragingNamedMetric -from vel.util.situational import observation_space_to_size_hint +from vel.util.situational import gym_space_to_size_hint from vel.util.stats import explained_variance from vel.api import ModelFactory, BatchInfo, BackboneNetwork @@ -126,7 +126,7 @@ def instantiate(self, **extra_args): action_space = extra_args.pop('action_space') observation_space = extra_args.pop('observation_space') - size_hint = observation_space_to_size_hint(observation_space) + size_hint = gym_space_to_size_hint(observation_space) net = self.net_factory.instantiate(size_hint=size_hint, **extra_args) diff --git a/vel/rl/policy/acer.py b/vel/rl/policy/acer.py index b468eb30..e1120fa7 100644 --- a/vel/rl/policy/acer.py +++ b/vel/rl/policy/acer.py @@ -3,11 +3,11 @@ import torch import torch.nn.functional as F -from vel.api import BackboneNetwork, ModelFactory, BatchInfo +from vel.api import BackboneNetwork, ModelFactory, BatchInfo, OptimizerFactory, VelOptimizer from vel.metric.base import AveragingNamedMetric from vel.rl.api import Trajectories, RlPolicy, Rollout from vel.rl.module.q_stochastic_policy import QStochasticPolicy -from vel.util.situational import observation_space_to_size_hint +from vel.util.situational import gym_space_to_size_hint def select_indices(tensor, indices): @@ -40,9 +40,15 @@ def __init__(self, net: BackboneNetwork, target_net: typing.Optional[BackboneNet if self.trust_region: self.target_policy = QStochasticPolicy(target_net, 
action_space) + self.target_policy.requires_grad_(False) else: self.target_policy = None + def create_optimizer(self, optimizer_factory: OptimizerFactory) -> VelOptimizer: + """ Create optimizer for the purpose of optimizing this model """ + parameters = filter(lambda p: p.requires_grad, self.policy.parameters()) + return optimizer_factory.instantiate(parameters) + def train(self, mode=True): """ Override train to make sure target model is always in eval mode """ self.policy.train(mode) @@ -261,7 +267,7 @@ def instantiate(self, **extra_args): action_space = extra_args.pop('action_space') observation_space = extra_args.pop('observation_space') - size_hint = observation_space_to_size_hint(observation_space) + size_hint = gym_space_to_size_hint(observation_space) net = self.net_factory.instantiate(size_hint=size_hint, **extra_args) diff --git a/vel/rl/policy/ddpg.py b/vel/rl/policy/ddpg.py new file mode 100644 index 00000000..0e011ecb --- /dev/null +++ b/vel/rl/policy/ddpg.py @@ -0,0 +1,197 @@ +import typing + +import gym +import torch +import torch.autograd +import torch.nn as nn +import torch.nn.functional as F + +import vel.util.module_util as mu + +from vel.api import BackboneNetwork, BatchInfo, ModelFactory, OptimizerFactory, VelOptimizer, SizeHints +from vel.metric.base import AveragingNamedMetric +from vel.rl.api import RlPolicy, Rollout +from vel.rl.module.actor_critic_policy import ActorCriticPolicy +from vel.rl.module.noise.ou_noise import OuNoise +from vel.util.situational import gym_space_to_size_hint + + +class DDPG(RlPolicy): + """ Deep Deterministic Policy Gradient (DDPG) - policy gradient calculations """ + + def __init__(self, net: BackboneNetwork, target_net: BackboneNetwork, action_space: gym.Space, + discount_factor: float, tau: float, noise_std_dev: float): + super().__init__(discount_factor) + + self.net = net + self.target_net = target_net + + self.tau = tau + self.discount_factor = discount_factor + + self.action_noise = OuNoise(std_dev=noise_std_dev, action_space=action_space) + + def train(self, mode=True): + """ Override train to make sure target model is always in eval mode """ + self.net.train(mode) + self.target_net.train(False) + + def reset_weights(self): + """ Initialize properly model weights """ + self.net.reset_weights() + self.target_net.load_state_dict(self.net.state_dict()) + + def reset_episodic_state(self, dones: torch.Tensor): + """ Called by the rollout worker, whenever episode is finished """ + self.action_noise.reset_episodic_state(dones) + + def create_optimizer(self, optimizer_factory: OptimizerFactory) -> VelOptimizer: + """ Create optimizer for the purpose of optimizing this model """ + parameter_groups = mu.to_parameter_groups(self.net.layer_groups()) + return optimizer_factory.instantiate_parameter_groups(parameter_groups) + + def forward(self, observation, state=None): + """ Calculate model outputs """ + return self.net(observation) + + def act(self, observation, state=None, deterministic=False) -> dict: + """ Select actions based on model's output """ + action, value = self(observation) + + if deterministic: + noisy_action = action + else: + noisy_action = self.action_noise(action) + + return { + 'actions': noisy_action, + 'values': value + } + + def calculate_gradient(self, batch_info: BatchInfo, rollout: Rollout) -> dict: + """ Calculate loss of the supplied rollout """ + rollout = rollout.to_transitions() + + dones = rollout.batch_tensor('dones') + rewards = rollout.batch_tensor('rewards') + observations_next = 
rollout.batch_tensor('observations_next') + actions = rollout.batch_tensor('actions') + observations = rollout.batch_tensor('observations') + + # Calculate value loss - or critic loss + with torch.no_grad(): + target_next_value = self.target_net.value(observations_next) + target_value = rewards + (1.0 - dones) * self.discount_factor * target_next_value + + # Value estimation error vs the target network + model_value = self.net.value(observations, actions) + value_loss = F.mse_loss(model_value, target_value) + + # It may seem a bit tricky what I'm doing here, but the underlying idea is simple + # All other implementations I found keep two separate optimizers for actor and critic + # and update them separately + # What I'm trying to do is to optimize them both with a single optimizer + # but I need to make sure gradients flow correctly + # From critic loss to critic network only and from actor loss to actor network only + + # Backpropagate value loss to critic only + value_loss.backward() + + model_action = self.net.action(observations) + model_action_value = self.net.value(observations, model_action) + + policy_loss = -model_action_value.mean() + + model_action_grad = torch.autograd.grad(policy_loss, model_action)[0] + + # Backpropagate actor loss to actor only + model_action.backward(gradient=model_action_grad) + + return { + 'policy_loss': policy_loss.item(), + 'value_loss': value_loss.item(), + } + + def post_optimization_step(self, batch_info: BatchInfo, rollout: Rollout): + """ Steps to take after optimization has been done""" + # Update target model + for model_param, target_param in zip(self.net.parameters(), self.target_net.parameters()): + # EWMA average model update + target_param.data.mul_(1 - self.tau).add_(model_param.data * self.tau) + + def metrics(self) -> list: + """ List of metrics to track for this learning process """ + return [ + AveragingNamedMetric("value_loss"), + AveragingNamedMetric("policy_loss"), + ] + + +class DDPGFactory(ModelFactory): + """ Factory for the DDPG policy """ + + def __init__(self, actor_net: ModelFactory, critic_net: ModelFactory, + discount_factor: float, tau: float, noise_std_dev: float, + input_net: typing.Optional[ModelFactory] = None): + self.actor_net_factory = actor_net + self.critic_net_factory = critic_net + self.input_net_factory = input_net + + self.discount_factor = discount_factor + self.tau = tau + self.noise_std_dev = noise_std_dev + + def instantiate(self, **extra_args): + """ Instantiate the model """ + action_space = extra_args.pop('action_space') + observation_space = extra_args.pop('observation_space') + + size_hint = gym_space_to_size_hint(observation_space) + action_hint = gym_space_to_size_hint(action_space) + + if self.input_net_factory is None: + target_input_net = input_net = nn.Identity() + else: + input_net = self.input_net_factory.instantiate(size_hint=size_hint, **extra_args) + target_input_net = self.input_net_factory.instantiate(size_hint=size_hint, **extra_args) + size_hint = input_net.size_hints() + + critic_size_hint = SizeHints((size_hint.unwrap(), action_hint.unwrap())) + + actor_net = self.actor_net_factory.instantiate(size_hint=size_hint, **extra_args) + critic_net = self.critic_net_factory.instantiate(size_hint=critic_size_hint, **extra_args) + + net = ActorCriticPolicy( + input_net, actor_net, critic_net, action_space + ) + + target_actor_net = self.actor_net_factory.instantiate(size_hint=size_hint, **extra_args) + target_critic_net = self.critic_net_factory.instantiate(size_hint=critic_size_hint, 
**extra_args) + + target_net = ActorCriticPolicy( + target_input_net, target_actor_net, target_critic_net, action_space + ) + + return DDPG( + net=net, + target_net=target_net, + action_space=action_space, + discount_factor=self.discount_factor, + tau=self.tau, + noise_std_dev=self.noise_std_dev + ) + + +def create(actor_net: ModelFactory, critic_net: ModelFactory, + discount_factor: float, tau: float, noise_std_dev: float, + input_net: typing.Optional[ModelFactory] = None + ): + """ Vel factory function """ + return DDPGFactory( + actor_net=actor_net, + critic_net=critic_net, + input_net=input_net, + discount_factor=discount_factor, + tau=tau, + noise_std_dev=noise_std_dev + ) diff --git a/vel/rl/policy/dqn.py b/vel/rl/policy/dqn.py index cb563e5d..243bb1e2 100644 --- a/vel/rl/policy/dqn.py +++ b/vel/rl/policy/dqn.py @@ -6,13 +6,13 @@ import torch.nn.functional as F import torch.nn.utils -from vel.api import ModelFactory, BackboneNetwork, BatchInfo, Schedule +from vel.api import ModelFactory, BackboneNetwork, BatchInfo, Schedule, OptimizerFactory, VelOptimizer from vel.function.constant import ConstantSchedule from vel.metric import AveragingNamedMetric from vel.rl.api import RlPolicy, Rollout from vel.rl.module.q_policy import QPolicy from vel.rl.module.noise.eps_greedy import EpsGreedy -from vel.util.situational import observation_space_to_size_hint +from vel.util.situational import gym_space_to_size_hint class DQN(RlPolicy): @@ -25,6 +25,7 @@ def __init__(self, net: BackboneNetwork, target_net: BackboneNetwork, action_spa self.model = QPolicy(net=net, action_space=action_space, dueling_dqn=dueling_dqn) self.target_model = QPolicy(net=target_net, action_space=action_space, dueling_dqn=dueling_dqn) + self.target_model.requires_grad_(False) self.double_dqn = double_dqn self.target_update_frequency = target_update_frequency @@ -37,6 +38,11 @@ def __init__(self, net: BackboneNetwork, target_net: BackboneNetwork, action_spa self.epsilon_value = self.epsilon_schedule.value(0.0) self.action_noise = EpsGreedy(action_space=action_space) + def create_optimizer(self, optimizer_factory: OptimizerFactory) -> VelOptimizer: + """ Create optimizer for the purpose of optimizing this model """ + parameters = filter(lambda p: p.requires_grad, self.model.parameters()) + return optimizer_factory.instantiate(parameters) + def train(self, mode=True): """ Override train to make sure target model is always in eval mode """ self.model.train(mode) @@ -142,7 +148,7 @@ def instantiate(self, **extra_args): action_space = extra_args.pop('action_space') observation_space = extra_args.pop('observation_space') - size_hint = observation_space_to_size_hint(observation_space) + size_hint = gym_space_to_size_hint(observation_space) net = self.net_factory.instantiate(size_hint=size_hint, **extra_args) target_net = self.net_factory.instantiate(size_hint=size_hint, **extra_args) diff --git a/vel/rl/policy/ppo.py b/vel/rl/policy/ppo.py index ea09a29c..92a8cd23 100644 --- a/vel/rl/policy/ppo.py +++ b/vel/rl/policy/ppo.py @@ -4,7 +4,7 @@ import numbers from vel.api import BatchInfo, ModelFactory, BackboneNetwork -from vel.util.situational import observation_space_to_size_hint +from vel.util.situational import gym_space_to_size_hint from vel.util.stats import explained_variance from vel.function.constant import ConstantSchedule from vel.metric.base import AveragingNamedMetric @@ -169,7 +169,7 @@ def instantiate(self, **extra_args): action_space = extra_args.pop('action_space') observation_space = 
extra_args.pop('observation_space') - size_hint = observation_space_to_size_hint(observation_space) + size_hint = gym_space_to_size_hint(observation_space) net = self.net_factory.instantiate(size_hint=size_hint, **extra_args) diff --git a/vel/rl/policy/purgatory/ddpg.py b/vel/rl/policy/purgatory/ddpg.py deleted file mode 100644 index 1e47b5a0..00000000 --- a/vel/rl/policy/purgatory/ddpg.py +++ /dev/null @@ -1,94 +0,0 @@ -import torch -import typing -import torch.autograd -import torch.nn.functional as F - -from vel.rl.api import OptimizerAlgoBase -from vel.metric.base import AveragingNamedMetric - - -class DeepDeterministicPolicyGradient(OptimizerAlgoBase): - """ Deep Deterministic Policy Gradient (DDPG) - policy gradient calculations """ - - def __init__(self, model_factory, discount_factor: float, tau: float, max_grad_norm: typing.Optional[float] = None): - super().__init__(max_grad_norm) - - self.model_factory = model_factory - self.tau = tau - self.discount_factor = discount_factor - - self.target_model = None - - def initialize(self, training_info, model, environment, device): - """ Initialize algo from reinforcer settings """ - self.target_model = self.model_factory.instantiate(action_space=environment.action_space).to(device) - self.target_model.load_state_dict(model.state_dict()) - self.target_model.eval() - - def calculate_gradient(self, batch_info, device, model, rollout): - """ Calculate loss of the supplied rollout """ - rollout = rollout.to_transitions() - - dones = rollout.batch_tensor('dones') - rewards = rollout.batch_tensor('rewards') - observations_next = rollout.batch_tensor('observations_next') - actions = rollout.batch_tensor('actions') - observations = rollout.batch_tensor('observations') - - # Calculate value loss - or critic loss - with torch.no_grad(): - target_next_value = self.target_model.value(observations_next) - target_value = rewards + (1.0 - dones) * self.discount_factor * target_next_value - - # Value estimation error vs the target network - model_value = model.value(observations, actions) - value_loss = F.mse_loss(model_value, target_value) - - # It may seem a bit tricky what I'm doing here, but the underlying idea is simple - # All other implementations I found keep two separate optimizers for actor and critic - # and update them separately - # What I'm trying to do is to optimize them both with a single optimizer - # but I need to make sure gradients flow correctly - # From critic loss to critic network only and from actor loss to actor network only - - # Backpropagate value loss to critic only - value_loss.backward() - - model_action = model.action(observations) - model_action_value = model.value(observations, model_action) - - policy_loss = -model_action_value.mean() - - model_action_grad = torch.autograd.grad(policy_loss, model_action)[0] - - # Backpropagate actor loss to actor only - model_action.backward(gradient=model_action_grad) - - return { - 'policy_loss': policy_loss.item(), - 'value_loss': value_loss.item(), - } - - def post_optimization_step(self, batch_info, device, model, rollout): - """ Steps to take after optimization has been done""" - # Update target model - for model_param, target_param in zip(model.parameters(), self.target_model.parameters()): - # EWMA average model update - target_param.data.mul_(1 - self.tau).add_(model_param.data * self.tau) - - def metrics(self) -> list: - """ List of metrics to track for this learning process """ - return [ - AveragingNamedMetric("value_loss"), - AveragingNamedMetric("policy_loss"), - ] - - 
-def create(model, discount_factor: float, tau: float, max_grad_norm: float = None): - """ Vel factory function """ - return DeepDeterministicPolicyGradient( - tau=tau, - discount_factor=discount_factor, - model_factory=model, - max_grad_norm=max_grad_norm - ) diff --git a/vel/rl/policy/rainbow.py b/vel/rl/policy/rainbow.py index c28c3b29..968588c0 100644 --- a/vel/rl/policy/rainbow.py +++ b/vel/rl/policy/rainbow.py @@ -2,11 +2,11 @@ import torch import torch.nn.utils -from vel.api import ModelFactory, BackboneNetwork, BatchInfo +from vel.api import ModelFactory, BackboneNetwork, BatchInfo, OptimizerFactory, VelOptimizer from vel.metric import AveragingNamedMetric from vel.rl.api import RlPolicy, Rollout from vel.rl.module.rainbow_policy import RainbowPolicy -from vel.util.situational import observation_space_to_size_hint +from vel.util.situational import gym_space_to_size_hint class Rainbow(RlPolicy): @@ -36,6 +36,7 @@ def __init__(self, net: BackboneNetwork, target_net: BackboneNetwork, action_spa initial_std_dev=initial_std_dev, factorized_noise=factorized_noise ) + self.target_model.requires_grad_(False) self.discount_factor = discount_factor self.target_update_frequency = target_update_frequency @@ -49,6 +50,11 @@ def __init__(self, net: BackboneNetwork, target_net: BackboneNetwork, action_spa self.register_buffer('support_atoms', self.model.support_atoms.clone()) self.atom_delta = self.model.atom_delta + def create_optimizer(self, optimizer_factory: OptimizerFactory) -> VelOptimizer: + """ Create optimizer for the purpose of optimizing this model """ + parameters = filter(lambda p: p.requires_grad, self.model.parameters()) + return optimizer_factory.instantiate(parameters) + def train(self, mode=True): """ Override train to make sure target model is always in eval mode """ self.model.train(mode) @@ -214,7 +220,7 @@ def instantiate(self, **extra_args): action_space = extra_args.pop('action_space') observation_space = extra_args.pop('observation_space') - size_hint = observation_space_to_size_hint(observation_space) + size_hint = gym_space_to_size_hint(observation_space) # TODO(jerry): Push noisy net parameters down the stack here net = self.net_factory.instantiate(size_hint=size_hint, **extra_args) diff --git a/vel/rl/policy/trpo.py b/vel/rl/policy/trpo.py index 8c041c6f..c7fe2cda 100644 --- a/vel/rl/policy/trpo.py +++ b/vel/rl/policy/trpo.py @@ -16,7 +16,7 @@ from vel.rl.discount_bootstrap import discount_bootstrap_gae from vel.rl.module.head.stochastic_action_head import make_stockastic_action_head from vel.rl.module.head.value_head import ValueHead -from vel.util.situational import observation_space_to_size_hint +from vel.util.situational import gym_space_to_size_hint def p2v(params): @@ -375,7 +375,7 @@ def instantiate(self, **extra_args): action_space = extra_args.pop('action_space') observation_space = extra_args.pop('observation_space') - size_hint = observation_space_to_size_hint(observation_space) + size_hint = gym_space_to_size_hint(observation_space) if self.input_net is None: input_net = None diff --git a/vel/rl/util/actor.py b/vel/rl/util/actor.py index 55b3950b..e23097d8 100644 --- a/vel/rl/util/actor.py +++ b/vel/rl/util/actor.py @@ -26,13 +26,14 @@ def act(self, observation, advance_state=True, deterministic=False): return result - def reset_states(self, dones): + def reset_states(self, dones: torch.Tensor): """ Reset states given dones """ + self.policy.reset_episodic_state(dones) + if not self.policy.is_stateful: return dones = dones.to(self.device) - self.state = 
self.policy.reset_state(self.state, dones) def value(self, observation): diff --git a/vel/rl/xpolicy/purgatory/deterministic_policy.py b/vel/rl/xpolicy/purgatory/deterministic_policy.py deleted file mode 100644 index 58d908fe..00000000 --- a/vel/rl/xpolicy/purgatory/deterministic_policy.py +++ /dev/null @@ -1,164 +0,0 @@ -import gym -import itertools as it -import torch -import typing - -from vel.api import LinearBackboneModel, ModelFactory, BackboneModel -from vel.module.input.identity import IdentityFactory -from vel.rl.api import Rollout, Evaluator, RlPolicy -from vel.rl.module.deterministic_action_head import DeterministicActionHead -from vel.rl.module.deterministic_critic_head import DeterministicCriticHead - - -class DeterministicPolicyEvaluator(Evaluator): - """ Evaluator for DeterministicPolicyModel """ - - def __init__(self, model: 'DeterministicPolicyModel', rollout: Rollout): - super().__init__(rollout) - - self.model = model - - @Evaluator.provides('model:values_next') - def model_estimated_values_next(self): - """ Estimate state-value of the transition next state """ - observations = self.get('rollout:observations_next') - action, value = self.model(observations) - return value - - @Evaluator.provides('model:actions') - def model_actions(self): - """ Estimate state-value of the transition next state """ - observations = self.get('rollout:observations') - model_action = self.model.action(observations) - return model_action - - @Evaluator.provides('model:model_action:q') - def model_model_action_q(self): - observations = self.get('rollout:observations') - model_actions = self.get('model:actions') - return self.model.value(observations, model_actions) - - @Evaluator.provides('model:action:q') - def model_action_q(self): - observations = self.get('rollout:observations') - rollout_actions = self.get('rollout:actions') - return self.model.value(observations, rollout_actions) - - -class DeterministicPolicyModel(RlPolicy): - """ Deterministic Policy Gradient - model """ - - def __init__(self, input_block: BackboneModel, policy_backbone: LinearBackboneModel, - value_backbone: LinearBackboneModel, action_space: gym.Space): - super().__init__() - - self.input_block = input_block - self.policy_backbone = policy_backbone - self.value_backbone = value_backbone - - self.action_head = DeterministicActionHead(self.policy_backbone.output_dim, action_space) - self.critic_head = DeterministicCriticHead(self.value_backbone.output_dim) - - def reset_weights(self): - """ Initialize properly model weights """ - self.input_block.reset_weights() - self.policy_backbone.reset_weights() - self.value_backbone.reset_weights() - self.action_head.reset_weights() - self.critic_head.reset_weights() - - def forward(self, observations, input_actions=None): - """ Calculate model outputs """ - observations = self.input_block(observations) - - if input_actions is not None: - actions = input_actions - - value_input = torch.cat([observations, actions], dim=1) - value_hidden = self.value_backbone(value_input) - - values = self.critic_head(value_hidden) - else: - policy_hidden = self.policy_backbone(observations) - actions = self.action_head(policy_hidden) - - value_input = torch.cat([observations, actions], dim=1) - value_hidden = self.value_backbone(value_input) - - values = self.critic_head(value_hidden) - - return actions, values - - def policy_parameters(self): - """ Parameters of policy """ - return it.chain(self.policy_backbone.parameters(), self.action_head.parameters()) - - def value_parameters(self): - """ 
Parameters of policy """ - return it.chain(self.value_backbone.parameters(), self.critic_head.parameters()) - - def get_layer_groups(self): - """ Return layers grouped """ - return [ - [self.policy_backbone, self.action_head], - [self.value_backbone, [y for (x, y) in self.critic_head.named_parameters() if x.endswith('bias')]], - # OpenAI regularizes only weight on the last layer. I'm just replicating that - [[y for (x, y) in self.critic_head.named_parameters() if x.endswith('weight')]] - ] - - def step(self, observations): - """ Select actions based on model's output """ - action, value = self(observations) - - return { - 'actions': action, - 'values': value - } - - def value(self, observation, input_actions=None): - """ Calculate value for given state """ - action, value = self(observation, input_actions) - return value - - def action(self, observations): - """ Calculate value for given state """ - observations = self.input_block(observations) - policy_hidden = self.policy_backbone(observations) - action = self.action_head(policy_hidden) - return action - - def evaluate(self, rollout: Rollout) -> Evaluator: - """ Evaluate model on a rollout """ - return DeterministicPolicyEvaluator(self, rollout) - - -class DeterministicPolicyModelFactory(ModelFactory): - """ Factory class for policy gradient models """ - def __init__(self, input_block: ModelFactory, policy_backbone: ModelFactory, value_backbone: ModelFactory): - self.input_block = input_block - self.policy_backbone = policy_backbone - self.value_backbone = value_backbone - - def instantiate(self, **extra_args): - """ Instantiate the model """ - input_block = self.input_block.instantiate() - policy_backbone = self.policy_backbone.instantiate(**extra_args) - value_backbone = self.value_backbone.instantiate(**extra_args) - - return DeterministicPolicyModel( - input_block=input_block, - policy_backbone=policy_backbone, - value_backbone=value_backbone, - action_space=extra_args['action_space'], - ) - - -def create(policy_backbone: ModelFactory, value_backbone: ModelFactory, - input_block: typing.Optional[ModelFactory] = None): - """ Vel factory function """ - if input_block is None: - input_block = IdentityFactory() - - return DeterministicPolicyModelFactory( - input_block=input_block, policy_backbone=policy_backbone, value_backbone=value_backbone - ) diff --git a/vel/util/situational.py b/vel/util/situational.py index c0f9c55a..4a4cb402 100644 --- a/vel/util/situational.py +++ b/vel/util/situational.py @@ -30,7 +30,7 @@ def process_environment_settings(default_dictionary: dict, settings: typing.Opti return result_dict -def observation_space_to_size_hint(space: gym.Space) -> SizeHints: +def gym_space_to_size_hint(space: gym.Space) -> SizeHints: """ Convert Gym observation space to size hints """ if isinstance(space, gym.spaces.Box): return size_hint_from_shape(space.shape) From d6d286a21e9863601c75ff70b4c1b544bd13736f Mon Sep 17 00:00:00 2001 From: Million Integrals Date: Thu, 3 Oct 2019 14:18:34 -0700 Subject: [PATCH 114/162] Standardized model naming. 
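For reference, the DDPG policy introduced earlier in this series drives the actor and the critic from a single optimizer: the critic loss is backpropagated normally, while the actor only receives the gradient of -Q(s, pi(s)) taken with respect to the action and pushed back through backward(gradient=...). A minimal, self-contained sketch of that gradient routing follows; the module names, sizes, learning rate and batch shapes are illustrative assumptions, not code from this repository.

    import torch
    import torch.nn as nn
    import torch.nn.functional as F

    # Toy networks; names and sizes are illustrative only.
    actor = nn.Linear(4, 2)                       # pi(s) -> action
    critic = nn.Linear(4 + 2, 1)                  # Q(s, a) -> value
    optimizer = torch.optim.Adam(
        list(actor.parameters()) + list(critic.parameters()), lr=1e-3
    )

    obs = torch.randn(8, 4)
    rollout_actions = torch.randn(8, 2)
    target_q = torch.randn(8, 1)

    optimizer.zero_grad()

    # Critic loss: gradients reach the critic parameters only, since
    # neither obs nor rollout_actions require grad.
    value_loss = F.mse_loss(critic(torch.cat([obs, rollout_actions], dim=1)), target_q)
    value_loss.backward()

    # Actor objective: differentiate -Q(s, pi(s)) with respect to the action only
    # (torch.autograd.grad does not accumulate into the critic's .grad),
    # then push that gradient back through the actor.
    action = actor(obs)
    policy_loss = -critic(torch.cat([obs, action], dim=1)).mean()
    action_grad = torch.autograd.grad(policy_loss, action)[0]
    action.backward(gradient=action_grad)

    # A single step then updates actor and critic, each from its own loss.
    optimizer.step()

This mirrors what DDPG.calculate_gradient does with self.net.value(...) and self.net.action(...), and is why its create_optimizer can hand both parameter groups to one optimizer while keeping the two updates separate.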
--- vel/rl/policy/a2c.py | 14 +++++++------- vel/rl/policy/acer.py | 30 +++++++++++++++--------------- vel/rl/policy/dqn.py | 30 +++++++++++++++--------------- vel/rl/policy/ppo.py | 14 +++++++------- vel/rl/policy/rainbow.py | 34 +++++++++++++++++----------------- 5 files changed, 61 insertions(+), 61 deletions(-) diff --git a/vel/rl/policy/a2c.py b/vel/rl/policy/a2c.py index 299e8c2b..ba739527 100644 --- a/vel/rl/policy/a2c.py +++ b/vel/rl/policy/a2c.py @@ -23,23 +23,23 @@ def __init__(self, net: BackboneNetwork, action_space: gym.Space, self.value_coefficient = value_coefficient self.gae_lambda = gae_lambda - self.policy = StochasticPolicy(net, action_space) + self.net = StochasticPolicy(net, action_space) def reset_weights(self): """ Initialize properly model weights """ - self.policy.reset_weights() + self.net.reset_weights() def forward(self, observation, state=None): """ Calculate model outputs """ - return self.policy(observation) + return self.net(observation) def act(self, observation, state=None, deterministic=False): """ Select actions based on model's output """ action_pd_params, value_output = self(observation) - actions = self.policy.action_head.sample(action_pd_params, deterministic=deterministic) + actions = self.net.action_head.sample(action_pd_params, deterministic=deterministic) # log likelihood of selected action - logprobs = self.policy.action_head.logprob(actions, action_pd_params) + logprobs = self.net.action_head.logprob(actions, action_pd_params) return { 'actions': actions, @@ -79,8 +79,8 @@ def calculate_gradient(self, batch_info: BatchInfo, rollout: Rollout) -> dict: pd_params, model_values = self(observations) - log_probs = self.policy.action_head.logprob(actions, pd_params) - entropy = self.policy.action_head.entropy(pd_params) + log_probs = self.net.action_head.logprob(actions, pd_params) + entropy = self.net.action_head.entropy(pd_params) # Actual calculations. 
Pretty trivial policy_loss = -torch.mean(advantages * log_probs) diff --git a/vel/rl/policy/acer.py b/vel/rl/policy/acer.py index e1120fa7..433a4ea2 100644 --- a/vel/rl/policy/acer.py +++ b/vel/rl/policy/acer.py @@ -36,44 +36,44 @@ def __init__(self, net: BackboneNetwork, target_net: typing.Optional[BackboneNet self.average_model_alpha = average_model_alpha self.trust_region_delta = trust_region_delta - self.policy = QStochasticPolicy(net, action_space) + self.net = QStochasticPolicy(net, action_space) if self.trust_region: - self.target_policy = QStochasticPolicy(target_net, action_space) - self.target_policy.requires_grad_(False) + self.target_net = QStochasticPolicy(target_net, action_space) + self.target_net.requires_grad_(False) else: - self.target_policy = None + self.target_net = None def create_optimizer(self, optimizer_factory: OptimizerFactory) -> VelOptimizer: """ Create optimizer for the purpose of optimizing this model """ - parameters = filter(lambda p: p.requires_grad, self.policy.parameters()) + parameters = filter(lambda p: p.requires_grad, self.net.parameters()) return optimizer_factory.instantiate(parameters) def train(self, mode=True): """ Override train to make sure target model is always in eval mode """ - self.policy.train(mode) + self.net.train(mode) if self.trust_region: - self.target_policy.train(False) + self.target_net.train(False) def reset_weights(self): """ Initialize properly model weights """ - self.policy.reset_weights() + self.net.reset_weights() if self.trust_region: - self.target_policy.load_state_dict(self.policy.state_dict()) + self.target_net.load_state_dict(self.net.state_dict()) def forward(self, observation, state=None): """ Calculate model outputs """ - return self.policy(observation) + return self.net(observation) def act(self, observation, state=None, deterministic=False): """ Select actions based on model's output """ logprobs, q = self(observation) - actions = self.policy.action_head.sample(logprobs, deterministic=deterministic) + actions = self.net.action_head.sample(logprobs, deterministic=deterministic) # log likelihood of selected action - action_logprobs = self.policy.action_head.logprob(actions, logprobs) + action_logprobs = self.net.action_head.logprob(actions, logprobs) values = (torch.exp(logprobs) * q).sum(dim=1) return { @@ -86,7 +86,7 @@ def act(self, observation, state=None, deterministic=False): def update_target_policy(self): """ Update weights of the average model with new model observation """ - for model_param, average_param in zip(self.policy.parameters(), self.target_policy.parameters()): + for model_param, average_param in zip(self.net.parameters(), self.target_net.parameters()): # EWMA average model update average_param.data.mul_(self.average_model_alpha).add_(model_param.data * (1 - self.average_model_alpha)) @@ -145,7 +145,7 @@ def calculate_gradient(self, batch_info: BatchInfo, rollout: Rollout) -> dict: explained_variance = 1 - torch.var(q_retraced - action_q) / torch.var(q_retraced) # Entropy of the policy distribution - policy_entropy = torch.mean(self.policy.action_head.entropy(logprobs)) + policy_entropy = torch.mean(self.net.action_head.entropy(logprobs)) policy_gradient_loss = -torch.mean(advantages * importance_sampling_coefficient * action_logprobs) # Policy gradient bias correction @@ -167,7 +167,7 @@ def calculate_gradient(self, batch_info: BatchInfo, rollout: Rollout) -> dict: if self.trust_region: with torch.no_grad(): - target_logprobs = self.target_policy(observations)[0] + target_logprobs = 
self.target_net(observations)[0] actor_loss = policy_loss - self.entropy_coefficient * policy_entropy q_loss = self.q_coefficient * q_function_loss diff --git a/vel/rl/policy/dqn.py b/vel/rl/policy/dqn.py index 243bb1e2..9bc8dd64 100644 --- a/vel/rl/policy/dqn.py +++ b/vel/rl/policy/dqn.py @@ -23,9 +23,9 @@ def __init__(self, net: BackboneNetwork, target_net: BackboneNetwork, action_spa dueling_dqn: bool, target_update_frequency: int): super().__init__(discount_factor) - self.model = QPolicy(net=net, action_space=action_space, dueling_dqn=dueling_dqn) - self.target_model = QPolicy(net=target_net, action_space=action_space, dueling_dqn=dueling_dqn) - self.target_model.requires_grad_(False) + self.net = QPolicy(net=net, action_space=action_space, dueling_dqn=dueling_dqn) + self.target_net = QPolicy(net=target_net, action_space=action_space, dueling_dqn=dueling_dqn) + self.target_net.requires_grad_(False) self.double_dqn = double_dqn self.target_update_frequency = target_update_frequency @@ -40,27 +40,27 @@ def __init__(self, net: BackboneNetwork, target_net: BackboneNetwork, action_spa def create_optimizer(self, optimizer_factory: OptimizerFactory) -> VelOptimizer: """ Create optimizer for the purpose of optimizing this model """ - parameters = filter(lambda p: p.requires_grad, self.model.parameters()) + parameters = filter(lambda p: p.requires_grad, self.net.parameters()) return optimizer_factory.instantiate(parameters) def train(self, mode=True): """ Override train to make sure target model is always in eval mode """ - self.model.train(mode) - self.target_model.train(False) + self.net.train(mode) + self.target_net.train(False) def reset_weights(self): """ Initialize properly model weights """ - self.model.reset_weights() - self.target_model.load_state_dict(self.model.state_dict()) + self.net.reset_weights() + self.target_net.load_state_dict(self.net.state_dict()) def forward(self, observation, state=None): """ Calculate model outputs """ - return self.model(observation) + return self.net(observation) def act(self, observation, state=None, deterministic=False): """ Select actions based on model's output """ - q_values = self.model(observation) - actions = self.model.q_head.sample(q_values) + q_values = self.net(observation) + actions = self.net.q_head.sample(q_values) noisy_actions = self.action_noise(actions, epsilon=self.epsilon_value, deterministic=deterministic) return { @@ -79,14 +79,14 @@ def calculate_gradient(self, batch_info: BatchInfo, rollout: Rollout) -> dict: assert dones_tensor.dtype == torch.float32 - q = self.model(observations) + q = self.net(observations) with torch.no_grad(): - target_q = self.target_model(observations_next) + target_q = self.target_net(observations_next) if self.double_dqn: # DOUBLE DQN - model_q_next = self.model(observations_next) + model_q_next = self.net(observations_next) # Select largest 'target' value based on action that 'model' selects values = target_q.gather(1, model_q_next.argmax(dim=1, keepdim=True)).squeeze(1) else: @@ -120,7 +120,7 @@ def calculate_gradient(self, batch_info: BatchInfo, rollout: Rollout) -> dict: def post_optimization_step(self, batch_info, rollout): """ Steps to take after optimization has been done""" if batch_info.aggregate_batch_number % self.target_update_frequency == 0: - self.target_model.load_state_dict(self.model.state_dict()) + self.target_net.load_state_dict(self.net.state_dict()) self.epsilon_value = self.epsilon_schedule.value(batch_info['progress']) diff --git a/vel/rl/policy/ppo.py b/vel/rl/policy/ppo.py 
index 92a8cd23..1c922f10 100644 --- a/vel/rl/policy/ppo.py +++ b/vel/rl/policy/ppo.py @@ -31,23 +31,23 @@ def __init__(self, net: BackboneNetwork, action_space: gym.Space, else: self.cliprange = cliprange - self.policy = StochasticPolicy(net, action_space) + self.net = StochasticPolicy(net, action_space) def reset_weights(self): """ Initialize properly model weights """ - self.policy.reset_weights() + self.net.reset_weights() def forward(self, observation, state=None): """ Calculate model outputs """ - return self.policy(observation) + return self.net(observation) def act(self, observation, state=None, deterministic=False): """ Select actions based on model's output """ action_pd_params, value_output = self(observation) - actions = self.policy.action_head.sample(action_pd_params, deterministic=deterministic) + actions = self.net.action_head.sample(action_pd_params, deterministic=deterministic) # log likelihood of selected action - logprobs = self.policy.action_head.logprob(actions, action_pd_params) + logprobs = self.net.action_head.logprob(actions, action_pd_params) return { 'actions': actions, @@ -91,8 +91,8 @@ def calculate_gradient(self, batch_info: BatchInfo, rollout: Rollout) -> dict: # PART 0.1 - Model evaluation pd_params, model_values = self(observations) - model_action_logprobs = self.policy.action_head.logprob(actions, pd_params) - entropy = self.policy.action_head.entropy(pd_params) + model_action_logprobs = self.net.action_head.logprob(actions, pd_params) + entropy = self.net.action_head.entropy(pd_params) # Select the cliprange current_cliprange = self.cliprange.value(batch_info['progress']) diff --git a/vel/rl/policy/rainbow.py b/vel/rl/policy/rainbow.py index 968588c0..f8693131 100644 --- a/vel/rl/policy/rainbow.py +++ b/vel/rl/policy/rainbow.py @@ -17,7 +17,7 @@ def __init__(self, net: BackboneNetwork, target_net: BackboneNetwork, action_spa initial_std_dev: float = 0.4, factorized_noise: bool = True): super().__init__(discount_factor) - self.model = RainbowPolicy( + self.net = RainbowPolicy( net=net, action_space=action_space, vmin=vmin, @@ -27,7 +27,7 @@ def __init__(self, net: BackboneNetwork, target_net: BackboneNetwork, action_spa factorized_noise=factorized_noise ) - self.target_model = RainbowPolicy( + self.target_net = RainbowPolicy( net=target_net, action_space=action_space, vmin=vmin, @@ -36,7 +36,7 @@ def __init__(self, net: BackboneNetwork, target_net: BackboneNetwork, action_spa initial_std_dev=initial_std_dev, factorized_noise=factorized_noise ) - self.target_model.requires_grad_(False) + self.target_net.requires_grad_(False) self.discount_factor = discount_factor self.target_update_frequency = target_update_frequency @@ -47,34 +47,34 @@ def __init__(self, net: BackboneNetwork, target_net: BackboneNetwork, action_spa # self.support_atoms = self.model.q # self.atom_delta = histogram_info['atom_delta'] - self.register_buffer('support_atoms', self.model.support_atoms.clone()) - self.atom_delta = self.model.atom_delta + self.register_buffer('support_atoms', self.net.support_atoms.clone()) + self.atom_delta = self.net.atom_delta def create_optimizer(self, optimizer_factory: OptimizerFactory) -> VelOptimizer: """ Create optimizer for the purpose of optimizing this model """ - parameters = filter(lambda p: p.requires_grad, self.model.parameters()) + parameters = filter(lambda p: p.requires_grad, self.net.parameters()) return optimizer_factory.instantiate(parameters) def train(self, mode=True): """ Override train to make sure target model is always in eval mode """ - 
self.model.train(mode) - self.target_model.train(False) + self.net.train(mode) + self.target_net.train(False) def reset_weights(self): """ Initialize properly model weights """ - self.model.reset_weights() - self.target_model.load_state_dict(self.model.state_dict()) + self.net.reset_weights() + self.target_net.load_state_dict(self.net.state_dict()) def forward(self, observation, state=None): """ Calculate model outputs """ - return self.model(observation) + return self.net(observation) def act(self, observation, state=None, deterministic=False): """ Select actions based on model's output """ self.train(mode=not deterministic) - q_values = self.model(observation) - actions = self.model.q_head.sample(q_values) + q_values = self.net(observation) + actions = self.net.q_head.sample(q_values) return { 'actions': actions, @@ -94,13 +94,13 @@ def calculate_gradient(self, batch_info: BatchInfo, rollout: Rollout) -> dict: assert dones_tensor.dtype == torch.float32 - q = self.model(observations) + q = self.net(observations) with torch.no_grad(): # DOUBLE DQN # Histogram gets returned as logits initially, we need to exp it before projection - target_value_histogram_for_all_actions = self.target_model(observations_next).exp() - model_value_histogram_for_all_actions = self.model(observations_next).exp() + target_value_histogram_for_all_actions = self.target_net(observations_next).exp() + model_value_histogram_for_all_actions = self.net(observations_next).exp() atoms_aligned = self.support_atoms.view(1, 1, self.num_atoms) @@ -192,7 +192,7 @@ def calculate_gradient(self, batch_info: BatchInfo, rollout: Rollout) -> dict: def post_optimization_step(self, batch_info, rollout): """ Steps to take after optimization has been done""" if batch_info.aggregate_batch_number % self.target_update_frequency == 0: - self.target_model.load_state_dict(self.model.state_dict()) + self.target_net.load_state_dict(self.net.state_dict()) def metrics(self) -> list: """ List of metrics to track for this learning process """ From 5d05e58bb6a7229c29c30e780297eadf005c19fe Mon Sep 17 00:00:00 2001 From: Million Integrals Date: Thu, 3 Oct 2019 14:19:37 -0700 Subject: [PATCH 115/162] get_layer_groups -> layer_groups --- vel/model/imagenet/resnet34.py | 4 ++-- vel/model/rnn/multilayer_rnn_sequence_classification.py | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/vel/model/imagenet/resnet34.py b/vel/model/imagenet/resnet34.py index 18bdd667..f9901eba 100644 --- a/vel/model/imagenet/resnet34.py +++ b/vel/model/imagenet/resnet34.py @@ -77,7 +77,7 @@ def unfreeze(self): for idx, child in enumerate(self.model.children()): mu.unfreeze_layer(child) - def get_layer_groups(self): + def layer_groups(self): """ Return layers grouped """ g1 = list(self.model[:self.group_cut_layers[0]]) g2 = list(self.model[self.group_cut_layers[0]:self.group_cut_layers[1]]) @@ -85,7 +85,7 @@ def get_layer_groups(self): return [g1, g2, g3] def create_optimizer(self, optimizer_factory: OptimizerFactory) -> VelOptimizer: - parameters = mu.to_parameter_groups(self.get_layer_groups()) + parameters = mu.to_parameter_groups(self.layer_groups()) return optimizer_factory.instantiate_parameter_groups(parameters) def forward(self, x): diff --git a/vel/model/rnn/multilayer_rnn_sequence_classification.py b/vel/model/rnn/multilayer_rnn_sequence_classification.py index d19f40f3..20f40706 100644 --- a/vel/model/rnn/multilayer_rnn_sequence_classification.py +++ b/vel/model/rnn/multilayer_rnn_sequence_classification.py @@ -123,7 +123,7 @@ def forward(self, 
sequence): return self.output_activation(data) - def get_layer_groups(self): + def layer_groups(self): return [ self.input_block, self.rnn_layers, @@ -133,7 +133,7 @@ def get_layer_groups(self): def create_optimizer(self, optimizer_factory: OptimizerFactory) -> VelOptimizer: """ Create optimizer for the purpose of optimizing this model """ - parameters = mu.to_parameter_groups(self.get_layer_groups()) + parameters = mu.to_parameter_groups(self.layer_groups()) return optimizer_factory.instantiate_parameter_groups(parameters) @property From bfba7cc49a4e99b8903d2dab158d643a633191dd Mon Sep 17 00:00:00 2001 From: Million Integrals Date: Thu, 3 Oct 2019 15:56:41 -0700 Subject: [PATCH 116/162] Brought back the RNN RL training. --- .../atari/{purgatory => }/atari_a2c_lstm.yaml | 24 ++-- .../atari/{purgatory => }/atari_ppo_gru.yaml | 26 ++--- vel/module/rnn_cell.py | 61 ---------- vel/net/modular.py | 28 ++++- vel/rl/env_roller/step_env_roller.py | 18 ++- .../trajectory_replay_env_roller.py | 2 +- .../layer/{purgatory => }/nature_cnn_rnn.py | 3 + vel/rl/layer/rnn_cell.py | 105 ++++++++++++++++++ vel/rl/module/stochastic_rnn_policy.py | 76 +++++++++++++ vel/rl/policy/a2c.py | 2 +- vel/rl/policy/{purgatory => }/a2c_rnn.py | 71 +++++++----- vel/rl/policy/acer.py | 2 +- vel/rl/policy/ppo.py | 2 +- vel/rl/policy/{purgatory => }/ppo_rnn.py | 76 ++++++++----- vel/rl/policy/purgatory/__init__.py | 0 vel/rl/policy/trpo.py | 2 +- vel/rl/xpolicy/stochastic_rnn_policy.py | 99 ----------------- vel/util/datastructure.py | 21 ++++ vel/util/tensor_util.py | 6 +- 19 files changed, 366 insertions(+), 258 deletions(-) rename examples-configs/rl/atari/{purgatory => }/atari_a2c_lstm.yaml (76%) rename examples-configs/rl/atari/{purgatory => }/atari_ppo_gru.yaml (79%) delete mode 100644 vel/module/rnn_cell.py rename vel/rl/layer/{purgatory => }/nature_cnn_rnn.py (95%) create mode 100644 vel/rl/layer/rnn_cell.py create mode 100644 vel/rl/module/stochastic_rnn_policy.py rename vel/rl/policy/{purgatory => }/a2c_rnn.py (70%) rename vel/rl/policy/{purgatory => }/ppo_rnn.py (79%) delete mode 100644 vel/rl/policy/purgatory/__init__.py delete mode 100644 vel/rl/xpolicy/stochastic_rnn_policy.py create mode 100644 vel/util/datastructure.py diff --git a/examples-configs/rl/atari/purgatory/atari_a2c_lstm.yaml b/examples-configs/rl/atari/atari_a2c_lstm.yaml similarity index 76% rename from examples-configs/rl/atari/purgatory/atari_a2c_lstm.yaml rename to examples-configs/rl/atari/atari_a2c_lstm.yaml index 4db60264..25476fed 100644 --- a/examples-configs/rl/atari/purgatory/atari_a2c_lstm.yaml +++ b/examples-configs/rl/atari/atari_a2c_lstm.yaml @@ -12,24 +12,22 @@ vec_env: model: - name: vel.rl.algo.a2c_rnn + name: vel.rl.policy.a2c_rnn entropy_coefficient: 0.01 value_coefficient: 0.5 discount_factor: 0.99 - policy: - name: vel.rl.policy.stochastic_rnn_policy - - input_block: - name: vel.module.input.image_to_tensor - - backbone: - name: vel.rl.backbone.nature_cnn_rnn - input_width: 84 - input_height: 84 - input_channels: 1 # The same as frame_history - rnn_type: 'lstm' + net: + name: vel.net.modular + layers: + - name: vel.net.layer.input.image_to_tensor + - name: vel.rl.layer.nature_cnn + - name: vel.rl.layer.rnn_cell + hidden_size: 512 + rnn_type: 'lstm' + - name: vel.net.layer.util.repeat + times: 2 # Need to repeat output twice, for action and value heads reinforcer: diff --git a/examples-configs/rl/atari/purgatory/atari_ppo_gru.yaml b/examples-configs/rl/atari/atari_ppo_gru.yaml similarity index 79% rename from 
examples-configs/rl/atari/purgatory/atari_ppo_gru.yaml rename to examples-configs/rl/atari/atari_ppo_gru.yaml index afea6850..81d7af1e 100644 --- a/examples-configs/rl/atari/purgatory/atari_ppo_gru.yaml +++ b/examples-configs/rl/atari/atari_ppo_gru.yaml @@ -11,7 +11,7 @@ vec_env: model: - name: vel.rl.algo.ppo_rnn + name: vel.rl.policy.ppo_rnn entropy_coefficient: 0.01 value_coefficient: 0.5 @@ -24,20 +24,16 @@ model: initial_value: 0.1 final_value: 0.0 - policy: - name: vel.rl.policy.stochastic_rnn_policy - - input_block: - name: vel.module.input.image_to_tensor - - backbone: - name: vel.rl.backbone.nature_cnn_rnn - rnn_type: 'gru' - hidden_units: 512 - - input_width: 84 - input_height: 84 - input_channels: 1 # The same as frame_history + net: + name: vel.net.modular + layers: + - name: vel.net.layer.input.image_to_tensor + - name: vel.rl.layer.nature_cnn + - name: vel.rl.layer.rnn_cell + hidden_size: 512 + rnn_type: 'gru' + - name: vel.net.layer.util.repeat + times: 2 # Need to repeat output twice, for action and value heads reinforcer: diff --git a/vel/module/rnn_cell.py b/vel/module/rnn_cell.py deleted file mode 100644 index 5ce58867..00000000 --- a/vel/module/rnn_cell.py +++ /dev/null @@ -1,61 +0,0 @@ -import torch -import torch.nn as nn -import torch.nn.init as init - - -from vel.api import LinearBackboneModel - - -class RnnCell(LinearBackboneModel): - """ Generalization of RNN cell (Simple RNN, LSTM or GRU) """ - - def __init__(self, input_size, hidden_size, rnn_type, bias=True, nonlinearity='tanh'): - super().__init__() - - assert rnn_type in {'rnn', 'lstm', 'gru'}, "Rnn type {} is not supported".format(rnn_type) - - self.input_size = input_size - self.hidden_size = hidden_size - self.rnn_type = rnn_type - - if self.rnn_type == 'rnn': - self.rnn_cell = nn.RNNCell( - input_size=input_size, hidden_size=hidden_size, bias=bias, nonlinearity=nonlinearity - ) - elif self.rnn_type == 'lstm': - self.rnn_cell = nn.LSTMCell(input_size=input_size, hidden_size=hidden_size, bias=bias) - elif self.rnn_type == 'gru': - self.rnn_cell = nn.GRUCell(input_size=input_size, hidden_size=hidden_size, bias=bias) - - def reset_weights(self): - init.xavier_normal_(self.rnn_cell.weight_hh) - init.xavier_normal_(self.rnn_cell.weight_ih) - init.zeros_(self.rnn_cell.bias_ih) - init.zeros_(self.rnn_cell.bias_hh) - - @property - def output_dim(self) -> int: - """ Final dimension of model output """ - return self.hidden_size - - @property - def state_dim(self) -> int: - """ Dimension of model state """ - if self.rnn_type == 'lstm': - return 2 * self.hidden_size - else: - return self.hidden_size - - def zero_state(self, batch_size): - """ Potential state for the model """ - return torch.zeros(batch_size, self.state_dim) - - def forward(self, input_data, state): - if self.rnn_type == 'lstm': - hidden_state, cell_state = torch.split(state, self.hidden_size, 1) - hidden_state, cell_state = self.rnn_cell(input_data, (hidden_state, cell_state)) - new_state = torch.cat([hidden_state, cell_state], dim=1) - return hidden_state, new_state - else: - new_hidden_state = self.rnn_cell(input_data, state) - return new_hidden_state, new_hidden_state diff --git a/vel/net/modular.py b/vel/net/modular.py index 9416992a..c23bf0a6 100644 --- a/vel/net/modular.py +++ b/vel/net/modular.py @@ -54,7 +54,8 @@ def reset_state(self, state, dones): raise NotImplementedError def forward(self, input_data, state=None): - return self.layers(input_data) + context = {} + return self.layers(input_data, context=context) class 
StatefulModularNetwork(BackboneNetwork): @@ -80,14 +81,33 @@ def size_hints(self) -> SizeHints: def zero_state(self, batch_size): """ Potential state for the model """ - raise NotImplementedError + zero_state = {} + + for l in self.layers: + layer_zero_state = l.zero_state(batch_size) + if layer_zero_state is not None: + zero_state.update(layer_zero_state) + + return zero_state def reset_state(self, state, dones): """ Reset the state after the episode has been terminated """ raise NotImplementedError - def forward(self, input_data, state=None): - raise NotImplementedError + def forward(self, input_data, state): + data = input_data + + context = {} + output_state = {} + + for layer in self.layers: + if layer.is_stateful: + data, new_state = layer(data, state=state, context=context) + output_state.update(new_state) + else: + data = layer(data, state=state, context=context) + + return data, output_state class ModularNetworkFactory(ModelFactory): diff --git a/vel/rl/env_roller/step_env_roller.py b/vel/rl/env_roller/step_env_roller.py index 1a6c22a5..44782283 100644 --- a/vel/rl/env_roller/step_env_roller.py +++ b/vel/rl/env_roller/step_env_roller.py @@ -5,7 +5,8 @@ from vel.openai.baselines.common.vec_env import VecEnv from vel.rl.api import Trajectories, Rollout, EnvRollerBase, EnvRollerFactoryBase, RlPolicy from vel.rl.util.actor import PolicyActor -from vel.util.tensor_util import TensorAccumulator +from vel.util.tensor_util import TensorAccumulator, to_device +from vel.util.datastructure import flatten_dict class StepEnvRoller(EnvRollerBase): @@ -36,15 +37,17 @@ def rollout(self, batch_info: BatchInfo, number_of_steps: int) -> Rollout: for step_idx in range(number_of_steps): step = self.actor.act(self.last_observation.to(self.device), deterministic=False) + cpu_step = to_device(step, torch.device('cpu')) # Add step to the tensor accumulator - for name, tensor in step.items(): + for name, tensor in cpu_step.items(): + # Take not that here we convert all the tensors to CPU - accumulator.add(name, tensor.cpu()) + accumulator.add(name, tensor) accumulator.add('observations', self.last_observation) - actions_numpy = step['actions'].detach().cpu().numpy() + actions_numpy = cpu_step['actions'].detach().numpy() new_obs, new_rewards, new_dones, new_infos = self.environment.step(actions_numpy) # Done is flagged true when the episode has ended AND the frame we see is already a first frame from the @@ -63,11 +66,14 @@ def rollout(self, batch_info: BatchInfo, number_of_steps: int) -> Rollout: # Perform last agent step, without advancing the state final_obs = self.actor.act(self.last_observation.to(self.device), advance_state=False) + cpu_final_obs = to_device(final_obs, torch.device('cpu')) rollout_tensors = {} - for key, value in final_obs.items(): - rollout_tensors[f"final_{key}"] = value.cpu() + flatten_dict(cpu_final_obs, rollout_tensors, root='final') + + # for key, value in final_obs.items(): + # rollout_tensors[f"final_{key}"] = value.cpu() return Trajectories( num_steps=accumulated_tensors['observations'].size(0), diff --git a/vel/rl/env_roller/trajectory_replay_env_roller.py b/vel/rl/env_roller/trajectory_replay_env_roller.py index a9cdc06b..1a788025 100644 --- a/vel/rl/env_roller/trajectory_replay_env_roller.py +++ b/vel/rl/env_roller/trajectory_replay_env_roller.py @@ -108,7 +108,7 @@ def sample(self, batch_info: BatchInfo, number_of_steps: int) -> Rollout: final_values = self.actor.value(last_observations).cpu() # Add 'final_values' to the rollout - rollout.rollout_tensors['final_values'] = 
final_values + rollout.rollout_tensors['final.values'] = final_values return rollout diff --git a/vel/rl/layer/purgatory/nature_cnn_rnn.py b/vel/rl/layer/nature_cnn_rnn.py similarity index 95% rename from vel/rl/layer/purgatory/nature_cnn_rnn.py rename to vel/rl/layer/nature_cnn_rnn.py index 6dccd7c9..699e7387 100644 --- a/vel/rl/layer/purgatory/nature_cnn_rnn.py +++ b/vel/rl/layer/nature_cnn_rnn.py @@ -2,6 +2,9 @@ from vel.rl.backbone.nature_cnn import NatureCnn from vel.module.rnn_cell import RnnCell +from vel.api import SizeHint, SizeHints +from vel.net.layer_base import Layer, LayerFactory + class NatureCnnRnnBackbone(LinearBackboneModel): """ diff --git a/vel/rl/layer/rnn_cell.py b/vel/rl/layer/rnn_cell.py new file mode 100644 index 00000000..678b003f --- /dev/null +++ b/vel/rl/layer/rnn_cell.py @@ -0,0 +1,105 @@ +import torch +import torch.nn as nn +import torch.nn.init as init + + +from vel.api import SizeHint, SizeHints +from vel.net.layer_base import Layer, LayerFactory + + +class RnnCell(Layer): + """ Generalization of RNN cell (Simple RNN, LSTM or GRU) """ + + def __init__(self, name: str, input_size: int, hidden_size: int, rnn_type: str, bias: bool = True, + nonlinearity: str = 'tanh'): + super().__init__(name) + + assert rnn_type in {'rnn', 'lstm', 'gru'}, "Rnn type {} is not supported".format(rnn_type) + + self.input_size = input_size + self.hidden_size = hidden_size + self.rnn_type = rnn_type + + if self.rnn_type == 'rnn': + self.rnn_cell = nn.RNNCell( + input_size=input_size, hidden_size=hidden_size, bias=bias, nonlinearity=nonlinearity + ) + elif self.rnn_type == 'lstm': + self.rnn_cell = nn.LSTMCell(input_size=input_size, hidden_size=hidden_size, bias=bias) + elif self.rnn_type == 'gru': + self.rnn_cell = nn.GRUCell(input_size=input_size, hidden_size=hidden_size, bias=bias) + + @property + def is_stateful(self) -> bool: + """ If the model has a state that needs to be fed between individual observations """ + return True + + def reset_weights(self): + init.xavier_normal_(self.rnn_cell.weight_hh) + init.xavier_normal_(self.rnn_cell.weight_ih) + init.zeros_(self.rnn_cell.bias_ih) + init.zeros_(self.rnn_cell.bias_hh) + + def size_hints(self) -> SizeHints: + return SizeHints(SizeHint(None, self.hidden_size)) + + @property + def state_dim(self) -> int: + """ Dimension of model state """ + if self.rnn_type == 'lstm': + return 2 * self.hidden_size + else: + return self.hidden_size + + def zero_state(self, batch_size): + """ Potential state for the model """ + return {self.name: torch.zeros(batch_size, self.state_dim)} + + def forward(self, input_data, state: dict, context: dict = None): + """ Forward propagation of a single layer """ + if self.rnn_type == 'lstm': + state_tensor = state[self.name] + hidden_state, cell_state = torch.split(state_tensor, self.hidden_size, 1) + hidden_state, cell_state = self.rnn_cell(input_data, (hidden_state, cell_state)) + new_state = torch.cat([hidden_state, cell_state], dim=1) + return hidden_state, {self.name: new_state} + else: + state_tensor = state[self.name] + new_hidden_state = self.rnn_cell(input_data, state_tensor) + return new_hidden_state, {self.name: new_hidden_state} + + +class RnnCellFactory(LayerFactory): + """ Factory for the RnnCell layer """ + + def __init__(self, hidden_size: int, rnn_type: str, bias: bool = True, nonlinearity: str = 'tanh'): + self.hidden_size = hidden_size + self.rnn_type = rnn_type + self.bias = bias + self.nonlinearity = nonlinearity + + @property + def name_base(self) -> str: + return "rnn_cell" + + def 
instantiate(self, name: str, direct_input: SizeHints, context: dict) -> Layer: + input_size = direct_input.assert_single().last() + + return RnnCell( + name=name, + input_size=input_size, + hidden_size=self.hidden_size, + rnn_type=self.rnn_type, + bias=self.bias, + nonlinearity=self.nonlinearity + ) + + +def create(hidden_size: int, rnn_type: str, bias: bool = True, nonlinearity: str = 'tanh'): + """ Vel factory function """ + return RnnCellFactory( + hidden_size=hidden_size, + rnn_type=rnn_type, + bias=bias, + nonlinearity=nonlinearity + ) diff --git a/vel/rl/module/stochastic_rnn_policy.py b/vel/rl/module/stochastic_rnn_policy.py new file mode 100644 index 00000000..ae2e17b2 --- /dev/null +++ b/vel/rl/module/stochastic_rnn_policy.py @@ -0,0 +1,76 @@ +import gym + +from vel.api import Network, BackboneNetwork + +from vel.rl.module.head.stochastic_action_head import make_stockastic_action_head +from vel.rl.module.head.value_head import ValueHead +from vel.util.tensor_util import to_device + + +class StochasticRnnPolicy(Network): + """ + Most generic policy gradient model class with a set of common actor-critic heads that share a single backbone + RNN version + """ + + def __init__(self, net: BackboneNetwork, action_space: gym.Space): + super().__init__() + + self.net = net + + assert self.net.is_stateful, "Must have a stateful backbone" + + (action_size, value_size) = self.net.size_hints().assert_tuple(2) + + self.action_head = make_stockastic_action_head( + action_space=action_space, + input_dim=action_size.last(), + ) + self.value_head = ValueHead( + input_dim=value_size.last() + ) + + @property + def is_stateful(self) -> bool: + """ If the model has a state that needs to be fed between individual observations """ + return True + + def zero_state(self, batch_size): + return self.net.zero_state(batch_size) + + def reset_weights(self): + """ Initialize properly model weights """ + self.net.reset_weights() + self.action_head.reset_weights() + self.value_head.reset_weights() + + def forward(self, observations, state): + """ Calculate model outputs """ + (action_hidden, value_hidden), new_state = self.net(observations, state=state) + + action_output = self.action_head(action_hidden) + value_output = self.value_head(value_hidden) + + return action_output, value_output, new_state + + def reset_state(self, state, dones): + """ Reset the state after the episode has been terminated """ + if (dones > 0).any().item(): + dones_expanded = dones.unsqueeze(-1) + + zero_state = self.net.zero_state(dones.shape[0]) + + out_state = {} + + for key in state: + state_item = state[key] + zero_state_item = zero_state[key].to(state_item.device) + + final_item = state_item * (1 - dones_expanded) + zero_state_item * dones_expanded + + out_state[key] = final_item + + return out_state + else: + return state + diff --git a/vel/rl/policy/a2c.py b/vel/rl/policy/a2c.py index ba739527..68589fdd 100644 --- a/vel/rl/policy/a2c.py +++ b/vel/rl/policy/a2c.py @@ -55,7 +55,7 @@ def process_rollout(self, rollout: Rollout) -> Rollout: rewards_buffer=rollout.transition_tensors['rewards'], dones_buffer=rollout.transition_tensors['dones'], values_buffer=rollout.transition_tensors['values'], - final_values=rollout.rollout_tensors['final_values'], + final_values=rollout.rollout_tensors['final.values'], discount_factor=self.discount_factor, gae_lambda=self.gae_lambda, number_of_steps=rollout.num_steps diff --git a/vel/rl/policy/purgatory/a2c_rnn.py b/vel/rl/policy/a2c_rnn.py similarity index 70% rename from 
vel/rl/policy/purgatory/a2c_rnn.py rename to vel/rl/policy/a2c_rnn.py index 523b2f13..84b4ef50 100644 --- a/vel/rl/policy/purgatory/a2c_rnn.py +++ b/vel/rl/policy/a2c_rnn.py @@ -1,17 +1,20 @@ +import gym import torch import torch.nn.functional as F +from vel.api import ModelFactory, BatchInfo, BackboneNetwork from vel.metric.base import AveragingNamedMetric -from vel.calc.function import explained_variance -from vel.api import BackboneModel, ModelFactory, BatchInfo - from vel.rl.api import RlPolicy, Rollout, Trajectories from vel.rl.discount_bootstrap import discount_bootstrap_gae +from vel.rl.module.stochastic_rnn_policy import StochasticRnnPolicy +from vel.util.situational import gym_space_to_size_hint +from vel.util.stats import explained_variance class A2CRnn(RlPolicy): """ Simplest policy gradient - calculate loss as an advantage of an actor versus value function """ - def __init__(self, policy: BackboneModel, entropy_coefficient, value_coefficient, discount_factor: float, + def __init__(self, net: BackboneNetwork, action_space: gym.Space, + entropy_coefficient, value_coefficient, discount_factor: float, gae_lambda=1.0): super().__init__(discount_factor) @@ -19,41 +22,40 @@ def __init__(self, policy: BackboneModel, entropy_coefficient, value_coefficient self.value_coefficient = value_coefficient self.gae_lambda = gae_lambda - self.policy = policy + self.net = StochasticRnnPolicy(net, action_space) - assert self.policy.is_stateful, "Policy must be stateful" + assert self.net.is_stateful, "Policy must be stateful" def reset_weights(self): """ Initialize properly model weights """ - self.policy.reset_weights() + self.net.reset_weights() def forward(self, observation, state=None): """ Calculate model outputs """ - return self.policy(observation, state=state) + return self.net(observation, state=state) def is_stateful(self) -> bool: - return self.policy.is_stateful + return self.net.is_stateful def zero_state(self, batch_size): - return self.policy.zero_state(batch_size) + return self.net.zero_state(batch_size) def reset_state(self, state, dones): - return self.policy.reset_state(state, dones) + return self.net.reset_state(state, dones) def act(self, observation, state=None, deterministic=False): """ Select actions based on model's output """ action_pd_params, value_output, next_state = self(observation, state=state) - - actions = self.policy.action_head.sample(action_pd_params, deterministic=deterministic) + actions = self.net.action_head.sample(action_pd_params, deterministic=deterministic) # log likelihood of selected action - logprobs = self.policy.action_head.logprob(actions, action_pd_params) + logprobs = self.net.action_head.logprob(actions, action_pd_params) return { + 'action:logprobs': logprobs, 'actions': actions, 'state': next_state, 'values': value_output, - 'action:logprobs': logprobs } def process_rollout(self, rollout: Rollout) -> Rollout: @@ -64,7 +66,7 @@ def process_rollout(self, rollout: Rollout) -> Rollout: rewards_buffer=rollout.transition_tensors['rewards'], dones_buffer=rollout.transition_tensors['dones'], values_buffer=rollout.transition_tensors['values'], - final_values=rollout.rollout_tensors['final_values'], + final_values=rollout.rollout_tensors['final.values'], discount_factor=self.discount_factor, gae_lambda=self.gae_lambda, number_of_steps=rollout.num_steps @@ -77,6 +79,18 @@ def process_rollout(self, rollout: Rollout) -> Rollout: return rollout + def _extract_initial_state(self, transition_tensors): + """ Extract initial state from the state dictionary """ 
+ state = {} + + idx = len('state') + 1 + + for key, value in transition_tensors.items(): + if key.startswith('state'): + state[key[idx:]] = value[0] + + return state + def calculate_gradient(self, batch_info: BatchInfo, rollout: Rollout) -> dict: """ Calculate loss of the supplied rollout """ assert isinstance(rollout, Trajectories), "For an RNN model, we must evaluate trajectories" @@ -89,7 +103,7 @@ def calculate_gradient(self, batch_info: BatchInfo, rollout: Rollout) -> dict: # Let's evaluate the model observations = rollout.transition_tensors['observations'] - hidden_state = rollout.transition_tensors['state'][0] # Initial hidden state + hidden_state = self._extract_initial_state(rollout.transition_tensors) dones = rollout.transition_tensors['dones'] action_accumulator = [] @@ -106,8 +120,8 @@ def calculate_gradient(self, batch_info: BatchInfo, rollout: Rollout) -> dict: pd_params = torch.cat(action_accumulator, dim=0) model_values = torch.cat(value_accumulator, dim=0) - log_probs = self.policy.action_head.logprob(actions, pd_params) - entropy = self.policy.action_head.entropy(pd_params) + log_probs = self.net.action_head.logprob(actions, pd_params) + entropy = self.net.action_head.entropy(pd_params) # Actual calculations. Pretty trivial policy_loss = -torch.mean(advantages * log_probs) @@ -141,8 +155,8 @@ def metrics(self) -> list: class A2CRnnFactory(ModelFactory): """ Factory class for policy gradient models """ - def __init__(self, policy, entropy_coefficient, value_coefficient, discount_factor, gae_lambda=1.0): - self.policy = policy + def __init__(self, net_factory, entropy_coefficient, value_coefficient, discount_factor, gae_lambda=1.0): + self.net_factory = net_factory self.entropy_coefficient = entropy_coefficient self.value_coefficient = value_coefficient self.discount_factor = discount_factor @@ -150,11 +164,16 @@ def __init__(self, policy, entropy_coefficient, value_coefficient, discount_fact def instantiate(self, **extra_args): """ Instantiate the model """ - # action_space = extra_args.pop('action_space') - policy = self.policy.instantiate(**extra_args) + action_space = extra_args.pop('action_space') + observation_space = extra_args.pop('observation_space') + + size_hint = gym_space_to_size_hint(observation_space) + + net = self.net_factory.instantiate(size_hint=size_hint, **extra_args) return A2CRnn( - policy=policy, + net=net, + action_space=action_space, entropy_coefficient=self.entropy_coefficient, value_coefficient=self.value_coefficient, discount_factor=self.discount_factor, @@ -162,10 +181,10 @@ def instantiate(self, **extra_args): ) -def create(policy: BackboneModel, entropy_coefficient, value_coefficient, discount_factor, gae_lambda=1.0): +def create(net: ModelFactory, entropy_coefficient, value_coefficient, discount_factor, gae_lambda=1.0): """ Vel factory function """ return A2CRnnFactory( - policy=policy, + net_factory=net, entropy_coefficient=entropy_coefficient, value_coefficient=value_coefficient, discount_factor=discount_factor, diff --git a/vel/rl/policy/acer.py b/vel/rl/policy/acer.py index 433a4ea2..3d30f733 100644 --- a/vel/rl/policy/acer.py +++ b/vel/rl/policy/acer.py @@ -136,7 +136,7 @@ def calculate_gradient(self, batch_info: BatchInfo, rollout: Rollout) -> dict: action_q.reshape(trajectory_rewards.size()), model_state_values.reshape(trajectory_rewards.size()), actions_rho.reshape(trajectory_rewards.size()), - rollout.rollout_tensors['final_values'] + rollout.rollout_tensors['final.values'] ).flatten() advantages = q_retraced - model_state_values 
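Together with the rename of 'final_values' to 'final.values' above, recurrent state now travels through the rollout's transition tensors under dotted keys of the form 'state.<layer name>', and the new _extract_initial_state helper recovers the per-layer state at the first time step of each trajectory. A minimal sketch of that behaviour, using invented key names and shapes rather than anything taken from the library:

    import torch

    # Hypothetical trajectory buffer: 5 time steps, batch of 4, and one recurrent
    # layer whose state was recorded under 'state.rnn_cell_01' (the name is assumed).
    transition_tensors = {
        'observations': torch.zeros(5, 4, 16),
        'state.rnn_cell_01': torch.zeros(5, 4, 32),
    }

    initial_state = {}
    idx = len('state') + 1  # length of the 'state.' prefix to strip

    for key, value in transition_tensors.items():
        if key.startswith('state'):
            initial_state[key[idx:]] = value[0]  # keep only the first time step

    # initial_state == {'rnn_cell_01': tensor of shape (4, 32)}, which is what the
    # policy feeds back into its stateful backbone when re-evaluating a trajectory.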
diff --git a/vel/rl/policy/ppo.py b/vel/rl/policy/ppo.py index 1c922f10..4e68445b 100644 --- a/vel/rl/policy/ppo.py +++ b/vel/rl/policy/ppo.py @@ -63,7 +63,7 @@ def process_rollout(self, rollout: Rollout): rewards_buffer=rollout.transition_tensors['rewards'], dones_buffer=rollout.transition_tensors['dones'], values_buffer=rollout.transition_tensors['values'], - final_values=rollout.rollout_tensors['final_values'], + final_values=rollout.rollout_tensors['final.values'], discount_factor=self.discount_factor, gae_lambda=self.gae_lambda, number_of_steps=rollout.num_steps diff --git a/vel/rl/policy/purgatory/ppo_rnn.py b/vel/rl/policy/ppo_rnn.py similarity index 79% rename from vel/rl/policy/purgatory/ppo_rnn.py rename to vel/rl/policy/ppo_rnn.py index 76c2daad..fc28e2f1 100644 --- a/vel/rl/policy/purgatory/ppo_rnn.py +++ b/vel/rl/policy/ppo_rnn.py @@ -1,19 +1,21 @@ -import torch - import numbers -from vel.api import BackboneModel, BatchInfo, ModelFactory -from vel.calc.function import explained_variance +import gym +import torch + +from vel.api import BatchInfo, ModelFactory, BackboneNetwork from vel.function.constant import ConstantSchedule from vel.metric.base import AveragingNamedMetric - from vel.rl.api import RlPolicy, Rollout, Trajectories from vel.rl.discount_bootstrap import discount_bootstrap_gae +from vel.rl.module.stochastic_rnn_policy import StochasticRnnPolicy +from vel.util.situational import gym_space_to_size_hint +from vel.util.stats import explained_variance class PPORnn(RlPolicy): """ Proximal Policy Optimization - https://arxiv.org/abs/1707.06347 """ - def __init__(self, policy: BackboneModel, + def __init__(self, net: BackboneNetwork, action_space: gym.Space, entropy_coefficient, value_coefficient, cliprange, discount_factor: float, normalize_advantage: bool = True, gae_lambda: float = 1.0): super().__init__(discount_factor) @@ -28,43 +30,43 @@ def __init__(self, policy: BackboneModel, else: self.cliprange = cliprange - self.policy = policy + self.net = StochasticRnnPolicy(net, action_space) - assert self.policy.is_stateful, "Policy must be stateful" + assert self.net.is_stateful, "Policy must be stateful" def reset_weights(self): """ Initialize properly model weights """ - self.policy.reset_weights() + self.net.reset_weights() def forward(self, observation, state=None): """ Calculate model outputs """ - return self.policy.forward(observation, state=state) + return self.net(observation, state=state) def is_stateful(self) -> bool: - return self.policy.is_stateful + return self.net.is_stateful def zero_state(self, batch_size): - return self.policy.zero_state(batch_size) + return self.net.zero_state(batch_size) def reset_state(self, state, dones): - return self.policy.reset_state(state, dones) + return self.net.reset_state(state, dones) def act(self, observation, state=None, deterministic=False): """ Select actions based on model's output """ action_pd_params, value_output, next_state = self(observation, state=state) - actions = self.policy.action_head.sample(action_pd_params, deterministic=deterministic) + actions = self.net.action_head.sample(action_pd_params, deterministic=deterministic) # log likelihood of selected action - logprobs = self.policy.action_head.logprob(actions, action_pd_params) + logprobs = self.net.action_head.logprob(actions, action_pd_params) return { + 'action:logprobs': logprobs, 'actions': actions, - 'values': value_output, 'state': next_state, - 'action:logprobs': logprobs + 'values': value_output, } - def process_rollout(self, rollout: Rollout): + 
def process_rollout(self, rollout: Rollout) -> Rollout: """ Process rollout for optimization before any chunking/shuffling """ assert isinstance(rollout, Trajectories), "PPO requires trajectory rollouts" @@ -72,7 +74,7 @@ def process_rollout(self, rollout: Rollout): rewards_buffer=rollout.transition_tensors['rewards'], dones_buffer=rollout.transition_tensors['dones'], values_buffer=rollout.transition_tensors['values'], - final_values=rollout.rollout_tensors['final_values'], + final_values=rollout.rollout_tensors['final.values'], discount_factor=self.discount_factor, gae_lambda=self.gae_lambda, number_of_steps=rollout.num_steps @@ -85,6 +87,18 @@ def process_rollout(self, rollout: Rollout): return rollout + def _extract_initial_state(self, transition_tensors): + """ Extract initial state from the state dictionary """ + state = {} + + idx = len('state') + 1 + + for key, value in transition_tensors.items(): + if key.startswith('state'): + state[key[idx:]] = value[0] + + return state + def calculate_gradient(self, batch_info: BatchInfo, rollout: Rollout) -> dict: """ Calculate loss of the supplied rollout """ assert isinstance(rollout, Trajectories), "For an RNN model, we must evaluate trajectories" @@ -98,7 +112,7 @@ def calculate_gradient(self, batch_info: BatchInfo, rollout: Rollout) -> dict: # PART 0.1 - Model evaluation observations = rollout.transition_tensors['observations'] - hidden_state = rollout.transition_tensors['state'][0] # Initial hidden state + hidden_state = self._extract_initial_state(rollout.transition_tensors) dones = rollout.transition_tensors['dones'] action_accumulator = [] @@ -115,8 +129,8 @@ def calculate_gradient(self, batch_info: BatchInfo, rollout: Rollout) -> dict: pd_params = torch.cat(action_accumulator, dim=0) model_values = torch.cat(value_accumulator, dim=0) - model_action_logprobs = self.policy.action_head.logprob(actions, pd_params) - entropy = self.policy.action_head.entropy(pd_params) + model_action_logprobs = self.net.action_head.logprob(actions, pd_params) + entropy = self.net.action_head.entropy(pd_params) # Select the cliprange current_cliprange = self.cliprange.value(batch_info['progress']) @@ -178,10 +192,10 @@ def metrics(self) -> list: class PPORnnFactory(ModelFactory): """ Factory class for policy gradient models """ - def __init__(self, policy: BackboneModel, + def __init__(self, net_factory, entropy_coefficient, value_coefficient, cliprange, discount_factor: float, normalize_advantage: bool = True, gae_lambda: float = 1.0): - self.policy = policy + self.net_factory = net_factory self.entropy_coefficient = entropy_coefficient self.value_coefficient = value_coefficient self.cliprange = cliprange @@ -191,11 +205,17 @@ def __init__(self, policy: BackboneModel, def instantiate(self, **extra_args): """ Instantiate the model """ - policy = self.policy.instantiate(**extra_args) + action_space = extra_args.pop('action_space') + observation_space = extra_args.pop('observation_space') + + size_hint = gym_space_to_size_hint(observation_space) + + net = self.net_factory.instantiate(size_hint=size_hint, **extra_args) return PPORnn( - policy=policy, + net=net, entropy_coefficient=self.entropy_coefficient, + action_space=action_space, value_coefficient=self.value_coefficient, cliprange=self.cliprange, discount_factor=self.discount_factor, @@ -204,12 +224,12 @@ def instantiate(self, **extra_args): ) -def create(policy: BackboneModel, +def create(net: ModelFactory, entropy_coefficient, value_coefficient, cliprange, discount_factor: float, normalize_advantage: 
bool = True, gae_lambda: float = 1.0): """ Vel factory function """ return PPORnnFactory( - policy=policy, + net_factory=net, entropy_coefficient=entropy_coefficient, value_coefficient=value_coefficient, cliprange=cliprange, diff --git a/vel/rl/policy/purgatory/__init__.py b/vel/rl/policy/purgatory/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/vel/rl/policy/trpo.py b/vel/rl/policy/trpo.py index c7fe2cda..0e4cba3a 100644 --- a/vel/rl/policy/trpo.py +++ b/vel/rl/policy/trpo.py @@ -167,7 +167,7 @@ def process_rollout(self, rollout: Rollout): rewards_buffer=rollout.transition_tensors['rewards'], dones_buffer=rollout.transition_tensors['dones'], values_buffer=rollout.transition_tensors['values'], - final_values=rollout.rollout_tensors['final_values'], + final_values=rollout.rollout_tensors['final.values'], discount_factor=self.discount_factor, gae_lambda=self.gae_lambda, number_of_steps=rollout.num_steps diff --git a/vel/rl/xpolicy/stochastic_rnn_policy.py b/vel/rl/xpolicy/stochastic_rnn_policy.py deleted file mode 100644 index de8754b1..00000000 --- a/vel/rl/xpolicy/stochastic_rnn_policy.py +++ /dev/null @@ -1,99 +0,0 @@ -import gym -import typing - -from vel.api import LinearBackboneModel, ModelFactory, BackboneModel -from vel.module.input.identity import IdentityFactory -from vel.rl.module.stochastic_action_head import StochasticActionHead -from vel.rl.module.value_head import ValueHead - - -class StochasticRnnPolicy(BackboneModel): - """ - Most generic policy gradient model class with a set of common actor-critic heads that share a single backbone - RNN version - """ - - def __init__(self, input_block: BackboneModel, backbone: LinearBackboneModel, - action_space: gym.Space): - super().__init__() - - self.input_block = input_block - self.backbone = backbone - - assert self.backbone.is_stateful, "Must have a stateful backbone" - - self.action_head = StochasticActionHead( - action_space=action_space, - input_dim=self.backbone.output_dim - ) - self.value_head = ValueHead(input_dim=self.backbone.output_dim) - - assert self.backbone.is_stateful, "Backbone must be a recurrent model" - - @property - def is_stateful(self) -> bool: - """ If the model has a state that needs to be fed between individual observations """ - return True - - def zero_state(self, batch_size): - return self.backbone.zero_state(batch_size) - - def reset_weights(self): - """ Initialize properly model weights """ - self.input_block.reset_weights() - self.backbone.reset_weights() - self.action_head.reset_weights() - self.value_head.reset_weights() - - def forward(self, observations, state): - """ Calculate model outputs """ - input_data = self.input_block(observations) - base_output, new_state = self.backbone(input_data, state=state) - - action_output = self.action_head(base_output) - value_output = self.value_head(base_output) - - return action_output, value_output, new_state - - def value(self, observation, state=None): - """ Calculate only value head for given state """ - input_data = self.input_block(observation) - - base_output, new_state = self.backbone(input_data, state) - value_output = self.value_head(base_output) - - return value_output - - def reset_state(self, state, dones): - """ Reset the state after the episode has been terminated """ - if (dones > 0).any().item(): - zero_state = self.backbone.zero_state(dones.shape[0]).to(state.device) - dones_expanded = dones.unsqueeze(-1) - return state * (1 - dones_expanded) + zero_state * dones_expanded - else: - return state - - -class 
StochasticRnnPolicyFactory(ModelFactory): - """ Factory class for policy gradient models """ - def __init__(self, input_block: ModelFactory, backbone: ModelFactory): - self.input_block = input_block - self.backbone = backbone - - def instantiate(self, **extra_args): - """ Instantiate the model """ - input_block = self.input_block.instantiate() - backbone = self.backbone.instantiate(**extra_args) - - return StochasticRnnPolicy(input_block, backbone, extra_args['action_space']) - - -def create(backbone: ModelFactory, input_block: typing.Optional[ModelFactory] = None): - """ Vel factory function """ - if input_block is None: - input_block = IdentityFactory() - - return StochasticRnnPolicyFactory( - input_block=input_block, - backbone=backbone - ) diff --git a/vel/util/datastructure.py b/vel/util/datastructure.py new file mode 100644 index 00000000..455c21e1 --- /dev/null +++ b/vel/util/datastructure.py @@ -0,0 +1,21 @@ +import typing + + +def flatten_dict(dictionary: dict, output: typing.Optional[dict] = None, root: str = '') -> dict: + """ From a nested dictionary built a flat version, concatenating keys with '.' """ + if output is None: + output = {} + + for key, value in dictionary.items(): + if isinstance(value, dict): + if root: + flatten_dict(value, output, f"{root}.{key}") + else: + flatten_dict(value, output, key) + else: + if root: + output[f"{root}.{key}"] = value + else: + output[key] = value + + return output diff --git a/vel/util/tensor_util.py b/vel/util/tensor_util.py index 29f41ab3..db524bda 100644 --- a/vel/util/tensor_util.py +++ b/vel/util/tensor_util.py @@ -41,7 +41,11 @@ def __init__(self): self.accumulants = collections.defaultdict(list) def add(self, name, tensor): - self.accumulants[name].append(tensor) + if isinstance(tensor, dict): + for subname, subtensor in tensor.items(): + self.add(f"{name}.{subname}", subtensor) + else: + self.accumulants[name].append(tensor) def result(self): """ Concatenate accumulated tensors """ From 8e32284e8dd232d22cbbfc6d13bef993e445d5b8 Mon Sep 17 00:00:00 2001 From: Million Integrals Date: Thu, 3 Oct 2019 15:59:26 -0700 Subject: [PATCH 117/162] Clean up purgatory files. 
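For reference, the dotted keys used throughout the previous patch are produced by the two small utilities it adds at the end: flatten_dict in vel/util/datastructure.py concatenates nested dictionary keys with '.', and TensorAccumulator.add now flattens dictionary-valued entries the same way. A minimal usage sketch of flatten_dict, with an invented input dictionary:

    from vel.util.datastructure import flatten_dict

    nested = {
        'state': {'rnn_cell_01': 1, 'rnn_cell_02': 2},
        'final': {'values': 3},
    }

    flatten_dict(nested)
    # -> {'state.rnn_cell_01': 1, 'state.rnn_cell_02': 2, 'final.values': 3}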
--- vel/rl/layer/purgatory/__init__.py | 0 vel/rl/layer/purgatory/mlp_rnn.py | 59 ------- vel/rl/layer/purgatory/noisy_nature_cnn.py | 103 ------------ vel/rl/layer/purgatory/rnn.py | 47 ------ vel/rl/xpolicy/__init__.py | 0 vel/rl/xpolicy/purgatory/__init__.py | 0 .../purgatory/old_stochastic_policy.py | 123 -------------- .../purgatory/old_stochastic_rnn_policy.py | 153 ------------------ .../purgatory/q_distributional_policy.py | 144 ----------------- vel/rl/xpolicy/purgatory/q_dueling_policy.py | 73 --------- vel/rl/xpolicy/purgatory/q_model.py | 97 ----------- vel/rl/xpolicy/purgatory/q_noisy_model.py | 86 ---------- .../purgatory/q_stochastic_policy_model.py | 128 --------------- vel/rl/xpolicy/stochastic_policy_separate.py | 94 ----------- 14 files changed, 1107 deletions(-) delete mode 100644 vel/rl/layer/purgatory/__init__.py delete mode 100644 vel/rl/layer/purgatory/mlp_rnn.py delete mode 100644 vel/rl/layer/purgatory/noisy_nature_cnn.py delete mode 100644 vel/rl/layer/purgatory/rnn.py delete mode 100644 vel/rl/xpolicy/__init__.py delete mode 100644 vel/rl/xpolicy/purgatory/__init__.py delete mode 100644 vel/rl/xpolicy/purgatory/old_stochastic_policy.py delete mode 100644 vel/rl/xpolicy/purgatory/old_stochastic_rnn_policy.py delete mode 100644 vel/rl/xpolicy/purgatory/q_distributional_policy.py delete mode 100644 vel/rl/xpolicy/purgatory/q_dueling_policy.py delete mode 100644 vel/rl/xpolicy/purgatory/q_model.py delete mode 100644 vel/rl/xpolicy/purgatory/q_noisy_model.py delete mode 100644 vel/rl/xpolicy/purgatory/q_stochastic_policy_model.py delete mode 100644 vel/rl/xpolicy/stochastic_policy_separate.py diff --git a/vel/rl/layer/purgatory/__init__.py b/vel/rl/layer/purgatory/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/vel/rl/layer/purgatory/mlp_rnn.py b/vel/rl/layer/purgatory/mlp_rnn.py deleted file mode 100644 index 1e871ffb..00000000 --- a/vel/rl/layer/purgatory/mlp_rnn.py +++ /dev/null @@ -1,59 +0,0 @@ -import typing - -from vel.api import LinearBackboneModel, ModelFactory -from vel.rl.backbone.mlp import MLP -from vel.rl.backbone.rnn import RNN - - -class MlpRnn(LinearBackboneModel): - """ MLP followed by an RNN - another simple policy backbone """ - - def __init__(self, input_length: int, mlp_layers: typing.List[int], rnn_units: int, rnn_type: str = 'lstm', - mlp_activation: str = 'tanh', mlp_normalization: typing.Optional[str] = None): - super().__init__() - - self.mlp = MLP( - input_length=input_length, hidden_layers=mlp_layers, activation=mlp_activation, - normalization=mlp_normalization - ) - - self.rnn = RNN(input_length=self.mlp.output_dim, hidden_units=rnn_units, rnn_type=rnn_type) - - @property - def output_dim(self) -> int: - return self.rnn.output_dim - - @property - def state_dim(self) -> int: - """ Initial state of the network """ - return self.rnn.state_dim - - @property - def is_stateful(self) -> bool: - """ If the model has a state that needs to be fed between individual observations """ - return True - - def zero_state(self, batch_size): - """ Potential state for the model """ - return self.rnn.zero_state(batch_size) - - def forward(self, input_data, state): - mlp_output = self.mlp(input_data) - hidden_state, new_state = self.rnn(mlp_output, state) - return hidden_state, new_state - - -def create(input_length: int, mlp_layers: typing.List[int], rnn_units: int, rnn_type: str = 'lstm', - mlp_activation: str = 'tanh', mlp_normalization: typing.Optional[str] = None): - """ Vel factory function """ - def instantiate(**_): - return 
MlpRnn( - input_length=input_length, - mlp_layers=mlp_layers, - rnn_units=rnn_units, - rnn_type=rnn_type, - mlp_activation=mlp_activation, - mlp_normalization=mlp_normalization - ) - - return ModelFactory.generic(instantiate) diff --git a/vel/rl/layer/purgatory/noisy_nature_cnn.py b/vel/rl/layer/purgatory/noisy_nature_cnn.py deleted file mode 100644 index 08ff71ba..00000000 --- a/vel/rl/layer/purgatory/noisy_nature_cnn.py +++ /dev/null @@ -1,103 +0,0 @@ -""" -Code based loosely on implementation: -https://github.com/openai/baselines/blob/master/baselines/ppo2/policies.py - -Under MIT license. -""" -import numpy as np - -import torch.nn as nn -import torch.nn.init as init -import torch.nn.functional as F - -import vel.util.network as net_util - -from vel.api import LinearBackboneModel, ModelFactory -from vel.rl.module.noisy_linear import NoisyLinear - - -class NoisyNatureCnn(LinearBackboneModel): - """ - Neural network as defined in the paper 'Human-level control through deep reinforcement learning' - implemented via "Noisy Networks for Exploration" - """ - def __init__(self, input_width, input_height, input_channels, output_dim=512, initial_std_dev=0.4, - factorized_noise=True): - super().__init__() - - self._output_dim = output_dim - - self.conv1 = nn.Conv2d( - in_channels=input_channels, - out_channels=32, - kernel_size=(8, 8), - stride=4 - ) - - self.conv2 = nn.Conv2d( - in_channels=32, - out_channels=64, - kernel_size=(4, 4), - stride=2 - ) - - self.conv3 = nn.Conv2d( - in_channels=64, - out_channels=64, - kernel_size=(3, 3), - stride=1 - ) - - layer_series = [ - (8, 0, 4), - (4, 0, 2), - (3, 0, 1) - ] - - self.final_width = net_util.convolutional_layer_series(input_width, layer_series) - self.final_height = net_util.convolutional_layer_series(input_height, layer_series) - - self.linear_layer = NoisyLinear( - self.final_width * self.final_height * 64, # 64 is the number of channels of the last conv layer - self.output_dim, - initial_std_dev=initial_std_dev, - factorized_noise=factorized_noise - ) - - @property - def output_dim(self) -> int: - """ Final dimension of model output """ - return self._output_dim - - def reset_weights(self): - """ Call proper initializers for the weights """ - for m in self.modules(): - if isinstance(m, nn.Conv2d): - # init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') - init.orthogonal_(m.weight, gain=np.sqrt(2)) - init.constant_(m.bias, 0.0) - elif isinstance(m, nn.Linear): - # init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') - init.orthogonal_(m.weight, gain=np.sqrt(2)) - init.constant_(m.bias, 0.0) - elif isinstance(m, NoisyLinear): - m.reset_weights() - - def forward(self, image): - result = image - result = F.relu(self.conv1(result)) - result = F.relu(self.conv2(result)) - result = F.relu(self.conv3(result)) - flattened = result.view(result.size(0), -1) - return F.relu(self.linear_layer(flattened)) - - -def create(input_width, input_height, input_channels=1, output_dim=512, initial_std_dev=0.4, factorized_noise=True): - """ Vel factory function """ - def instantiate(**_): - return NoisyNatureCnn( - input_width=input_width, input_height=input_height, input_channels=input_channels, - output_dim=output_dim, initial_std_dev=initial_std_dev, factorized_noise=factorized_noise - ) - - return ModelFactory.generic(instantiate) diff --git a/vel/rl/layer/purgatory/rnn.py b/vel/rl/layer/purgatory/rnn.py deleted file mode 100644 index 5a6e9625..00000000 --- a/vel/rl/layer/purgatory/rnn.py +++ /dev/null @@ -1,47 +0,0 @@ -from 
vel.api import LinearBackboneModel, ModelFactory -from vel.module.rnn_cell import RnnCell - - -class RNN(LinearBackboneModel): - """ Simple recurrent model backbone """ - - def __init__(self, input_length: int, hidden_units: int, rnn_type: str = 'lstm'): - super().__init__() - - self.input_length = input_length - self.hidden_units = hidden_units - - self.rnn_cell = RnnCell(input_size=input_length, hidden_size=self.hidden_units, rnn_type=rnn_type) - - @property - def output_dim(self) -> int: - return self.rnn_cell.output_dim - - @property - def state_dim(self) -> int: - """ Initial state of the network """ - return self.rnn_cell.state_dim - - @property - def is_stateful(self) -> bool: - """ If the model has a state that needs to be fed between individual observations """ - return True - - def zero_state(self, batch_size): - """ Potential state for the model """ - return self.rnn_cell.zero_state(batch_size) - - def forward(self, input_data, state): - hidden_state, new_state = self.rnn_cell(input_data, state) - return hidden_state, new_state - - -def create(input_length: int, hidden_units: int, rnn_type: str = 'lstm'): - """ Vel factory function """ - def instantiate(**_): - return RNN( - input_length=input_length, - hidden_units=hidden_units, - rnn_type=rnn_type - ) - return ModelFactory.generic(instantiate) diff --git a/vel/rl/xpolicy/__init__.py b/vel/rl/xpolicy/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/vel/rl/xpolicy/purgatory/__init__.py b/vel/rl/xpolicy/purgatory/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/vel/rl/xpolicy/purgatory/old_stochastic_policy.py b/vel/rl/xpolicy/purgatory/old_stochastic_policy.py deleted file mode 100644 index 4fc5a16b..00000000 --- a/vel/rl/xpolicy/purgatory/old_stochastic_policy.py +++ /dev/null @@ -1,123 +0,0 @@ -import gym -import typing - -from vel.api import LinearBackboneModel, ModelFactory, BackboneModel -from vel.module.input.identity import IdentityFactory -from vel.rl.api import Rollout, Evaluator, RlPolicy -from vel.rl.module.action_head import StochasticActionHead -from vel.rl.module.value_head import ValueHead - - -class StochasticPolicyEvaluator(Evaluator): - """ Evaluator for a policy gradient model """ - - def __init__(self, model: 'StochasticPolicyModel', rollout: Rollout): - super().__init__(rollout) - - self.model = model - - policy_params, estimated_values = model(self.rollout.batch_tensor('observations')) - - self.provide('model:policy_params', policy_params) - self.provide('model:values', estimated_values) - - @Evaluator.provides('model:action:logprobs') - def model_action_logprobs(self): - actions = self.get('rollout:actions') - policy_params = self.get('model:policy_params') - return self.model.action_head.logprob(actions, policy_params) - - @Evaluator.provides('model:entropy') - def model_entropy(self): - policy_params = self.get('model:policy_params') - return self.model.entropy(policy_params) - - -class StochasticPolicyModel(RlPolicy): - """ - Most generic policy gradient model class with a set of common actor-critic heads that share a single backbone - """ - - def __init__(self, input_block: BackboneModel, backbone: LinearBackboneModel, action_space: gym.Space): - super().__init__() - - self.input_block = input_block - self.backbone = backbone - self.action_head = StochasticActionHead( - action_space=action_space, - input_dim=self.backbone.output_dim - ) - self.value_head = ValueHead(input_dim=self.backbone.output_dim) - - def reset_weights(self): - """ Initialize 
properly model weights """ - self.input_block.reset_weights() - self.backbone.reset_weights() - self.action_head.reset_weights() - self.value_head.reset_weights() - - def forward(self, observations): - """ Calculate model outputs """ - input_data = self.input_block(observations) - - base_output = self.backbone(input_data) - - action_output = self.action_head(base_output) - value_output = self.value_head(base_output) - - return action_output, value_output - - def step(self, observation, deterministic=False): - """ Select actions based on model's output """ - action_pd_params, value_output = self(observation) - actions = self.action_head.sample(action_pd_params, deterministic=deterministic) - - # log likelihood of selected action - logprobs = self.action_head.logprob(actions, action_pd_params) - - return { - 'actions': actions, - 'values': value_output, - 'action:logprobs': logprobs - } - - def evaluate(self, rollout: Rollout) -> Evaluator: - """ Evaluate model on a rollout """ - return StochasticPolicyEvaluator(self, rollout) - - def logprob(self, action_sample, policy_params): - """ Calculate - log(prob) of selected actions """ - return self.action_head.logprob(action_sample, policy_params) - - def value(self, observations): - """ Calculate only value head for given state """ - input_data = self.input_block(observations) - base_output = self.backbone(input_data) - value_output = self.value_head(base_output) - return value_output - - def entropy(self, policy_params): - """ Entropy of a probability distribution """ - return self.action_head.entropy(policy_params) - - -class StochasticPolicyModelFactory(ModelFactory): - """ Factory class for policy gradient models """ - def __init__(self, input_block: IdentityFactory, backbone: ModelFactory): - self.backbone = backbone - self.input_block = input_block - - def instantiate(self, **extra_args): - """ Instantiate the model """ - input_block = self.input_block.instantiate() - backbone = self.backbone.instantiate(**extra_args) - - return StochasticPolicyModel(input_block, backbone, extra_args['action_space']) - - -def create(backbone: ModelFactory, input_block: typing.Optional[ModelFactory] = None): - """ Vel factory function """ - if input_block is None: - input_block = IdentityFactory() - - return StochasticPolicyModelFactory(input_block=input_block, backbone=backbone) diff --git a/vel/rl/xpolicy/purgatory/old_stochastic_rnn_policy.py b/vel/rl/xpolicy/purgatory/old_stochastic_rnn_policy.py deleted file mode 100644 index 25551144..00000000 --- a/vel/rl/xpolicy/purgatory/old_stochastic_rnn_policy.py +++ /dev/null @@ -1,153 +0,0 @@ -import gym -import torch -import typing - -from vel.api import LinearBackboneModel, ModelFactory, BackboneModel -from vel.module.input.identity import IdentityFactory -from vel.rl.api import Rollout, Trajectories, Evaluator, RlRnnModel -from vel.rl.module.action_head import StochasticActionHead -from vel.rl.module.value_head import ValueHead - - -class StochasticPolicyRnnEvaluator(Evaluator): - """ Evaluate recurrent model from initial state """ - - def __init__(self, model: 'StochasticPolicyRnnModel', rollout: Rollout): - assert isinstance(rollout, Trajectories), "For an RNN model, we must evaluate trajectories" - super().__init__(rollout) - - self.model = model - - observation_trajectories = rollout.transition_tensors['observations'] - hidden_state = rollout.rollout_tensors['initial_hidden_state'] - - action_accumulator = [] - value_accumulator = [] - - # Evaluate recurrent network step by step - for i in 
range(observation_trajectories.size(0)): - action_output, value_output, hidden_state = model(observation_trajectories[i], hidden_state) - action_accumulator.append(action_output) - value_accumulator.append(value_output) - - policy_params = torch.cat(action_accumulator, dim=0) - estimated_values = torch.cat(value_accumulator, dim=0) - - self.provide('model:policy_params', policy_params) - self.provide('model:values', estimated_values) - - @Evaluator.provides('model:action:logprobs') - def model_action_logprobs(self): - actions = self.get('rollout:actions') - policy_params = self.get('model:policy_params') - return self.model.action_head.logprob(actions, policy_params) - - @Evaluator.provides('model:entropy') - def model_entropy(self): - policy_params = self.get('model:policy_params') - return self.model.entropy(policy_params) - - -class StochasticPolicyRnnModel(RlRnnModel): - """ - Most generic policy gradient model class with a set of common actor-critic heads that share a single backbone - RNN version - """ - - def __init__(self, input_block: BackboneModel, backbone: LinearBackboneModel, - action_space: gym.Space): - super().__init__() - - self.input_block = input_block - self.backbone = backbone - - self.action_head = StochasticActionHead( - action_space=action_space, - input_dim=self.backbone.output_dim - ) - self.value_head = ValueHead(input_dim=self.backbone.output_dim) - - assert self.backbone.is_stateful, "Backbone must be a recurrent model" - - @property - def state_dim(self) -> int: - """ Dimension of model state """ - return self.backbone.state_dim - - def reset_weights(self): - """ Initialize properly model weights """ - self.input_block.reset_weights() - self.backbone.reset_weights() - self.action_head.reset_weights() - self.value_head.reset_weights() - - def forward(self, observations, state): - """ Calculate model outputs """ - input_data = self.input_block(observations) - base_output, new_state = self.backbone(input_data, state=state) - - action_output = self.action_head(base_output) - value_output = self.value_head(base_output) - - return action_output, value_output, new_state - - def step(self, observations, state, deterministic=False): - """ Select actions based on model's output """ - action_pd_params, value_output, new_state = self(observations, state) - actions = self.action_head.sample(action_pd_params, deterministic=deterministic) - - # log likelihood of selected action - logprobs = self.action_head.logprob(actions, action_pd_params) - - return { - 'actions': actions, - 'values': value_output, - 'action:logprobs': logprobs, - 'state': new_state - } - - def evaluate(self, rollout: Rollout) -> Evaluator: - """ Evaluate model on a rollout """ - return StochasticPolicyRnnEvaluator(self, rollout) - - def logprob(self, action_sample, policy_params): - """ Calculate - log(prob) of selected actions """ - return self.action_head.logprob(action_sample, policy_params) - - def value(self, observations, state): - """ Calculate only value head for given state """ - input_data = self.input_block(observations) - - base_output, new_state = self.backbone(input_data, state) - value_output = self.value_head(base_output) - - return value_output - - def entropy(self, action_pd_params): - """ Entropy of a probability distribution """ - return self.action_head.entropy(action_pd_params) - - -class PolicyGradientRnnModelFactory(ModelFactory): - """ Factory class for policy gradient models """ - def __init__(self, input_block: ModelFactory, backbone: ModelFactory): - self.input_block = 
input_block - self.backbone = backbone - - def instantiate(self, **extra_args): - """ Instantiate the model """ - input_block = self.input_block.instantiate() - backbone = self.backbone.instantiate(**extra_args) - - return StochasticPolicyRnnModel(input_block, backbone, extra_args['action_space']) - - -def create(backbone: ModelFactory, input_block: typing.Optional[ModelFactory] = None): - """ Vel factory function """ - if input_block is None: - input_block = IdentityFactory() - - return PolicyGradientRnnModelFactory( - input_block=input_block, - backbone=backbone - ) diff --git a/vel/rl/xpolicy/purgatory/q_distributional_policy.py b/vel/rl/xpolicy/purgatory/q_distributional_policy.py deleted file mode 100644 index 4dde37cf..00000000 --- a/vel/rl/xpolicy/purgatory/q_distributional_policy.py +++ /dev/null @@ -1,144 +0,0 @@ -import gym -import typing - -from vel.api import LinearBackboneModel, ModelFactory, BackboneModel -from vel.module.input.identity import IdentityFactory -from vel.rl.api import Rollout, RlPolicy, Evaluator -from vel.rl.module.q_distributional_head import QDistributionalHead - - -class QDistributionalModelEvaluator(Evaluator): - """ Evaluate distributional q-model """ - def __init__(self, model: 'QDistributionalModel', rollout: Rollout): - super().__init__(rollout) - self.model = model - - @Evaluator.provides('model:q') - def model_q(self): - """ Action values for all (discrete) actions """ - # observations = self.get('rollout:observations') - # # This mean of last dimension collapses the histogram/calculates mean reward - # return self.model(observations).mean(dim=-1) - raise NotImplementedError - - @Evaluator.provides('model:q_dist') - def model_q_dist(self): - """ Action values for all (discrete) actions """ - observations = self.get('rollout:observations') - # This mean of last dimension collapses the histogram/calculates mean reward - return self.model(observations) - - @Evaluator.provides('model:action:q') - def model_action_q(self): - """ Action values for selected actions in the rollout """ - raise NotImplementedError - - @Evaluator.provides('model:action:q_dist') - def model_action_q_dist(self): - """ Action values for selected actions in the rollout """ - q = self.get('model:q_dist') - actions = self.get('rollout:actions') - return q[range(q.size(0)), actions] - - @Evaluator.provides('model:q_next') - def model_q_next(self): - """ Action values for all (discrete) actions """ - raise NotImplementedError - - @Evaluator.provides('model:q_dist_next') - def model_q_dist_next(self): - """ Action values for all (discrete) actions """ - observations = self.get('rollout:observations_next') - # This mean of last dimension collapses the histogram/calculates mean reward - return self.model(observations) - - -class QDistributionalModel(RlPolicy): - """ - A deterministic greedy action-value model that learns a value function distribution rather than - just an expectation. 
- Supports only discrete action spaces (ones that can be enumerated) - """ - def __init__(self, input_block: BackboneModel, backbone: LinearBackboneModel, action_space: gym.Space, - vmin: float, vmax: float, atoms: int = 1): - super().__init__() - - self.action_space = action_space - - self.input_block = input_block - self.backbone = backbone - - self.q_head = QDistributionalHead( - input_dim=backbone.output_dim, action_space=action_space, - vmin=vmin, vmax=vmax, - atoms=atoms - ) - - def reset_weights(self): - """ Initialize weights to reasonable defaults """ - self.input_block.reset_weights() - self.backbone.reset_weights() - self.q_head.reset_weights() - - def forward(self, observations): - """ Model forward pass """ - input_data = self.input_block(observations) - base_output = self.backbone(input_data) - log_histogram = self.q_head(base_output) - return log_histogram - - def histogram_info(self): - """ Return extra information about histogram """ - return self.q_head.histogram_info() - - def step(self, observations): - """ Sample action from an action space for given state """ - log_histogram = self(observations) - actions = self.q_head.sample(log_histogram) - - return { - 'actions': actions, - 'log_histogram': log_histogram - } - - def evaluate(self, rollout: Rollout) -> Evaluator: - """ Evaluate model on a rollout """ - return QDistributionalModelEvaluator(self, rollout) - - -class QDistributionalModelFactory(ModelFactory): - """ Factory class for q-learning models """ - def __init__(self, input_block: ModelFactory, backbone: ModelFactory, vmin: float, vmax: float, atoms: int): - self.input_block = input_block - self.backbone = backbone - self.vmin = vmin - self.vmax = vmax - self.atoms = atoms - - def instantiate(self, **extra_args): - """ Instantiate the model """ - input_block = self.input_block.instantiate() - backbone = self.backbone.instantiate(**extra_args) - - return QDistributionalModel( - input_block=input_block, - backbone=backbone, - action_space=extra_args['action_space'], - vmin=self.vmin, - vmax=self.vmax, - atoms=self.atoms - ) - - -def create(backbone: ModelFactory, vmin: float, vmax: float, atoms: int, - input_block: typing.Optional[ModelFactory] = None): - """ Vel factory function """ - if input_block is None: - input_block = IdentityFactory() - - return QDistributionalModelFactory( - input_block=input_block, backbone=backbone, - vmin=vmin, - vmax=vmax, - atoms=atoms - ) diff --git a/vel/rl/xpolicy/purgatory/q_dueling_policy.py b/vel/rl/xpolicy/purgatory/q_dueling_policy.py deleted file mode 100644 index 74fff35a..00000000 --- a/vel/rl/xpolicy/purgatory/q_dueling_policy.py +++ /dev/null @@ -1,73 +0,0 @@ -import gym -import typing - -from vel.api import LinearBackboneModel, Model, ModelFactory, BackboneModel -from vel.module.input.identity import IdentityFactory -from vel.rl.api import Rollout, Evaluator -from vel.rl.module.q_dueling_head import QDuelingHead -from vel.rl.model.q_model import QModelEvaluator - - -class QDuelingModel(Model): - """ - Deterministic greedy action-value model with dueling heads (kind of actor and critic) - Supports only discrete action spaces (ones that can be enumerated) - """ - - def __init__(self, input_block: BackboneModel, backbone: LinearBackboneModel, action_space: gym.Space): - super().__init__() - - self.action_space = action_space - - self.input_block = input_block - self.backbone = backbone - self.q_head = QDuelingHead(input_dim=backbone.output_dim, action_space=action_space) - - def forward(self, observations): - """ Model 
forward pass """ - observations = self.input_block(observations) - advantage_features, value_features = self.backbone(observations) - q_values = self.q_head(advantage_features, value_features) - - return q_values - - def reset_weights(self): - """ Initialize weights to reasonable defaults """ - self.input_block.reset_weights() - self.backbone.reset_weights() - self.q_head.reset_weights() - - def step(self, observations): - """ Sample action from an action space for given state """ - q_values = self(observations) - - return { - 'actions': self.q_head.sample(q_values), - 'q': q_values - } - - def evaluate(self, rollout: Rollout) -> Evaluator: - """ Evaluate model on a rollout """ - return QModelEvaluator(self, rollout) - - -class QDuelingModelFactory(ModelFactory): - """ Factory class for policy gradient models """ - def __init__(self, input_block: ModelFactory, backbone: ModelFactory): - self.input_block = input_block - self.backbone = backbone - - def instantiate(self, **extra_args): - """ Instantiate the model """ - input_block = self.input_block.instantiate() - backbone = self.backbone.instantiate(**extra_args) - - return QDuelingModel(input_block, backbone, extra_args['action_space']) - - -def create(backbone: ModelFactory, input_block: typing.Optional[ModelFactory] = None): - """ Vel factory function """ - if input_block is None: - input_block = IdentityFactory() - - return QDuelingModelFactory(input_block=input_block, backbone=backbone) diff --git a/vel/rl/xpolicy/purgatory/q_model.py b/vel/rl/xpolicy/purgatory/q_model.py deleted file mode 100644 index 2fbd4513..00000000 --- a/vel/rl/xpolicy/purgatory/q_model.py +++ /dev/null @@ -1,97 +0,0 @@ -import gym -import typing - -from vel.api import LinearBackboneModel, ModelFactory, BackboneModel -from vel.module.input.identity import IdentityFactory -from vel.rl.api import Rollout, RlPolicy -from vel.rl.module.q_head import QHead - - -# class QModelEvaluator(Evaluator): -# """ Evaluate simple q-model """ -# def __init__(self, model: 'QModel', rollout: Rollout): -# super().__init__(rollout) -# self.model = model -# -# @Evaluator.provides('model:q') -# def model_q(self): -# """ Action values for all (discrete) actions """ -# observations = self.get('rollout:observations') -# return self.model(observations) -# -# @Evaluator.provides('model:action:q') -# def model_action_q(self): -# """ Action values for selected actions in the rollout """ -# q = self.get('model:q') -# actions = self.get('rollout:actions') -# return q.gather(1, actions.unsqueeze(1)).squeeze(1) -# -# @Evaluator.provides('model:q_next') -# def model_q_next(self): -# """ Action values for all (discrete) actions """ -# observations = self.get('rollout:observations_next') -# return self.model(observations) - - -class QModel(RlPolicy): - """ - Simple deterministic greedy action-value model. 
- Supports only discrete action spaces (ones that can be enumerated) - """ - def __init__(self, input_block: BackboneModel, backbone: LinearBackboneModel, action_space: gym.Space): - super().__init__() - - self.action_space = action_space - - self.input_block = input_block - self.backbone = backbone - self.q_head = QHead(input_dim=backbone.output_dim, action_space=action_space) - - def reset_weights(self): - """ Initialize weights to reasonable defaults """ - self.input_block.reset_weights() - self.backbone.reset_weights() - self.q_head.reset_weights() - - def forward(self, observations): - """ Model forward pass """ - observations = self.input_block(observations) - base_output = self.backbone(observations) - q_values = self.q_head(base_output) - return q_values - - def step(self, observations): - """ Sample action from an action space for given state """ - q_values = self(observations) - actions = self.q_head.sample(q_values) - - return { - 'actions': actions, - 'q': q_values - } - - def evaluate(self, rollout: Rollout) -> Evaluator: - """ Evaluate model on a rollout """ - return QModelEvaluator(self, rollout) - - -class QModelFactory(ModelFactory): - """ Factory class for q-learning models """ - def __init__(self, input_block: ModelFactory, backbone: ModelFactory): - self.input_block = input_block - self.backbone = backbone - - def instantiate(self, **extra_args): - """ Instantiate the model """ - input_block = self.input_block.instantiate() - backbone = self.backbone.instantiate(**extra_args) - - return QModel(input_block, backbone, extra_args['action_space']) - - -def create(backbone: ModelFactory, input_block: typing.Optional[ModelFactory] = None): - """ Vel factory function """ - if input_block is None: - input_block = IdentityFactory() - - return QModelFactory(input_block=input_block, backbone=backbone) diff --git a/vel/rl/xpolicy/purgatory/q_noisy_model.py b/vel/rl/xpolicy/purgatory/q_noisy_model.py deleted file mode 100644 index 2ef6aab3..00000000 --- a/vel/rl/xpolicy/purgatory/q_noisy_model.py +++ /dev/null @@ -1,86 +0,0 @@ -import gym -import typing - -from vel.api import LinearBackboneModel, ModelFactory, BackboneModel -from vel.module.input.identity import IdentityFactory -from vel.rl.api import Rollout, RlPolicy, Evaluator -from vel.rl.model.q_model import QModelEvaluator -from vel.rl.module.q_noisy_head import QNoisyHead - - -class NoisyQModel(RlPolicy): - """ - NoisyNets action-value model. 
- Supports only discrete action spaces (ones that can be enumerated) - """ - - def __init__(self, input_block: BackboneModel, backbone: LinearBackboneModel, action_space: gym.Space, - initial_std_dev=0.4, factorized_noise=True): - super().__init__() - - self.action_space = action_space - - self.input_block = input_block - self.backbone = backbone - self.q_head = QNoisyHead( - input_dim=backbone.output_dim, action_space=action_space, initial_std_dev=initial_std_dev, - factorized_noise=factorized_noise - ) - - def reset_weights(self): - """ Initialize weights to reasonable defaults """ - self.input_block.reset_weights() - self.backbone.reset_weights() - self.q_head.reset_weights() - - def forward(self, observations): - """ Model forward pass """ - observations = self.input_block(observations) - base_output = self.backbone(observations) - q_values = self.q_head(base_output) - return q_values - - def step(self, observations): - """ Sample action from an action space for given state """ - q_values = self(observations) - actions = self.q_head.sample(q_values) - - return { - 'actions': actions, - 'q': q_values - } - - def evaluate(self, rollout: Rollout) -> Evaluator: - """ Evaluate model on a rollout """ - return QModelEvaluator(self, rollout) - - -class NoisyQModelFactory(ModelFactory): - """ Factory class for q-learning models """ - def __init__(self, input_block: ModelFactory, backbone: ModelFactory, initial_std_dev=0.4, factorized_noise=True): - self.initial_std_dev = initial_std_dev - self.factorized_noise = factorized_noise - - self.input_block = input_block - self.backbone = backbone - - def instantiate(self, **extra_args): - """ Instantiate the model """ - input_block = self.input_block.instantiate() - backbone = self.backbone.instantiate(**extra_args) - - return NoisyQModel( - input_block, backbone, extra_args['action_space'], initial_std_dev=self.initial_std_dev, - factorized_noise=self.factorized_noise - ) - - -def create(backbone: ModelFactory, input_block: typing.Optional[ModelFactory] = None, initial_std_dev: float = 0.4, - factorized_noise: bool = True): - """ Vel factory function """ - if input_block is None: - input_block = IdentityFactory() - - return NoisyQModelFactory( - input_block=input_block, backbone=backbone, initial_std_dev=initial_std_dev, factorized_noise=factorized_noise - ) diff --git a/vel/rl/xpolicy/purgatory/q_stochastic_policy_model.py b/vel/rl/xpolicy/purgatory/q_stochastic_policy_model.py deleted file mode 100644 index c489980d..00000000 --- a/vel/rl/xpolicy/purgatory/q_stochastic_policy_model.py +++ /dev/null @@ -1,128 +0,0 @@ -import gym -import torch -import typing - -from vel.api import LinearBackboneModel, Model, ModelFactory, BackboneModel -from vel.module.input.identity import IdentityFactory -from vel.rl.api import Rollout, Evaluator -from vel.rl.module.action_head import StochasticActionHead -from vel.rl.module.q_head import QHead - - -class QStochasticPolicyEvaluator(Evaluator): - """ Evaluator for QPolicyGradientModel """ - def __init__(self, model: 'QStochasticPolicyModel', rollout: Rollout): - super().__init__(rollout) - - self.model = model - - observations = self.get('rollout:observations') - logprobs, q = model(observations) - - self.provide('model:logprobs', logprobs) - self.provide('model:q', q) - - @Evaluator.provides('model:action:logprobs') - def model_action_logprobs(self): - actions = self.get('rollout_actions') - logprobs = self.get('model:logprobs') - return self.model.action_head.logprob(actions, logprobs) - - -class 
QStochasticPolicyModel(Model): - """ - A policy gradient model with an action-value critic head (instead of more common state-value critic head). - Supports only discrete action spaces (ones that can be enumerated) - """ - - def __init__(self, input_block: BackboneModel, backbone: LinearBackboneModel, action_space: gym.Space): - super().__init__() - - assert isinstance(action_space, gym.spaces.Discrete) - - self.input_block = input_block - self.backbone = backbone - - self.action_head = StochasticActionHead( - input_dim=self.backbone.output_dim, - action_space=action_space - ) - - self.q_head = QHead( - input_dim=self.backbone.output_dim, - action_space=action_space - ) - - def reset_weights(self): - """ Initialize properly model weights """ - self.input_block.reset_weights() - self.backbone.reset_weights() - self.action_head.reset_weights() - self.q_head.reset_weights() - - def forward(self, observations): - """ Calculate model outputs """ - input_data = self.input_block(observations) - - base_output = self.backbone(input_data) - policy_params = self.action_head(base_output) - - q = self.q_head(base_output) - - return policy_params, q - - def step(self, observation, deterministic=False): - """ Select actions based on model's output """ - policy_params, q = self(observation) - actions = self.action_head.sample(policy_params, deterministic=deterministic) - - # log probability - we can do that, because we support only discrete action spaces - logprobs = self.action_head.logprob(actions, policy_params) - - return { - 'actions': actions, - 'q': q, - 'logprobs': policy_params, - 'action:logprobs': logprobs - } - - def evaluate(self, rollout: Rollout) -> QStochasticPolicyEvaluator: - """ Evaluate model on a rollout """ - return QStochasticPolicyEvaluator(self, rollout) - - def value(self, observation): - """ Calculate only value head for given state """ - policy_params, q = self(observation) - - # Expectation of Q value with respect to action - return (torch.exp(policy_params) * q).sum(dim=1) - - def entropy(self, action_logits): - """ Entropy of a probability distribution """ - return self.action_head.entropy(action_logits) - - def kl_divergence(self, logits_q, logits_p): - """ Calculate KL-divergence between two probability distributions """ - return self.action_head.kl_divergence(logits_q, logits_p) - - -class QStochasticPolicyModelFactory(ModelFactory): - """ Factory class for policy gradient models """ - def __init__(self, input_block: IdentityFactory, backbone: ModelFactory): - self.backbone = backbone - self.input_block = input_block - - def instantiate(self, **extra_args): - """ Instantiate the model """ - input_block = self.input_block.instantiate() - backbone = self.backbone.instantiate(**extra_args) - - return QStochasticPolicyModel(input_block, backbone, extra_args['action_space']) - - -def create(backbone: ModelFactory, input_block: typing.Optional[ModelFactory] = None): - """ Vel factory function """ - if input_block is None: - input_block = IdentityFactory() - - return QStochasticPolicyModelFactory(input_block=input_block, backbone=backbone) diff --git a/vel/rl/xpolicy/stochastic_policy_separate.py b/vel/rl/xpolicy/stochastic_policy_separate.py deleted file mode 100644 index afced37c..00000000 --- a/vel/rl/xpolicy/stochastic_policy_separate.py +++ /dev/null @@ -1,94 +0,0 @@ -import gym -import typing - -from vel.api import LinearBackboneModel, ModelFactory, BackboneModel -from vel.module.input.identity import IdentityFactory -from vel.rl.module.stochastic_action_head import 
StochasticActionHead -from vel.rl.module.value_head import ValueHead - - -class StochasticPolicyModelSeparate(BackboneModel): - """ - Policy gradient model class with an actor and critic heads that don't share a backbone - """ - - def __init__(self, input_block: BackboneModel, - policy_backbone: LinearBackboneModel, value_backbone: LinearBackboneModel, - action_space: gym.Space): - super().__init__() - - self.input_block = input_block - self.policy_backbone = policy_backbone - self.value_backbone = value_backbone - - self.action_head = StochasticActionHead( - action_space=action_space, - input_dim=self.policy_backbone.output_dim - ) - - self.value_head = ValueHead(input_dim=self.value_backbone.output_dim) - - def reset_weights(self): - """ Initialize properly model weights """ - self.input_block.reset_weights() - - self.policy_backbone.reset_weights() - self.value_backbone.reset_weights() - - self.action_head.reset_weights() - self.value_head.reset_weights() - - def forward(self, observations): - """ Calculate model outputs """ - input_data = self.input_block(observations) - - policy_base_output = self.policy_backbone(input_data) - value_base_output = self.value_backbone(input_data) - - action_output = self.action_head(policy_base_output) - value_output = self.value_head(value_base_output) - - return action_output, value_output - - def value(self, observations, state=None): - """ Calculate only value head for given state """ - input_data = self.input_block(observations) - base_output = self.value_backbone(input_data) - value_output = self.value_head(base_output) - return value_output - - def policy(self, observations): - """ Calculate only action head for given state """ - input_data = self.input_block(observations) - policy_base_output = self.policy_backbone(input_data) - policy_params = self.action_head(policy_base_output) - return policy_params - - -class StochasticPolicyModelSeparateFactory(ModelFactory): - """ Factory class for policy gradient models """ - def __init__(self, input_block: ModelFactory, policy_backbone: ModelFactory, value_backbone: ModelFactory): - self.input_block = input_block - self.policy_backbone = policy_backbone - self.value_backbone = value_backbone - - def instantiate(self, **extra_args): - """ Instantiate the model """ - input_block = self.input_block.instantiate() - policy_backbone = self.policy_backbone.instantiate(**extra_args) - value_backbone = self.value_backbone.instantiate(**extra_args) - - return StochasticPolicyModelSeparate(input_block, policy_backbone, value_backbone, extra_args['action_space']) - - -def create(policy_backbone: ModelFactory, value_backbone: ModelFactory, - input_block: typing.Optional[ModelFactory] = None): - """ Vel factory function """ - if input_block is None: - input_block = IdentityFactory() - - return StochasticPolicyModelSeparateFactory( - input_block=input_block, - policy_backbone=policy_backbone, - value_backbone=value_backbone - ) From aeec2ac8aaaf63313ff3bab0bb2f26d1dedb7e9b Mon Sep 17 00:00:00 2001 From: Million Integrals Date: Thu, 3 Oct 2019 15:59:37 -0700 Subject: [PATCH 118/162] Commit basic version of VQ-VAE. 
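The core of a VQ-VAE is a quantization step that snaps each encoder output onto its nearest vector in a learned k x d codebook, with a straight-through estimator carrying gradients back to the encoder. The sketch below shows that step in isolation; it is a generic illustration with assumed shapes, not the interface of the vel.model.latent.vq_vae module added in this patch (k=128 and d=64 match the example config below):

    import torch

    k, d = 128, 64                    # number of codebook entries and embedding dimension
    codebook = torch.randn(k, d)      # a learnable embedding table in a real model
    z_e = torch.randn(32, d)          # encoder outputs for 32 latent positions

    # Squared L2 distance from every encoder output to every codebook entry
    distances = (z_e.unsqueeze(1) - codebook.unsqueeze(0)).pow(2).sum(dim=-1)  # (32, k)
    indices = distances.argmin(dim=1)     # index of the nearest code per position
    z_q = codebook[indices]               # quantized latents, shape (32, d)

    # Straight-through estimator: the forward pass uses z_q, gradients flow to z_e
    z_q_st = z_e + (z_q - z_e).detach()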
--- .../latent/mnist/mnist_cnn_vq_vae.yaml | 53 +++ .../autoencoders/mnist/mnist-vq-vae.ipynb | 239 ++++++++++++++ vel/model/latent/vq_vae.py | 306 ++++++++++++++++++ 3 files changed, 598 insertions(+) create mode 100644 examples-configs/latent/mnist/mnist_cnn_vq_vae.yaml create mode 100644 examples-notebooks/autoencoders/mnist/mnist-vq-vae.ipynb create mode 100644 vel/model/latent/vq_vae.py diff --git a/examples-configs/latent/mnist/mnist_cnn_vq_vae.yaml b/examples-configs/latent/mnist/mnist_cnn_vq_vae.yaml new file mode 100644 index 00000000..bab34608 --- /dev/null +++ b/examples-configs/latent/mnist/mnist_cnn_vq_vae.yaml @@ -0,0 +1,53 @@ +name: 'mnist_cnn_vq_vae' + + +model: + name: vel.model.latent.vq_vae + img_rows: 28 + img_cols: 28 + img_channels: 1 + channels: [64, 128, 256] +# channels: [32, 64, 128] + + k: 128 + d: 64 + + +source: + name: vel.data.source.vision.mnist + + +loader: + name: vel.data.dataset_loader + batch_size: 128 +# num_workers: 4 +# pin_memory: true + + transformations: + - name: vel.data.transformation.to_array + - name: vel.data.transformation.image_to_tensor + - name: vel.data.transformation.unsupervised + + +optimizer: + name: vel.optimizer.radam + lr: 1.0e-3 + eps: 1.0e-4 + + +scheduler: + name: vel.scheduler.multi_step + gamma: 0.71968 # 10 * (-1/7) + milestones: [ 1, 4, 13, 40, 121, 364, 1093, 3280] + + +commands: + augvis: + name: vel.command.augvis_command + samples: 10 + cases: 5 + + train: + name: vel.command.train_command +# epochs: 3280 + epochs: 50 diff --git a/examples-notebooks/autoencoders/mnist/mnist-vq-vae.ipynb b/examples-notebooks/autoencoders/mnist/mnist-vq-vae.ipynb new file mode 100644 index 00000000..32f55f3c --- /dev/null +++ b/examples-notebooks/autoencoders/mnist/mnist-vq-vae.ipynb @@ -0,0 +1,239 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 25, + "metadata": {}, + "outputs": [], + "source": [ + "import os\n", + "import torch\n", + "import tqdm\n", + "import numpy as np\n", + "import matplotlib.pyplot as plt" + ] + }, + { + "cell_type": "code", + "execution_count": 26, + "metadata": {}, + "outputs": [], + "source": [ + "import vel\n", + "import vel.notebook as nb" + ] + }, + { + "cell_type": "code", + "execution_count": 27, + "metadata": {}, + "outputs": [], + "source": [ + "nb.reasonable_notbook_defaults()\n", + "torch.set_grad_enabled(False) # We don't need autograd here\n", + "None" + ] + }, + { + "cell_type": "code", + "execution_count": 28, + "metadata": {}, + "outputs": [], + "source": [ + "config = nb.load_config('examples-configs/latent/mnist/mnist_cnn_vq_vae.yaml', run_number=13, device='cuda:0') " + ] + }, + { + "cell_type": "code", + "execution_count": 29, + "metadata": {}, + "outputs": [], + "source": [ + "model = config.load_trained_model().to(config.device)" + ] + }, + { + "cell_type": "code", + "execution_count": 30, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "VQVAE(\n", + " (codebook): VQEmbedding(k=128, d=64)\n", + " (encoder): Sequential(\n", + " (0): Conv2d(1, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n", + " (1): SELU(inplace=True)\n", + " (2): LayerNorm((64, 28, 28), eps=1e-05, elementwise_affine=True)\n", + " (3): Conv2d(64, 128, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1))\n", + " (4): SELU(inplace=True)\n", + " (5): LayerNorm((128, 14, 14), eps=1e-05, elementwise_affine=True)\n", + " (6): Conv2d(128, 256, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1))\n", + " (7): SELU(inplace=True)\n", + " (8): LayerNorm((256, 7, 
7), eps=1e-05, elementwise_affine=True)\n", + " (9): Conv2d(256, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n", + " (10): SELU(inplace=True)\n", + " (11): LayerNorm((64, 7, 7), eps=1e-05, elementwise_affine=True)\n", + " )\n", + " (decoder): Sequential(\n", + " (0): ConvTranspose2d(64, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n", + " (1): SELU(inplace=True)\n", + " (2): LayerNorm((256, 7, 7), eps=1e-05, elementwise_affine=True)\n", + " (3): ConvTranspose2d(256, 128, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), output_padding=(1, 1))\n", + " (4): SELU(inplace=True)\n", + " (5): LayerNorm((128, 14, 14), eps=1e-05, elementwise_affine=True)\n", + " (6): ConvTranspose2d(128, 64, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), output_padding=(1, 1))\n", + " (7): SELU(inplace=True)\n", + " (8): LayerNorm((64, 28, 28), eps=1e-05, elementwise_affine=True)\n", + " (9): ConvTranspose2d(64, 1, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n", + " (10): Sigmoid()\n", + " )\n", + ")\n", + "----------------------------------------------------------------------------------------------------\n", + "Number of model parameters: 1,400,001\n", + "----------------------------------------------------------------------------------------------------\n" + ] + } + ], + "source": [ + "model.summary()" + ] + }, + { + "cell_type": "code", + "execution_count": 31, + "metadata": {}, + "outputs": [], + "source": [ + "data_loader = config.provide('loader')\n", + "data_source = data_loader.transformed_source\n", + "train_dataset = data_source.train\n", + "validation_dataset = data_source.validation" + ] + }, + { + "cell_type": "code", + "execution_count": 32, + "metadata": {}, + "outputs": [], + "source": [ + "def get_sample(idx):\n", + " return train_dataset[idx]['x'].to(config.device)" + ] + }, + { + "cell_type": "code", + "execution_count": 33, + "metadata": {}, + "outputs": [], + "source": [ + "def show_image(axis, sample):\n", + " axis.imshow(train_dataset.denormalize_item(sample, 'x'), cmap='gray')" + ] + }, + { + "cell_type": "code", + "execution_count": 34, + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAA2cAAACxCAYAAABAxMXKAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4xLjEsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy8QZhcZAAAdJUlEQVR4nO3de5AV1dnv8ecBg2I4gIhByhvGIClN4RgQCYcSEpB4jIkoiUoJiLHACl5ISimMIR5yCIYo+BYYNSoBvHBEK0BA8/qiR24xAsUlJC9yETWBF5iAoNyNHGCdP9g5QZ+1mZ7dvfdeq+f7qaKY+dG9e/WeHzN7TU+vUeecAAAAAACqq1G1BwAAAAAAYHIGAAAAAEFgcgYAAAAAAWByBgAAAAABYHIGAAAAAAFgcgYAAAAAAUg1OVPVq1R1g6q+q6r3ZTUooFzoLGJEbxEbOovY0FmEQkv9PWeq2lhE3hGRK0Vki4gsF5H+zrm1J9iHX6qGVJxzWuq+dBbVkKazIvXvLZ1FBnY6584odWc6iyqoaGcL+9BbpFLs9UGaK2ddRORd59z7zrlDIjJDRK5N8XhAudFZxIjeotI2pdyfzqLS6CxyI83k7CwR+a/j3t9SyIBQ0VnEiN4iNnQWsaGzCMZJ5T6Aqg4VkaHlPg6QFTqL2NBZxIbOIkb0FpWQZnK2VUTOOe79swvZpzjnnhKRp0T4+VxUHZ1FjOrsLZ1FYOgsYsPrAwQjzY81LheR9qp6vqo2EZGbRGRuNsMCyoLOIkb0FrGhs4gNnUUwSr5y5pw7rKp3isg8EWksIlOcc29nNjIgY3QWMaK3iA2dRWzoLEJS8lL6JR2MS8BIKe2y5PVFZ5EWnUWEVjrnOlfqYHQWGahoZ0XoLdIrx1L6AAAAAICMMDkDAAAAgAAwOQMAAACAADA5AwAAAIAAMDkDAAAAgAAwOQMAAACAADA5AwAAAIAAMDkDAAAAgAAwOQMAAACAADA5AwAAAIAAMDkDAAAAgAAwOQMAAACAADA5AwAAAIAAnFTtAQDIn06dOpnszjvvNNmgQYNM9uyzz5rs0Ucf9R5n1apVJYwOAAAgTFw5AwAAAIAAMDkDAAAAgAAwOQMAAACAAKS650xV/yYi+0TkiIgcds51zmJQQDnRW8SGziI2dBaxobMIhTrnSt/5WJE7O+d2Jty+9INFrHHjxiZr0aJFqsf0La5w6qmnmqxDhw4mu+OOO7yPOX78eJP179/fZP/4xz9MNm7cOJP97Gc/8x4nDeecpn2M+vS2oXY2qZqaGm8+f/58kzVv3rzk4+zZs8ebn3766SU/ZqXQWRyvV69eJps+fbrJevToYbINGzaUZUweK9O+MKWzYRs1apTJin3NbtTI/pBVz549TbZo0aLU40qhop0tbE9vkUqx1wf8WCMAAAAABCDt5MyJyGuqulJVh2YxIKAC6C1iQ2cRGzqL2NBZBCHt7znr7pzbqqpfEJHXVXW9c27x8RsUCk7JEZIT9pbOIkB0FrGhs4gNr2kRhFRXzpxzWwt/7xCR2SLSxbPNU865ztxYiVDU1Vs6i9DQWcSGziI2vKZFKEq+cqaqnxeRRs65fYW3+4jI/8psZFVy7rnnevMmTZqYrFu3bibr3r27yVq2bGmyfv36lTC6+tuyZYvJJk2a5N32uuuuM9m+fftM9uc//9lkVb4ROLG89rYSunQxX6dk5syZ3m19C974Fh/y9evQoUMmK7bwR9euXU22atWqRI8ZixA6e8UVV5jM9zGZPXt2JYYTtcsuu8xky5cvr8JIyieEzuJfBg8ebLKRI0ea7OjRo4kfM81iciGiswhJmh9rbCMis1X1n4/zv51z/5HJqIDyobeIDZ1FbOgsYkNnEYySJ2fOufdF5JIMxwKUHb1FbOgsYkNnERs6i5CwlD4AAAAABIDJGQAAAAAEIO1S+lGrqakx2fz5873b+hY4CI3vZt5Ro0aZbP/+/d79p0+fbrLa2lqTffTRRybbsGFDkiEiQKeeeqrJvvrVr5rs+eefN1nbtm1THXvjxo0me+ihh0w2Y8YM7/5//OMfTebr/C9+8YsSRod/6tmzp8nat29vMhYE+ZdGjfzf+zz//PNNdt5555mscO8LkJqvX6ecckoVRoI8uPzyy002YMAAk/Xo0cO7/8UXX5zoOPfee6/Jtm3bZjLfQnwi/tcsy5YtS3TsauPKGQAAAAAEgMkZAAAAAASAyRkAAAAABIDJGQAAAAAEoEEvCLJ582aT7dq1y7ttJRYEKXaj4u7du0329a9/3WSHDh0y2XPPPZd+YMi1J5980mT9+/evyLF9C480a9bMZIsWLfLu71uoomPHjqnHhU8bNGiQyZYsWVKFkcSj2GI5Q4YMMZnvxvX169dnPibkX+/evU121113Jdq3WOeuueYak23fvr1+A0OUbrzxRpNNnDjRZK1btzZZsUWNFi5caLIzzjjDZA8//HCCERY/ju8xb7rppkSPWW1cOQMAAACAADA5AwAAAIAAMDkDAAAAgAAwOQMAAACAADA5AwAAAIAANOjVGj/88EOTjRgxwrutb7WiP/3pTyabNGlSomOvXr3aZFdeeaV32wMHDpjs4osvNtnw4cMTHRsNV6dOnUz2rW99y2TFVj/6rGKrKL788ssmGz9+vMm2bdtmMt//q48++sh7nG984xsmSzp2JNeoEd/Hq6/Jkycn3nbjxo1lHAnyqnv37iabOnWqyZKuNl1sdbxNmzbVb2AI3kkn2Zf/nTt3NtnTTz9tslNPPdVkixcvNtmYMWO8x37zzTdNdvLJJ5vspZdeMlmfPn28j+mzYsWKxNuGhq+4AAAAABAAJmcAAAAAEAAmZwAAAAAQgDonZ6o6RVV3qOqa47JWqvq6qm4s/H1aeYcJ1A+9RWzoLGJDZxEbOosYqHPuxBuoXiEi+0XkWefcVwrZQyLyoXNunKreJyKnOedG1nkw1RMfLGDNmzc32b59+0z25JNPmuy2224z2YABA0z2wgsvlDi6hsM5l2i1h6x6G3Nna2pqTDZ//nyT+brt8+qrr5qsf//+3m179Ohhso4dO5rMt2jCBx98kGg8IiJHjhwx2cGDBxONZ9WqVYmPk0ZsnfV9nJYsWWKyWbNmmWzgwIFpDp0rb731ljfv2rWrybp162aypUuXZj6meljpnLOrA3xGKJ1tqHyLNXz/+99PtO/ChQtN1qtXr7RDqqaKdrawX7S9HTx4sMmSLmL0+uuvm+zGG2802d69exOPx/eaeNq0aYn23bp1qzf3LXBSn9cXlVDs9UGdV86cc4tF5LPLGl4rIs8U3n5GRPqmGh2QMXqL2NBZxIbOIjZ0FjEo9Z6zNs652sLbfxeRNhmNBygneovY0FnEhs4iNnQWQUn9e86cc+5El3ZVdaiIDE17HCBLJ+otnUWI6CxiQ2cRG17TIgSlXjnbrqptRUQKf+8otqFz7innXOckPwsMlFmi3tJZBITOIjZ0FrHhNS2CUuqVs7kicouIjCv8PSezEQUq6Y2Ne/bsSbTdkCFDTPbiiy
96tz169Giix0SdctnbCy+80JuPGDHCZC1atDDZzp07TVZbW2uyZ555xmT79+/3Hvv3v/99oqwcmjZtarJ77rnHZDfffHMlhpNWxTt79dVXm8z3nOJf2rSxPwV1/vnnJ96/2A3tkcrl59lqat26tTf3Lf7he72we/duk/385z9PP7D8yG1nx4wZ483vv/9+k/kWCHz88cdNNmrUKJPVZ/EPn5/85Ccl73v33Xd789AW/6iPJEvpvyAiS0Skg6puUdXb5FiBr1TVjSLSu/A+EAx6i9jQWcSGziI2dBYxqPPKmXPOv1a2SNRrriLf6C1iQ2cRGzqL2NBZxKDUe84AAAAAABlicgYAAAAAAUi9lD4+bfTo0Sbr1KmTyXr06GGy3r17ex/ztddeSz0u5MPJJ59ssvHjx3u39S3ssG/fPpMNGjTIZCtWrDBZzItCnHvuudUeQjQ6dOiQaLu33367zCOJh+//oG+REBGRd955x2S+/5domNq1a2eymTNnpnrMRx991GQLFixI9ZgIzwMPPGAy38IfIiKHDh0y2bx580w2cuRIk3388ceJxnPKKad48z59+pjM9zVaVU3mW8hmzpzcrN/y/3HlDAAAAAACwOQMAAAAAALA5AwAAAAAAsDkDAAAAAACwIIgGTtw4IDJhgwZYrJVq1aZ7Omnn/Y+pu/GXd+CDY899pjJfL/xHfG69NJLTeZb+KOYa6+91mSLFi1KNSY0TMuXL6/2EDLVvHlzk1111VUmGzBggMl8N7gXM2bMGJPt3r078f7IN1/nOnbsmHj/N954w2QTJ05MNSaEp2XLliYbNmyYyYq9BvQt/tG3b9+Sx/OlL33JZNOnT/du61skz+e3v/2tyR566KH6DSxSXDkDAAAAgAAwOQMAAACAADA5AwAAAIAAMDkDAAAAgACwIEgFvPfeeyYbPHiwyaZOnerdf+DAgYmyz3/+8yZ79tlnTVZbW+s9DsL3yCOPmExVvdv6FvrI2+IfjRrZ7y8dPXq0CiNpeFq1apX5Y15yySUm8/W7d+/eJjv77LNN1qRJE5PdfPPN3mP7uvTxxx+bbNmyZSb75JNPTHbSSf4vrytXrvTmaHh8CzCMGzcu8f5vvvmmyW655RaT7dmzp34DQ/B8n9tat26deP+7777bZF/4whdMduutt5rsO9/5jsm+8pWvmKxZs2beY/sWKfFlzz//vMl8i+7lEVfOAAAAACAATM4AAAAAIABMzgAAAAAgAEzOAAAAACAAdU7OVHWKqu5Q1TXHZaNVdauqri78ubq8wwSSo7OIEb1FbOgsYkNnEYMkqzVOE5Ffichnl/37N+fc+MxH1EDMnj3bZBs3bvRu61uhr1evXiZ78MEHTXbeeeeZbOzYsd7jbN261ZtHaJrkoLPXXHONyWpqakzmW+VIRGTu3LmZjyk0vpUZfc/H6tWrKzGctKZJAL31rVDoe05//etfm+z+++9PdeyOHTuazLda4+HDh0128OBBk61du9ZkU6ZM8R57xYoVJvOtbrp9+3aTbdmyxWRNmzb1Hmf9+vXePFLTJIDOxqBdu3YmmzlzZqrHfP/9903m6yc+ZZrkoLOHDh0y2QcffGCyM844w7v/X//6V5MVey2RxLZt20y2d+9e77Zt27Y12c6dO0328ssvlzye2NV55cw5t1hEPqzAWIBM0FnEiN4iNnQWsaGziEGae87uVNW/FC4Rn1ZsI1UdqqorVNV+WxKoLDqLGNXZWzqLwNBZxIbXBwhGqZOzJ0TkAhGpEZFaEZlQbEPn3FPOuc7Ouc4lHgvIAp1FjBL1ls4iIHQWseH1AYJS0uTMObfdOXfEOXdURJ4WkS7ZDgvIFp1FjOgtYkNnERs6i9AkWRDEUNW2zrnawrvXiciaE22PZNas8T+NN9xwg8m+/e1vm2zq1Kkmu/32203Wvn1773GuvPLKuoYYrRg761tQoEmTJibbsWOHd/8XX3wx8zFVwsknn2yy0aNHJ95//vz5Jvvxj3+cZkhVU43eDhs2zGSbNm0yWbdu3TI/9ubNm032u9/9zmTr1q0z2dKlSzMfj8/QoUNN5rvp3rdYQ0MQ4+faShg5cqTJfAsa1ce4ceNS7Y9jYuzs7t27Tda3b1+TvfLKK979W7VqZbL33nvPZHPmzDHZtGnTTPbhh/Y2vhkzZniP7VsQpNi2DVWdkzNVfUFEeopIa1XdIiL/U0R6qmqNiDgR+ZuI2BkAUCV0FjGit4gNnUVs6CxiUOfkzDnX3xP/pgxjATJBZxEjeovY0FnEhs4iBmlWawQAAAAAZITJGQAAAAAEoKQFQVBZvhs/n3vuOZNNnjzZZCedZD/EV1xxhfc4PXv2NNnChQvrHiCq6pNPPvHmtbW13jwkvsU/Ro0aZbIRI0Z499+yZYvJJkywqyDv37+/hNHhn375y19WewjB6NWrV6LtZs6cWeaRIFQ1NTUm69OnT8mP51uUQURkw4YNJT8m8mfZsmUm8y1WVA6+15U9evTwbutbCKehLqBUDFfOAAAAACAATM4AAAAAIABMzgAAAAAgAEzOAAAAACAALAgSkI4dO3rz7373uya77LLLTOZb/MNn7dq13nzx4sWJ9kdY5s6dW+0hJOK7Sd630MeNN95osmI3xPfr1y/9wIAymD17drWHgCp57bXXTHbaaacl2nfp0qUmGzx4cNohAWXVtGlTk/kW/hARcc6ZbMaMGZmPKWZcOQMAAACAADA5AwAAAIAAMDkDAAAAgAAwOQMAAACAALAgSAV06NDBZHfeeafJrr/+eu/+Z555ZsnHPnLkiMlqa2u92xa7eRPVoaqJsr59+3r3Hz58eOZjSupHP/qRyX7605+arEWLFiabPn26yQYNGpTNwACgzE4//XSTJf36+vjjj5ts//79qccElNO8efOqPYRc4coZAAAAAASAyRkAAAAABIDJGQAAAAAEoM7Jmaqeo6oLVHWtqr6tqsMLeStVfV1VNxb+TvYbFoEyo7OIDZ1FjOgtYkNnEYMkV84Oi8g9zrmLRKSriNyhqheJyH0i8oZzrr2IvFF4HwgBnUVs6CxiRG8RGzqL4NW5WqNzrlZEagtv71PVdSJylohcKyI9C5s9IyILRWRkWUYZKN8qiv379zeZb2XGdu3aZT6eFStWmGzs2LEmmzt3bubHDkleOuucS5QVW81z0qRJJpsyZYrJdu3aZbKuXbuabODAgSa75JJLvMc+++yzTbZ582aT+VZ48q1Wlnd56WxD5VtF9cILL/Ruu3Tp0nIPp2LorcjUqVNN1qhR6XeMvPXWW2mGgzrQ2fL45je/We0h5Eq9PoOoajsRuVRElolIm0LJRUT+LiJtMh0ZkAE6i9jQWcSI3iI2dBahSvx7zlS1mYjMFJEfOuf2Hv+dQuecU1X7Lf1j+w0VkaFpBwrUF51FbOgsYlRKb+ksqonPtQhZoitnqvo5OVbi6c65WYV4u6q2Lfx7WxHZ4dvXOfeUc66zc65zFgMGkqCziA2dRYxK7S2dRbXwuRahS7Jao4rIb0RknXPukeP+aa6I3FJ4+xYRmZP98ID6o7OID
Z1FjOgtYkNnEYMkP9b430VkoIj8p6quLmT3i8g4EXlJVW8TkU0ickN5hlhZbdr4f8z4oosuMtmvfvUrk335y1/OfEzLli0z2cMPP2yyOXPs55KjR49mPp4INKjONm7c2JsPGzbMZP369TPZ3r17Tda+fftUY/Ld1L5gwQKTPfDAA6mOkyMNqrN541uoJ82iEBFpML2tqanx5r179zaZ7+vuoUOHTPbYY4+ZbPv27SWMDvXQYDpbSV/84herPYRcSbJa45siYpeiOqZXtsMB0qOziA2dRYzoLWJDZxGDBvGtPQAAAAAIHZMzAAAAAAgAkzMAAAAACEDi33MWu1atWpnsySefNFmxm36zvtnRt2DChAkTvNvOmzfPZB9//HGm40F4lixZYrLly5eb7LLLLkv8mGeeeabJii2C81m7du0y2YwZM7zbDh8+PPGYgDz62te+5s2nTZtW2YEgEy1btvTmvs+pPlu3bjXZvffem2pMQCj+8Ic/mKzYokgNdKG6euHKGQAAAAAEgMkZAAAAAASAyRkAAAAABIDJGQAAAAAEIPoFQS6//HKTjRgxwmRdunQx2VlnnZX5eA4ePGiySZMmmezBBx802YEDBzIfD+K1ZcsWk11//fUmu/322737jxo1quRjT5w40WRPPPGEyd59992SjwHkhWqx32kLAPm3Zs0ak23cuNG7rW+BvQsuuMBkH3zwQfqBRYorZwAAAAAQACZnAAAAABAAJmcAAAAAEAAmZwAAAAAQgOgXBLnuuusSZUmtXbvWm7/yyismO3z4sMkmTJhgst27d5c8HuB4tbW1Jhs9erR322I5gNK9+uqrJvve975XhZGgktavX+/N33rrLZN179693MMBgudb+E5EZPLkySYbO3asye666y6TFXuNnjdcOQMAAACAADA5AwAAAIAAMDkDAAAAgADUOTlT1XNUdYGqrlXVt1V1eCEfrapbVXV14c/V5R8uUDc6i9jQWcSGziJG9BYxUOfciTdQbSsibZ1zq1T1v4nIShHpKyI3iMh+59z4xAdTPfHBgDo457SubegsQkJnEaGVzrnOJ9qAziIwdXZWhN5WUvPmzb35Sy+9ZLLevXubbNasWSa79dZbTXbgwIESRheGYq8P6lyt0TlXKyK1hbf3qeo6ETkr2+EB2aGziA2dRWzoLGJEbxGDet1zpqrtRORSEVlWiO5U1b+o6hRVPS3jsQGp0VnEhs4iNnQWMaK3CFXiyZmqNhORmSLyQ+fcXhF5QkQuEJEaOfZdCPsLvo7tN1RVV6jqigzGCyRGZxEbOovY0FnEiN4iZIkmZ6r6OTlW4unOuVkiIs657c65I865oyLytIh08e3rnHvKOdc5yc8CA1mhs4gNnUVs6CxiRG8RuiSrNaqI/EZE1jnnHjkub3vcZteJyJrshwfUH51FbOgsYkNnESN6ixgkWa2xu4j8QUT+U0SOFuL7RaS/HLv860TkbyJye+FGyxM9FivbIJWEK9/RWQSDziJCSVZrpLMISdLVGultlflWcRw7dqzJfvCDH5isY8eOJlu7dm02A6uCNKs1vikivp3/Pe2ggHKgs4gNnUVs6CxiRG8Rg3qt1ggAAAAAKA8mZwAAAAAQACZnAAAAABCAOhcEyfRg3DyJlJIsrpAlOou06CwilGhxhazQWWSgop0VobdIr9jrA66cAQAAAEAAmJwBAAAAQACYnAEAAABAAJicAQAAAEAA6vwl1BnbKSKbCm+3LryfB3k6F5Fwz+e8KhyTzsYh1POhs9nJ07mIhH0+le5tXjsrkq/zCflcqvm5NuTnpRR5Op+Qz6VoZyu6WuOnDqy6otIr65RLns5FJH/nk5U8PS95OheR/J1PVvL0vOTpXETydz5ZydvzkqfzydO5ZClvz0uezifWc+HHGgEAAAAgAEzOAAAAACAA1ZycPVXFY2ctT+cikr/zyUqenpc8nYtI/s4nK3l6XvJ0LiL5O5+s5O15ydP55OlcspS35yVP5xPluVTtnjMAAAAAwL/wY40AAAAAEICKT85U9SpV3aCq76rqfZU+flqqOkVVd6jqmuOyVqr6uqpuLPx9WjXHmJSqnqOqC1R1raq+rarDC3mU51MudDYcdDYZOhsOOptczL3NU2dF6G1SMXdWJF+9zVNnKzo5U9XGIvKYiPwPEblIRPqr6kWVHEMGponIVZ/J7hORN5xz7UXkjcL7MTgsIvc45y4Ska4ickfh4xHr+WSOzgaHztaBzgaHziaQg95Ok/x0VoTe1ikHnRXJV29z09lKXznrIiLvOufed84dEpEZInJthceQinNusYh8+Jn4WhF5pvD2MyLSt6KDKpFzrtY5t6rw9j4RWSciZ0mk51MmdDYgdDYROhsQOptY1L3NU2dF6G1CUXdWJF+9zVNnKz05O0tE/uu497cUsti1cc7VFt7+u4i0qeZgSqGq7UTkUhFZJjk4nwzR2UDR2aLobKDo7Anlsbe5+BjT26Ly2FmRHHyMY+8sC4JkzB1b/jKqJTBVtZmIzBSRHzrn9h7/bzGeD+onxo8xnW3YYvwY09mGLdaPMb1t2GL8GOehs5WenG0VkXOOe//sQha77araVkSk8PeOKo8nMVX9nBwr8XTn3KxCHO35lAGdDQydrROdDQydTSSPvY36Y0xv65THzopE/DHOS2crPTlbLiLtVfV8VW0iIjeJyNwKj6Ec5orILYW3bxGROVUcS2KqqiLyGxFZ55x75Lh/ivJ8yoTOBoTOJkJnA0JnE8tjb6P9GNPbRPLYWZFIP8a56qxzrqJ/RORqEXlHRN4TkZ9U+vgZjP8FEakVkf8rx36++DYROV2OrQCzUUT+j4i0qvY4E55Ldzl2efcvIrK68OfqWM+njM8TnQ3kD51N/DzR2UD+0Nl6PVfR9jZPnS2cD71N9jxF29nC+HPT2zx1VgsnBAAAAACoIhYEAQAAAIAAMDkDAAAAgAAwOQMAAACAADA5AwAAAIAAMDkDAAAAgAAwOQMAAACAADA5AwAAAIAAMDkDAAAAgAD8P8a5unNTxUnoAAAAAElFTkSuQmCC\n", + "text/plain": [ + "
" + ] + }, + "metadata": { + "needs_background": "light" + }, + "output_type": "display_data" + } + ], + "source": [ + "# Browse examples\n", + "fig, axes = plt.subplots(1, 5)\n", + "\n", + "for index in range(5):\n", + " show_image(axes[index], get_sample(index).cpu())" + ] + }, + { + "cell_type": "code", + "execution_count": 35, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "tensor([[[ 23, 23, 23, 23, 23, 23, 23],\n", + " [ 23, 64, 88, 106, 4, 121, 29],\n", + " [ 23, 70, 114, 51, 31, 12, 12],\n", + " [ 23, 23, 46, 117, 22, 23, 23],\n", + " [ 23, 23, 23, 70, 114, 104, 23],\n", + " [ 23, 22, 4, 65, 116, 29, 23],\n", + " [ 99, 116, 51, 110, 23, 23, 23]]], device='cuda:0')\n", + "torch.Size([1, 7, 7])\n" + ] + } + ], + "source": [ + "x = model.encode(get_sample(0)[None])\n", + "print(x)\n", + "print(x.shape)" + ] + }, + { + "cell_type": "code", + "execution_count": 36, + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAA2cAAAGeCAYAAAAQSXmdAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4xLjEsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy8QZhcZAAAgAElEQVR4nO3de5yVZb3///fF+SQooIiIoEWaJmIiug1FQ8nYlVhtkR2o5Y7aSmpbzcNmG2ka28RvmpqgIpqGWmpilocUJVOJQ1QKIngGRw4iZ5CA6/cHa/8iP9dy7lmnua57Xs/HgwfDm7Xmvu6Z9yzWxZr7M857LwAAAABA42rW2AsAAAAAALA5AwAAAIAosDkDAAAAgAiwOQMAAACACLA5AwAAAIAIsDkDAAAAgAiUtTlzzp3onFvonFvsnLu4UosCqoXOIkX0Fqmhs0gNnUUsXKk/58w511zSK5JOkLRE0ixJI7z38z/iPvxQNZTFe+9KvS+dRWMop7NSw3tLZ1EBK733u5d6ZzqLRlDTzhbuQ29RlmLPD8p55WyApMXe+9e891sk3SPppDLeH1BtdBYporeotTfLvD+dRa3RWeRGOZuzHpLe3unPSwoZECs6ixTRW6SGziI1dBbRaFHtAzjnRksaXe3jAJVCZ5EaOovU0FmkiN6iFsrZnC2V1HOnP+9dyP6J936SpEkS35+LRkdnkaJ6e0tnERk6i9Tw/ADRKOfbGmdJ6uOc29c510rSqZKmVWZZQFXQWaSI3iI1dBapobOIRsmvnHnvtzrnxkh6TFJzSZO99y9VbGVAhdFZpIjeIjV0Fqmhs4hJyaP0SzoYLwGjTOWOJW8oOoty0VkkaI73vn+tDkZnUQE17axEb1G+aozSBwAAAABUCJszAAAAAIgAmzMAAAAAiACbMwAAAACIAJszAAAAAIgAmzMAAAAAiACbMwAAAACIAJszAAAAAIgAmzMAAAAAiACbMwAAAACIAJszAAAAAIgAmzMAAAAAiACbMwAAAACIQIvGXgCA/DnssMNMNmbMGJOddtppJrvzzjtN9tOf/jR4nLlz55awOgAAgDjxyhkAAAAARIDNGQAAAABEgM0ZAAAAAESgrGvOnHNvSFonaZukrd77/pVYFFBN9BapobNIDZ1FaugsYuG896XfeUeR+3vvV2a8fekHS1jz5s1N1qlTp7LeZ2i4Qrt27Uy2//77m+zss88Ovs9rrrnGZCNGjDDZ5s2bTTZ+/HiT/eAHPwgepxzee1fu+2hIb5tqZ7Pq169fMH/qqadM1rFjx5KPs2bNmmDepUuXkt9nrdBZ7Gzw4MEmu/vuu002aNAgky1cuLAqawqYU+4TUzobt7Fjx5qs2L/ZzZrZb7I69thjTfbMM8+Uva4y1LSzhdvTW5Sl2PMDvq0RAAAAACJQ7ubMS3rcOTfHOTe6EgsCaoDeIjV0Fqmhs0gNnUUUyv05ZwO990udc3tIesI597L3fsbONygUnJIjJh/ZWzqLCNFZpIbOIjU8p0UUynrlzHu/tPD7ckkPShoQuM0k731/LqxELOrrLZ1FbOgsUkNnkRqe0yIWJb9y5pxrL6mZ935d4e0hki6v2MoayT777BPMW7VqZbKjjjrKZAMHDjTZrrvuarKvfOUrJayu4ZYsWWKy66+/Pnjbk08+2WTr1q0z2V/+8heTNfKFwJnltbe1MGCA+XdK999/f/C2oYE3oeFDoX5t2bLFZMUGfxx55JEmmzt3bqb3mYoYOnvMMceYLPQ5efDBB2uxnKQdfvjhJps1a1YjrKR6Yugs/uGMM84w2UUXXWSy7du3Z36f5QyTixGdRUzK+bbGbpIedM793/v5hff+0YqsCqgeeovU0Fmkhs4iNXQW0Sh5c+a9f03SIRVcC1B19BapobNIDZ1FaugsYsIofQAAAACIAJszAAAAAIhAuaP0k9avXz+TPfXUU8HbhgYcxCZ0Me/YsWNNtn79+uD97777bpPV1dWZ7P333zfZwoULsywREWrXrp3JPv3pT5vsrrvuMln37t3LOvaiRYtMdvXVV5vsnnvuCd7/j3/8o8lCnf/Rj35Uwurwf4499liT9enTx2QMBPmHZs3C//e57777mqxXr14mK1z7ApQt1K82bdo0wkqQB0cccYTJRo4cabJBgwYF73/QQQdlOs4FF1xgsnfeecdkoUF8Uvg5y8yZMzMdu7HxyhkAAAAARIDNGQAAAABEgM0ZAAAAAESAzRkAAAAARKBJDwR56623TPbee+8Fb1uLgSDFLlRcvXq1yY477jiTbdmyxWQ///nPy18Ycm3ixIkmGzFiRE2OHRo80qFDB5M988wzwfuHBlX07du37HXhn5122mkme/755xthJekoNiznm9/8pslCF66//PLLFV8T8u/444832Xe+851M9y3WuS984QsmW7ZsWcMWhiQNHz7cZNddd53JunbtarJiQ42efvppk+2+++4m+/GPf5xhhcWPE3qfp556aqb32dh45QwAAAAAIsDmDAAAAAAiwOYMAAAAACLA5gwAAAAAIs
DmDAAAAAAi0KSnNa5atcpkF154YfC2oWlFf/7zn012/fXXZzr2vHnzTHbCCScEb7thwwaTHXTQQSY799xzMx0bTddhhx1msn/91381WbHpRx9WbIriww8/bLJrrrnGZO+8847JQl9X77//fvA4n/3sZ02Wde3Irlkz/h+voW699dbMt120aFEVV4K8GjhwoMluv/12k2WdNl1sOt6bb77ZsIUhei1a2Kf//fv3N9ktt9xisnbt2plsxowZJrviiiuCx3722WdN1rp1a5Pdd999JhsyZEjwfYbMnj07821jw7+4AAAAABABNmcAAAAAEAE2ZwAAAAAQgXo3Z865yc655c65F3fKOjvnnnDOLSr8vlt1lwk0DL1FaugsUkNnkRo6ixQ47/1H38C5YyStl3Sn9/5ThexqSau89+OdcxdL2s17f1G9B3Puow8WsY4dO5ps3bp1Jps4caLJzjzzTJONHDnSZFOnTi1xdU2H9z7TtIdK9Tblzvbr189kTz31lMlC3Q753e9+Z7IRI0YEbzto0CCT9e3b12ShoQkrVqzItB5J2rZtm8k2btyYaT1z587NfJxypNbZ0Ofp+eefN9kDDzxgslGjRpVz6Fx57rnngvmRRx5psqOOOspkL7zwQsXX1ABzvPd2OsCHxNLZpio0rOEb3/hGpvs+/fTTJhs8eHC5S2pMNe1s4X7J9vaMM84wWdYhRk888YTJhg8fbrK1a9dmXk/oOfGUKVMy3Xfp0qXBPDTgpCHPL2qh2PODel85897PkPThsYYnSbqj8PYdkoaVtTqgwugtUkNnkRo6i9TQWaSg1FH63bz3dYW335XUrdgNnXOjJY0u8ThAJWXqLZ1FROgsUkNnkRqe0yIqZf+cM++9/6iXdr33kyRNktJ+CRj58lG9pbOIEZ1FaugsUsNzWsSg1GmNy5xz3SWp8Pvyyi0JqBp6i9TQWaSGziI1dBZRKfWVs2mSTpc0vvD7QxVbUaSyXti4Zs2aTLf75je/abJ77703eNvt27dnep+oVy57+4lPfCKYX3jhhSbr1KmTyVauXGmyuro6k91xxx0mW79+ffDYjzzySKasGtq2bWuy888/32Rf+9rXarGcctW8s0OHDjVZ6GOKf+jWzX4X1L777pv5/sUuaE9ULh9nG1PXrl2DeWj4R+j5wurVq032wx/+sPyF5UduO3vFFVcE80svvdRkoQGBN910k8nGjh1rsoYM/wj57//+75Lve8455wTz2IZ/NESWUfpTJT0vaX/n3BLn3JnaUeATnHOLJB1f+DMQDXqL1NBZpIbOIjV0Fimo95Uz7314VraU9MxV5Bu9RWroLFJDZ5EaOosUlHrNGQAAAACggticAQAAAEAEyh6lj382btw4kx122GEmGzRokMmOP/744Pt8/PHHy14X8qF169Ymu+aaa4K3DQ12WLdunclOO+00k82ePdtkKQ+F2GeffRp7CcnYf//9M93upZdeqvJK0hH6GgwNCZGkV155xWShr0s0Tb179zbZ/fffX9b7/OlPf2qy6dOnl/U+EZ/LLrvMZKHBH5K0ZcsWkz322GMmu+iii0y2adOmTOtp06ZNMB8yZIjJQv9GO+dMFhpk89BDuZnf8v/jlTMAAAAAiACbMwAAAACIAJszAAAAAIgAmzMAAAAAiAADQSpsw4YNJvvmN79psrlz55rslltuCb7P0IW7oYENN954o8lCP/Ed6Tr00ENNFhr8UcxJJ51ksmeeeaasNaFpmjVrVmMvoaI6duxoshNPPNFkI0eONFnoAvdirrjiCpOtXr068/2Rb6HO9e3bN/P9n3zySZNdd911Za0J8dl1111NdtZZZ5ms2HPA0PCPYcOGlbyej3/84ya7++67g7cNDckL+dWvfmWyq6++umELSxSvnAEAAABABNicAQAAAEAE2JwBAAAAQATYnAEAAABABBgIUgOvvvqqyc444wyT3X777cH7jxo1KlPWvn17k915550mq6urCx4H8bv22mtN5pwL3jY06CNvwz+aNbP/v7R9+/ZGWEnT07lz54q/z0MOOcRkoX4ff/zxJtt7771N1qpVK5N97WtfCx471KVNmzaZbObMmSb74IMPTNaiRfif1zlz5gRzND2hAQzjx4/PfP9nn33WZKeffrrJ1qxZ07CFIXqhx7auXbtmvv8555xjsj322MNkX//61032pS99yWSf+tSnTNahQ4fgsUNDSkLZXXfdZbLQ0L084pUzAAAAAIgAmzMAAAAAiACbMwAAAACIAJszAAAAAIhAvZsz59xk59xy59yLO2XjnHNLnXPzCr+GVneZQHZ0Fimit0gNnUVq6CxSkGVa4xRJN0j68Ni//+e9v6biK2oiHnzwQZMtWrQoeNvQhL7Bgweb7KqrrjJZr169THbllVcGj7N06dJgnqApykFnv/CFL5isX79+JgtNOZKkadOmVXxNsQlNZgx9PObNm1eL5ZRriiLobWhCYehjevPNN5vs0ksvLevYffv2NVloWuPWrVtNtnHjRpPNnz/fZJMnTw4ee/bs2SYLTTddtmyZyZYsWWKytm3bBo/z8ssvB/NETVEEnU1B7969TXb//feX9T5fe+01k4X6iX8yRTno7JYtW0y2YsUKk+2+++7B+7/++usmK/ZcIot33nnHZGvXrg3etnv37iZbuXKlyR5++OGS15O6el85897PkLSqBmsBKoLOIkX0Fqmhs0gNnUUKyrnmbIxz7q+Fl4h3K3Yj59xo59xs55z9b0mgtugsUlRvb+ksIkNnkRqeHyAapW7OfibpY5L6SaqTNKHYDb33k7z3/b33/Us8FlAJdBYpytRbOouI0FmkhucHiEpJmzPv/TLv/Tbv/XZJt0gaUNllAZVFZ5EieovU0Fmkhs4iNlkGghjOue7e+7rCH0+W9OJH3R7ZvPhi+MN4yimnmOyLX/yiyW6//XaTfetb3zJZnz59gsc54YQT6ltislLsbGigQKtWrUy2fPny4P3vvffeiq+pFlq3bm2ycePGZb7/U089ZbJLLrmknCU1msbo7VlnnWWyN99802RHHXVUxY/91ltvmezXv/61yRYsWGCyF154oeLrCRk9erTJQhfdh4Y1NAUpPtbWwkUXXWSy0ECjhhg/fnxZ98cOKXZ29erVJhs2bJjJfvOb3wTv37lzZ5O9+uqrJnvooYdMNmXKFJOtWmUv47vnnnuCxw4NBCl226aq3s2Zc26qpGMldXXOLZH0fUnHOuf6SfKS3pBkdwBAI6GzSBG9RWroLFJDZ5GCejdn3vsRgfi2KqwFqAg6ixTRW6SGziI1dBYpKGdaIwAAAACgQticAQAAAEAEShoIgtoKXfj585//3GS33nqryVq0sJ/iY445JnicY4891mRPP/10/QtEo/rggw+CeV1dXTCPSWj4x9ixY0124YUXBu+/ZMkSk02YYKcgr1+/voTV4f/87//+b2MvIRqDBw/OdLv777+/yitBrPr162eyIUOGlPz+QkMZJGnhwoUlv0/kz8yZM00WGlZUDaHnlYMGDQreNjQIp6kOUCqGV84AAAAAI
AJszgAAAAAgAmzOAAAAACACbM4AAAAAIAIMBIlI3759g/lXv/pVkx1++OEmCw3/CJk/f34wnzFjRqb7Iy7Tpk1r7CVkErpIPjToY/jw4SYrdkH8V77ylfIXBlTBgw8+2NhLQCN5/PHHTbbbbrtluu8LL7xgsjPOOKPcJQFV1bZtW5OFBn9IkvfeZPfcc0/F15QyXjkDAAAAgAiwOQMAAACACLA5AwAAAIAIsDkDAAAAgAgwEKQG9t9/f5ONGTPGZF/+8peD999zzz1LPva2bdtMVldXF7xtsYs30Ticc5myYcOGBe9/7rnnVnxNWX33u9812f/8z/+YrFOnTia7++67TXbaaadVZmEAUGVdunQxWdZ/X2+66SaTrV+/vuw1AdX02GOPNfYScoVXzgAAAAAgAmzOAAAAACACbM4AAAAAIAL1bs6ccz2dc9Odc/Odcy85584t5J2dc0845xYVfs/2ExaBKqOzSA2dRYroLVJDZ5GCLK+cbZV0vvf+QElHSjrbOXegpIslPem97yPpycKfgRjQWaSGziJF9BapobOIXr3TGr33dZLqCm+vc84tkNRD0kmSji3c7A5JT0u6qCqrjFRoiuKIESNMFprM2Lt374qvZ/bs2Sa78sorTTZt2rSKHzsmeems9z5TVmya5/XXX2+yyZMnm+y9994z2ZFHHmmyUaNGmeyQQw4JHnvvvfc22VtvvWWy0ISn0LSyvMtLZ5uq0BTVT3ziE8HbvvDCC9VeTs3QW+n22283WbNmpV8x8txzz5WzHNSDzlbH5z73ucZeQq406BHEOddb0qGSZkrqVii5JL0rqVtFVwZUAJ1FaugsUkRvkRo6i1hl/jlnzrkOku6XdJ73fu3O/1PovffOOftf+jvuN1rS6HIXCjQUnUVq6CxSVEpv6SwaE4+1iFmmV86ccy21o8R3e+8fKMTLnHPdC3/fXdLy0H2995O89/299/0rsWAgCzqL1NBZpKjU3tJZNBYeaxG7LNManaTbJC3w3l+7019Nk3R64e3TJT1U+eUBDUdnkRo6ixTRW6SGziIFWb6t8TOSRkn6m3NuXiG7VNJ4Sfc5586U9KakU6qzxNrq1i38bcYHHnigyW644QaTHXDAARVf08yZM0324x//2GQPPWQfS7Zv317x9SSgSXW2efPmwfyss84y2Ve+8hWTrV271mR9+vQpa02hi9qnT59usssuu6ys4+RIk+ps3oQG9ZQzFCIhTaa3/fr1C+bHH3+8yUL/7m7ZssVkN954o8mWLVtWwurQAE2ms7W03377NfYSciXLtMZnJdlRVDsMruxygPLRWaSGziJF9BapobNIQZP4rz0AAAAAiB2bMwAAAACIAJszAAAAAIhA5p9zlrrOnTubbOLEiSYrdtFvpS92DA1MmDBhQvC2jz32mMk2bdpU0fUgPs8//7zJZs2aZbLDDz888/vcc889TVZsCM6Hvffeeya75557grc999xzM68JyKN/+Zd/CeZTpkyp7UJQEbvuumswDz2mhixdutRkF1xwQVlrAmLxhz/8wWTFhiI10UF1DcIrZwAAAAAQATZnAAAAABABNmcAAAAAEAE2ZwAAAAAQgeQHghxxxBEmu/DCC002YMAAk/Xo0aPi69m4caPJrr/+epNdddVVJtuwYUPF14N0LVmyxGRf/vKXTfatb30reP+xY8eWfOzrrrvOZD/72c9Mtnjx4pKPAeSFc8V+pi0A5N+LL75oskWLFgVvGxqw97GPfcxkK1asKH9hieKVMwAAAACIAJszAAAAAIgAmzMAAAAAiACbMwAAAACIQPIDQU4++eRMWVbz588P5r/5zW9MtnXrVpNNmDDBZKtXry55PcDO6urqTDZu3LjgbYvlAEr3u9/9zmT/9m//1ggrQS29/PLLwfy5554z2cCBA6u9HCB6ocF3knTrrbea7MorrzTZd77zHZMVe46eN7xyBgAAAAARYHMGAAAAABFgcwYAAAAAEah3c+ac6+mcm+6cm++ce8k5d24hH+ecW+qcm1f4NbT6ywXqR2eRGjqL1NBZpIjeIgXOe//RN3Cuu6Tu3vu5zrldJM2RNEzSKZLWe++vyXww5z76YEA9vPeuvtvQWcSEziJBc7z3/T/qBnQWkam3sxK9raWOHTsG8/vuu89kxx9/vMkeeOABk33961832YYNG0pYXRyKPT+od1qj975OUl3h7XXOuQWSelR2eUDl0Fmkhs4iNXQWKaK3SEGDrjlzzvWWdKikmYVojHPur865yc653Sq8NqBsdBapobNIDZ1FiugtYpV5c+ac6yDpfknnee/XSvqZpI9J6qcd/wthf8DXjvuNds7Nds7NrsB6gczoLFJDZ5EaOosU0VvELNPmzDnXUjtKfLf3/gFJ8t4v895v895vl3SLpAGh+3rvJ3nv+2f5XmCgUugsUkNnkRo6ixTRW8Quy7RGJ+k2SQu899fulHff6WYnS3qx8ssDGo7OIjV0Fqmhs0gRvUUKskxrHCjpD5L+Jml7Ib5U0gjtePnXS3pD0rcKF1p+1Ptisg3KknHyHZ1FNOgsEpRlWiOdRUyyTmukt40sNMXxyiuvNNl//ud/mqxv374mmz9/fmUW1gjKmdb4rKTQnX9b7qKAaqCzSA2dRWroLFJEb5GCBk1rBAAAAABUB5szAAAAAIgAmzMAAAAAiEC9A0EqejAunkSZsgxXqCQ6i3LRWSQo03CFSqGzqICadlaityhfsecHvHIGAAAAABFgcwYAAAAAEWBzBgAAAAARYHMGAAAAABGo94dQV9hKSW8W3u5a+HMe5OlcpHjPp1cjHJPOpiHW86GzlZOnc5HiPp9a9zavnZXydT4xn0tjPtbG/HEpRZ7OJ+ZzKdrZmk5r/KcDOze71pN1qiVP5yLl73wqJU8flzydi5S/86mUPH1c8nQuUv7Op1Ly9nHJ0/nk6VwqKW8flzydT6rnwrc1AgAAAEAE2JwBAAAAQAQac3M2qRGPXWl5Ohcpf+dTKXn6uOTpXKT8nU+l5OnjkqdzkfJ3PpWSt49Lns4nT+dSSXn7uOTpfJI8l0a75gwAAAAA8A98WyMAAAAARKDmmzPn3InOuYXOucXOuYtrffxyOecmO+eWO+de3Cnr7Jx7wjm3qPD7bo25xqyccz2dc9Odc/Odcy85584t5EmeT7XQ2XjQ2WzobDzobHYp9zZPnZXobVYpd1bKV2/z1Nmabs6cc80l3Sjp85IOlDTCOXdgLddQAVMknfih7GJJT3rv+0h6svDnFGyVdL73/kBJR0o6u/D5SPV8Ko7ORofO1oPORofOZpCD3k5Rfjor0dt65aCzUr56m5vO1vqVswGSFnvvX/Peb5F0j6STaryGsnjvZ0ha9aH4JEl3FN6+Q9Kwmi6qRN77Ou/93MLb6yQtkNRDiZ5PldDZiNDZTOhsROhsZkn3Nk+dlehtRkl3VspXb/PU2VpvznpIenunPy8pZKnr5r2vK7z9rqRujbmYUjjneks6VNJM5eB8KojORorOFkVnI0VnP1Iee5uLzzG9LSqPnZVy8DlOvbMMBKkwv2P8
ZVIjMJ1zHSTdL+k87/3anf8uxfNBw6T4OaazTVuKn2M627Sl+jmmt01bip/jPHS21puzpZJ67vTnvQtZ6pY557pLUuH35Y28nsyccy21o8R3e+8fKMTJnk8V0NnI0Nl60dnI0NlM8tjbpD/H9LZeeeyslPDnOC+drfXmbJakPs65fZ1zrSSdKmlajddQDdMknV54+3RJDzXiWjJzzjlJt0la4L2/dqe/SvJ8qoTORoTOZkJnI0JnM8tjb5P9HNPbTPLYWSnRz3GuOuu9r+kvSUMlvSLpVUn/XevjV2D9UyXVSfq7dnx/8ZmSumjHBJhFkn4vqXNjrzPjuQzUjpd3/yppXuHX0FTPp4ofJzobyS86m/njRGcj+UVnG/SxSra3eeps4XzobbaPU7KdLaw/N73NU2dd4YQAAAAAAI2IgSAAAAAAEAE2ZwAAAAAQATZnAAAAABABNmcAAAAAEAE2ZwAAAAAQATZnAAAAABABNmcAAAAAEAE2ZwAAAAAQATZnAAAAABABNmcAAAAAEAE2ZwAAAAAQATZnAAAAABABNmcAAAAAEAE2ZwAAAAAQATZnAAAAABABNmcAAAAAEAE2ZwAAAAAQATZnAAAAABABNmcAAAAAEAE2ZwAAAAAQATZnAAAAABABNmcAAAAAEAE2ZwAAAAAQATZnAAAAABABNmcAAAAAEAE2ZwAAAAAQATZnAAAAABABNmcAAAAAEAE2ZwAAAAAQATZnAAAAABABNmcAAAAAEAE2ZwAAAAAQATZnAAAAABABNmcAAAAAEAE2ZwAAAAAQATZnAAAAABABNmcAAAAAEAE2ZwAAAAAQATZnAAAAABABNmcAAAAAEAE2ZwAAAAAQATZnAAAAABABNmcAAAAAEAE2ZwAAAAAQATZnAAAAABABNmcAAAAAEAE2ZwAAAAAQATZnAAAAABABNmcAAAAAEAE2ZwAAAAAQATZnAAAAABABNmcAAAAAEAE2ZwAAAAAQATZnAAAAABABNmcAAAAAEAE2ZwAAAAAQATZnAAAAABABNmcAAAAAEAE2ZwAAAAAQATZnAAAAABABNmcAAAAAEAE2ZwAAAAAQATZnAAAAABABNmcAAAAAEAE2ZwAAAAAQATZnAAAAABABNmcAAAAAEAE2ZwAAAAAQATZnAAAAABABNmcAAAAAEAE2ZwAAAAAQATZnAAAAABABNmcAAAAAEAE2ZwAAAAAQATZnAAAAABCBsjZnzrkTnXMLnXOLnXMXV2pRQLXQWaSI3iI1dBapobOIhfPel3ZH55pLekXSCZKWSJolaYT3fv5H3Ke0gwEF3ntX6n3pLBpDOZ2VGt5bOosKWOm9373UO9NZNIKadrZwH3qLshR7flDOK2cDJC323r/mvd8i6R5JJ5Xx/oBqo7NIEb1Frb1Z5v3pLGqNziI3ytmc9ZD09k5/XlLIgFjRWaSI3iI1dBapoZc3aoUAAB+nSURBVLOIRotqH8A5N1rS6GofB6gUOovU0Fmkhs4iRfQWtVDO5myppJ47/XnvQvZPvPeTJE2S+P5cNDo6ixTV21s6i8jQWaSG5weIRjnf1jhLUh/n3L7OuVaSTpU0rTLLAqqCziJF9BapobNIDZ1FNEp+5cx7v9U5N0bSY5KaS5rsvX+pYisDKozOIkX0Fqmhs0gNnUVMSh6lX9LBeAkYZSp3LHlD0VmUi84iQXO89/1rdTA6iwqoaWcleovyVWOUPgAAAACgQticAQAAAEAE2JwBAAAAQATYnAEAAABABNicAQAAAEAE2JwBAAAAQATYnAEAAABABNicAQAAAEAEWjT2AgA0Dc7Zn7XYvHlzk23bts1k3vOzPgEAQP7xyhkAAAAARIDNGQAAAABEgM0ZAAAAAESAzRkAAAAARICBIPhILVu2NFmHDh1MtnnzZpNt2rSpKmtC/Hr16mWye++912SHHnqoyebPn2+yUaNGBY/z0ksvmYzhIUhN+/btTbZx40aT0W1UU7Nm2f+/fvv27VVcCdC08coZAAAAAESAzRkAAAAARIDNGQAAAABEoKxrzpxzb0haJ2mbpK3e+/6VWBRQTfQWqaGzSA2dRWroLGJRiYEgx3nvV1bg/eRWixb2w9y2bVuT7b777sH79+3b12Sf/OQnTdanTx+TOedMtttuuwWP06NHj0xZ69atTfarX/3KZGPGjDHZ3//+9+CxGwG9rYBQNyXpgQceMNl+++1nslA/DznkEJM98cQTweMcffTRJlu8eHHwtjlAZ3PggAMOMFmo3yNGjDDZs88+W5U1VRGdjUDHjh1Ndsstt5jsxBNPDN5/5Ur7KRw+fLjJ5syZY7IEh9jQWTQ6vq0RAAAAACJQ7ubMS3rcOTfHOTe6EgsCaoDeIjV0Fqmhs0gNnUUUyv22xoHe+6XOuT0kPeGce9l7P2PnGxQKTskRk4/sLZ1FhOgsUkNnkRqe0yIKZb1y5r1fWvh9uaQHJQ0I3GaS974/F1YiFvX1ls4iNnQWqaGzSA3PaRGLkl85c861l9TMe7+u8PYQSZdXbGU1EBpGsMsuuwRvGxrWERrAceSRR5rsc5/7nMk+8YlPZD528+bNTRZae0joYtytW7cGb7tu3TqTLVmyxGRvv/22yX7zm9+YbPv27VmWWFN56G0thPoVGkJzzz33BO/fu3dvk4W6uG3btky369KlS/A43//+9002erT9T81NmzYF75+CGDrbrl07k+2xxx4mCz02hD7HTVn//vY5XWhgQ4cOHWqxnKqIobNNVeixe8AAs8fQ4MGDTVbsOUjo6z/0vGbu3LkmS2UgCJ0tT7Nm9rWeNm3aBG/bsmVLk23cuNFkEQ2Qq7lyvq2xm6QHCw8ELST9wnv/aEVWBVQPvUVq6CxSQ2eRGjqLaJS8OfPevybJzrwGIkZvkRo6i9TQWaSGziImjNIHAAAAgAiwOQMAAACACJQ7Sj9pXbt2Ndmzzz4bvO1+++2X6X2GLooMZQ2R9YLaDRs2mOzRR+23TN9yyy3B+8+bNy/T+wwNFNmyZYvJUrkQuKkLXZw7bNgwk02YMMFkoaEQUni4TOvWrU0WGhaxePHizMc54YQTTDZo0CCThb4OEBYaKPCDH/zAZH379jXZV7/6VZOFutAUFBvaNHz4cJOFHiv/+te/VnxNyL/Q843QY2Jo4EyxIV4rVqww2fTp0zPfH+kKDaQ76KCDTPZf//VfJjvppJOC7zP0XKCurs5kv/jFL0wWGjpT7PnBggULTBZ6jh9jb3nlDAAAAAAiwOYMAAAAACLA5gwAAAAAIsDmDAAAAAAi0KQHgrRv395knTt3Dt42dMF26ILvrD/RfPPmzSZ7//33g7dt0cJ+mnbZZReTPfLIIyYbM2ZM5uMg/0LDPy6++GKTXXLJJZnuu3r16uBxli5darL999/fZKFBMsuXLzdZp06dgsfp2bOnyb797W+b7Pe//73JQoNtEH5c69Onj8lCn/vQ57Op6tatWzA/5phjTPbOO++YjMdplCL0ODty5EiTtWrVymRr1qwJvs8zzjjDZDNnzmz44hC10HPN0Oc+NBwsNGCm2DCoRYsWmWz
JkiUmCw1PuuCCC0wWWrckPfTQQyZ7/vnnTcZAEAAAAABAEJszAAAAAIgAmzMAAAAAiACbMwAAAACIAJszAAAAAIhAk57WGJqQddZZZwVvO3jwYJOFJjsOGjTIZM2a2T3wjTfeaLK77roreOw999zTZEcddZTJnnrqKZMVm6aH/AtN3Rs6dKjJLrzwQpO1bt3aZBs2bDDZo48+Gjx2ly5dTBaauLhgwQKTPfjggyYLTWCUpF69eplswIABJgtNN2UaXljz5s1Ndthhh5nsiSeeMFmMU69qIfS19h//8R/B24a6GJogFproC+ws1KWf/OQnJgs9Toa+Vos9B5k+fbrJtm3blmWJiFToMevUU0812Q033GCy0FTyO+64w2ShqY6S9NZbb5msbdu2JrviiitMVuxxNWTOnDkmS2VKM6+cAQAAAEAE2JwBAAAAQATYnAEAAABABOrdnDnnJjvnljvnXtwp6+yce8I5t6jw+27VXSbQMPQWqaGzSA2dRWroLFKQZSDIFEk3SLpzp+xiSU9678c75y4u/Pmiyi+vurZs2WKyX/7yl8HbPvDAAyYLXWT78MMPm2zfffc1WcuWLU22ZMmS4LFff/11k82cOdNkoQsdvffB99kETFFOe5vVQQcdZLJrrrnGZO3btzdZqDeLFy82WbELyEPDQ3bffXeThQaCrFmzxmQ9e/YMHqdv374mC10kH/pajXAgyBRF0NlQb7p162ayFi3sPx+hYSKhi8fzJnTeX/jCF4K3DV2I/9vf/tZkiTx2T1EEnc270PMFSbr00ktNdtxxx5ks1LlXX33VZJdffnnwODn7Gp6iJtTZ0GOTJJ1yyikmCw3/CA2OCfXkpz/9qckaMtSoTZs2JjviiCNMFhqwV+y5c+j5SSKPq/W/cua9nyFp1YfikyT932iWOyQNq/C6gLLQW6SGziI1dBapobNIQanXnHXz3tcV3n5Xkv1vVSA+9BapobNIDZ1FaugsolL2zznz3nvnXNHXCZ1zoyWNLvc4QCV9VG/pLGJEZ5EaOovU8JwWMSj1lbNlzrnuklT4fXmxG3rvJ3nv+3vv+5d4LKBSMvWWziIidBapobNIDc9pEZVSXzmbJul0SeMLvz9UsRVFatu2bSZ74403TPbrX//aZBdccIHJQj+J/eqrrw4e+4MPPsi0HtQrl73da6+9gvnUqVNNtt9++5ksNBjnkUceMdkPf/hDk4WGhEjSxo0bg/mHhS7ODV3wO23atOD9v/vd75qsXbt2JjvppJNM9pe//CXTehpZzTs7fPhwk4UuKv/b3/5msgg/fjXRvXt3kx144IHB24Yez+fOnVvxNTWiXD7ONqbPfOYzwXzMmDEmCw3qWbduXab7rlixooTV5UIuOhv63F944YXB21522WUmCw2OOe+880x22223mawhQ2NC65w0aZLJDj74YJOFnq+cddZZweMUGxSSgiyj9KdKel7S/s65Jc65M7WjwCc45xZJOr7wZyAa9BapobNIDZ1FaugsUlDvK2fe+xFF/mpwhdcCVAy9RWroLFJDZ5EaOosUlHrNGQAAAACggticAQAAAEAEyh6l35SFfnJ66Cesf+Mb3zDZPvvsY7KrrroqeJzvfe97JtuwYUOWJSJnWrZsabJrr702eNtPfvKTJlu16sM/e1M6//zzTXbfffeZbPPmzVmWWLbQsJu33347eNvQ10H79u1NNmTIEJOFvt4aclFz6kKDVyRp5MiRJgsNsfjTn/5kstDF2nnTtm1bk40ebSdrd+jQIXj/119/3WTvvvtu+QtDLrRu3dpkl1xySfC2oY6FHj9vuukmkz311FMlrA6xCD1+jxo1ymSXX3558P6h568TJkwwWTnDP0LPVyRp7NixJhs6dKjJQud4++23m+yxxx4LHiflAVW8cgYAAAAAEWBzBgAAAAARYHMGAAAAABFgcwYAAAAAEWAgSIWFLuy++OKLTRa6QDd0UbkkHXDAASYLXfj5zjvvZFkiEnbMMceY7Itf/GLwtps2bTLZiBH2R7w8+eSTJovtQtrmzZsH89DF7yHdunUzWehi5aY0EKSY0MCL0OCVxhxK5Jwr+XZt2rQJ3rZTp04mO/roo0329a9/3WQDBw7MdGxJeuSRR0wW+lpF/oUGHpxyyikm++xnP5v5fc6YMcNkP/jBD0yW9bETcerRo4fJQsPBij0OTZo0yWTjxo0zWejfxND7DP0be9lllwWPfeaZZ5qsVatWJlu4cKHJrrjiCpPlcRAVr5wBAAAAQATYnAEAAABABNicAQAAAEAE2JwBAAAAQAQYCFJhoUEKU6dONVmLFvZDf/311wff53HHHWeyefPmmeznP/+5ya655hqThYaWSPENgWjqWrdubbJLLrnEZKHBFpI0fvx4k02fPt1kKXzet2/fHsxDF9SHzmflypUVX1Pqil0oXldXZ7K99trLZJ/85CdNFhpKtHr1apO1a9cueOzQRe6f/vSnTRYaktSlSxeTHXLIISbbc889g8feddddg/mHrVq1ymShC9K3bt0avH+qX4OovEGDBpnsJz/5iclCzxckaePGjSa78sorTcbAmfwJPSaHHleLPQ7NmTPHZPvss4/JunbtarLDDz/cZKGBdqHHaSnc59Bj6MSJE00W+vcpj3jlDAAAAAAiwOYMAAAAACLA5gwAAAAAIsDmDAAAAAAiUO/mzDk32Tm33Dn34k7ZOOfcUufcvMKvodVdJpAdnUWK6C1SQ2eRGjqLFGSZ1jhF0g2S7vxQ/v+893YUIIzQFJq77rrLZMWm7n3ve98zWc+ePU12zjnnmOyUU04x2QknnBA8zsKFC02W6BSxKcpBZw8++GCTHXHEESYrNsnw8ccfN9m2bdvKX1gj6N27dzDv0KGDyULn+Mtf/tJkEU4wm6Ia9rZYbxYtWmSy0NStG264wWSh6XGhj3OnTp2Cx+7YsaPJmjdvbrLQY+qaNWtMFppMG5qWKIW/XmbNmmWy0JTLZ555xmSbN28OHmfu3LnBPFFTlIPH2lr4+Mc/brI77rjDZJ07dzZZsX+HQ49rTz/9dMMX17RMUQ46+9prr5ls6dKlJuvVq1fw/jfffLPJQo9txab6fljo8a7Yv7Ghf7ffeOMNk4WeJxf7dytv6n3lzHs/Q5KdHQxEis4iRfQWqaGzSA2dRQrKueZsjHPur4WXiHcrdiPn3Gjn3Gzn3OwyjgVUAp1FiurtLZ1FZOgsUsPzA0Sj1M3ZzyR9TFI/SXWSJhS7ofd+kve+v/e+f4nHAiqBziJFmXpLZxEROovU8PwAUSlpc+a9X+a93+a93y7pFkkDKrssoLLoLFJEb5EaOovU0FnEJstAEMM51917X1f448mSXvyo28P64IMPTDZx4sTgbR999FGTXXTRRSY77bTTTLbXXnuZ7L777gse56ijjjLZ+vXrg7dNTYqdPeSQQ0zWrl07k61evTp4/1deeaXia6qF1q1bm+ySSy4J3rZFC/sQNmPGDJNNnjzZZCkMu6lmb4ud/ze+8Q2TnXfeeSbr06ePyfbZZx+TtWrVymRr164NHjt0Qf
tDDz1kstBQjVdffdVk69atM1mxoTihj0foYvjQoJ4uXbqYbPny5cHjrFixIpjnRYqPtZXWtm1bk1177bUm69GjR6b399577wXzq666ymSpDn1qTCl2duXKlSb7/Oc/b7KxY8cG73/ggQeaLPRYHXp+8dvf/tZkoeFJoYE3krTLLruY7Be/+IXJQufYVNS7OXPOTZV0rKSuzrklkr4v6VjnXD9JXtIbkr5VxTUCDUJnkSJ6i9TQWaSGziIF9W7OvPcjAvFtVVgLUBF0Fimit0gNnUVq6CxSUM60RgAAAABAhbA5AwAAAIAIlDQQBLUVuijyttvsq/CHH364yT796U+b7IADDgge5+Mf/7jJ5s2bl2WJqIKOHTuaLDSgYNWq8M/TLDYoJCYtW7Y02eWXX26yz372s8H7hy5C/vd//3eTvf/++yWsrmkK9WbcuHEmC3UxlIUUu9327dtN1piDW0LHPuigg0zWrJn9f84///nPwfe5efPm8heGaIS6/MUvftFkn/vc50wW6k3oa+Dmm28OHnvx4sVZlogcCj02LVy40GRnnHFG5vtnfUwPdTQ0FCk0GKeY5557zmQpDO2qFl45AwAAAIAIsDkDAAAAgAiwOQMAAACACLA5AwAAAIAIMBCkkYQGIRx22GHB2w4dOtRkJ5xwgslCAz1Ctm7dGsxbt26d6f6ojazDEYoNVwhdbF4roTV17tzZZD/60Y9MNmzYMJPNmTMneJyRI0eabPny5VmWiDKFutgULuA++OCDTRbq+1tvvRW8f1P4GDUlbdq0MdnZZ59tstC/+SHvvvuuyW666abgbUP/RgA727ZtW02OExr+0b59++BtQ89BX3/99YqvKWW8cgYAAAAAEWBzBgAAAAARYHMGAAAAABFgcwYAAAAAEWAgSA3sscceJvv2t79tsjFjxgTvv9tuu5msefPmJgtdlL5lyxaT3XzzzcHjzJs3L5ijcWzevNlkoWECoX5IUu/evU22cOHCTO8zJNS50DEk6fTTTzdZqPOdOnUy2QsvvGCyr371q8HjrFy5MpgDlRB6TO3Ro4fJQl9DxYbYMBAkX4455hiTHXHEESYLdSk00GPq1KkmW7ZsWYmrA2qjT58+JmvRIrzFWLduncnWrFlT8TWljFfOAAAAACACbM4AAAAAIAJszgAAAAAgAvVuzpxzPZ1z051z851zLznnzi3knZ1zTzjnFhV+D1/4AtQYnUVq6CxSRG+RGjqLFGR55WyrpPO99wdKOlLS2c65AyVdLOlJ730fSU8W/gzEgM4iNXQWKaK3SA2dRfTqndbova+TVFd4e51zboGkHpJOknRs4WZ3SHpa0kVVWWUEQpOWdtllF5Mde+yxJrv88stNdvDBB5usWbPs32Uamvi1ZMkSk11xxRUmu+uuu4Lv84MPPsh8/JjlpbOhaW9bt241WceOHYP3/973vmeya6+91mTvvvuuyfbee2+ThaaJfulLXwoeu3PnziYLdfZPf/qTyUaOHGmyvE9lzEtn8yb0uF/s6+3DVq9eXenlRKcp9bZly5bB/NRTT8182w9btWqVyaZMmWKy0FRHlKYpdbaWQo+LxSbTbtq0KVPWlDXomjPnXG9Jh0qaKalboeSS9K6kbhVdGVABdBapobNIEb1FaugsYpX555w55zpIul/Sed77tTv/j6L33jvngltk59xoSaPLXSjQUHQWqaGzSFEpvaWzaEw81iJmmV45c8611I4S3+29f6AQL3POdS/8fXdJy0P39d5P8t739973r8SCgSzoLFJDZ5GiUntLZ9FYeKxF7LJMa3SSbpO0wHu/8wUr0ySdXnj7dEkPVX55QMPRWaSGziJF9BapobNIQZZva/yMpFGS/uacm1fILpU0XtJ9zrkzJb0p6ZTqLLF6Qhd7d+3aNXjb0OCD888/32T77befyVq1apXp2MUungwNbJgwYYLJ7rzzTpOFBikUO06O5KKzK1asMNm6detM1qVLl+D9R40aZbJQj0MX4nbq1Mlk7du3Dx4n5P333zfZTTfdZLLrr7/eZHkf/lFELjrbFIQGQYUez1u3bl2L5TS2JtPbtm3bBvOBAweaLDTcKzTUY8aMGSZbsGBBCatDAzSZztZS6LlJMaGvj9BjaFOWZVrjs5KKfdQGV3Y5QPnoLFJDZ5EieovU0FmkoEHTGgEAAAAA1cHmDAAAAAAiwOYMAAAAACKQ+eecpaRFC3tae+65p8mGDx9usm9/+9vB99mrVy+TtWzZMtN6QhcCv/322ya76qqrgvefOnWqydauXZvp2EhXaDDGrFmzTDZkyJDg/UNfB8WGh3xYaGjMhg0bTHbvvfcG7z9+/HiTvfbaayYLfW0AsQhdpB4aCBJy9NFHB/PQ4zlfB/Hbddddg/kee+yR6f5btmwxWWiw17Zt2xq2MCACCxcuNFmxLjdv3txkoecrTRmvnAEAAABABNicAQAAAEAE2JwBAAAAQATYnAEAAABABJK/Au/II4802fe///1Mt+vYsaPJQj+5vJjQRdzvvPOOyW666SaTTZw40WTvv/9+8Dih4QzIv9AAjrPOOstk1113XfD+xxxzjMnatGmT6TiPPPKIyW688UaTzZ07N3jsrVu3BnMgJaGL1Dt06GCy0GN0z549g+8zNGQE8QsN9JCyDzLYtGmTyUJDkoAUhZ5HrF+/Pnjb9u3bmyw0dK/Yc+KmgFfOAAAAACACbM4AAAAAIAJszgAAAAAgAmzOAAAAACACSQ0EadWqlclOO+00kx111FEma9mypck++OADk4Uu2pWkN954w2Q33HCDyR5++GGTvffeeyZjyAdKEerhsGHDgrdt3ry5yUK9Cw22oZ9A+Gvj2WefNdlee+1lssmTJ2d+n4hf6N9xSXruuedMdvTRR5ts8+bNJuvcubPJ3n333RJWBzSu1atXm+wPf/hD8LZDhgwx2Xe+8x2TnXPOOSYLDR7JI145AwAAAIAIsDkDAAAAgAiwOQMAAACACNS7OXPO9XTOTXfOzXfOveScO7eQj3POLXXOzSv8Glr95QL1o7NIDZ1FaugsUkRvkQJX34X/zrnukrp77+c653aRNEfSMEmnSFrvvb8m88Gcq/iUgWbNsr3455zLdLtiHw8u4o6D977eT2TsnUXTQmeRoDne+/4fdQM6u0PouUXoeUnodtu2bTMZw5hKVm9nJXpbS7179w7mf/zjH03WsWNHk1166aUmmzhxosm2bNnS8MVFotjzg3qnNXrv6yTVFd5e55xbIKlHZZcHVA6dRWroLFJDZ5EieosUNOiaM+dcb0mHSppZiMY45/7qnJvsnNutwmsDykZnkRo6i9TQWaSI3iJWmTdnzrkOku6XdJ73fq2kn0n6mKR+2vG/EBOK3G+0c262c252BdYLZEZnkRo6i9TQWaSI3iJmmTZnzrmW2lHiu733D0iS936Z936b9367pFskDQjd13s/yXvfP8v3AgOVQmeRGjqL1NBZpIjeInZZpjU6SbdJWuC9v3anvPtONztZ0ouVXx7QcHQWqaGzSA2dRYroLVJQ70AQSZ+RNErS35xz8
wrZpZJGOOf6SfKS3pD0raqssB5MUURA1J0FAugsUkNnFZ6uGJrCiGjQ2xp54403gvmAAfZFyVtvvdVkn/rUp0zWunVrk6U8rbGYLNMan5UUGvX428ovBygfnUVq6CxSQ2eRInqLFDRoWiMAAAAAoDrYnAEAAABABNicAQAAAEAEXOhi1qodzLnaHQy55L0Pfa941dBZlIvOIkFzajkqnM6iAmraWYneVtKOIZr/rF27dib7+9//brKUB4IUe37AK2cAAAAAEAE2ZwAAAAAQATZnAAAAABABNmcAAAAAEIF6fwh1ha2U9Gbh7a6FP+dBns5Fivd8ejXCMelsGmI9HzpbOXk6Fynu86l1b/PaWSlf5xPzuTTmY23MH5dS1Px8QsMJN2zYUIl3HfPnpmhnazqt8Z8O7NzsWk/WqZY8nYuUv/OplDx9XPJ0LlL+zqdS8vRxydO5SPk7n0rJ28clT+eTp3OppLx9XPJ0PqmeC9/WCAAAAAARYHMGAAAAABFozM3ZpEY8dqXl6Vyk/J1PpeTp45Knc5Hydz6VkqePS57ORcrf+VRK3j4ueTqfPJ1LJeXt45Kn80nyXBrtmjMAAAAAwD/wbY0AAAAAEIGab86ccyc65xY65xY75y6u9fHL5Zyb7Jxb7px7caess3PuCefcosLvuzXmGrNyzvV0zk13zs13zr3knDu3kCd5PtVCZ+NBZ7Ohs/Ggs9ml3Ns8dVait1ml3FkpX73NU2drujlzzjWXdKOkz0s6UNII59yBtVxDBUyRdOKHsoslPem97yPpycKfU7BV0vne+wMlHSnp7MLnI9XzqTg6Gx06Ww86Gx06m0EOejtF+emsRG/rlYPOSvnqbW46W+tXzgZIWuy9f817v0XSPZJOqvEayuK9nyFp1YfikyTdUXj7DknDarqoEnnv67z3cwtvr5O0QFIPJXo+VUJnI0JnM6GzEaGzmSXd2zx1VqK3GSXdWSlfvc1TZ2u9Oesh6e2d/rykkKWum/e+rvD2u5K6NeZiSuGc6y3pUEkzlYPzqSA6Gyk6WxSdjRSd/Uh57G0uPsf0tqg8dlbKwec49c4yEKTC/I7xl0mNwHTOdZB0v6TzvPdrd/67FM8HDZPi55jONm0pfo7pbNOW6ueY3jZtKX6O89DZWm/OlkrqudOf9y5kqVvmnOsuSYXflzfyejJzzrXUjhLf7b1/oBAnez5VQGcjQ2frRWcjQ2czyWNvk/4c09t65bGzUsKf47x0ttabs1mS+jjn9nXOtZJ0qqRpNV5DNUyTdHrh7dMlPdSIa8nMOeck3SZpgff+2p3+KsnzqRI6GxE6mwmdjQidzSyPvU32c0xvM8ljZ6VEP8e56qz3vqa/JA2V9IqkVyX9d62PX4H1T5VUJ+nv2vH9xWdK6qIdE2AWSfq9pM6Nvc6M5zJQO17e/aukeYVfQ1M9nyp+nOhsJL/obOaPE52N5BedbdDHKtne5qmzhfOht9k+Tsl2trD+3PQ2T511hRMCAAAAADQiBoIAAAAAQATYnAEAAABABNicAQAAAEAE2JwBAAAAQATYnAEAAABABNicAQAAAEAE2JwBAAAAQATYnAEAAABABP4/QsnJhvJZZLoAAAAASUVORK5CYII=\n", + "text/plain": [ + "
" + ] + }, + "metadata": { + "needs_background": "light" + }, + "output_type": "display_data" + } + ], + "source": [ + "# Browse examples\n", + "fig, axes = plt.subplots(2, 5)\n", + "\n", + "for index in range(5):\n", + " sample = get_sample(index)\n", + " decoded = model(sample[None])[0].detach()\n", + " show_image(axes[0, index], sample.cpu())\n", + " show_image(axes[1, index], decoded.cpu())" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "vel", + "language": "python", + "name": "vel" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.7.3" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/vel/model/latent/vq_vae.py b/vel/model/latent/vq_vae.py new file mode 100644 index 00000000..5f3d4677 --- /dev/null +++ b/vel/model/latent/vq_vae.py @@ -0,0 +1,306 @@ +""" +VQ-VAE implementation with Vector Quantization functions taken from +https://github.com/ritheshkumar95/pytorch-vqvae/blob/master/functions.py +""" +import itertools as it + +import torch +import torch.autograd as autograd +import torch.nn as nn +import torch.nn.functional as F +import torch.nn.init as init + +import vel.util.network as net_util + +from vel.api import GradientModel +from vel.metric import AveragingNamedMetric +from vel.metric.loss_metric import Loss + + +class VectorQuantization(autograd.Function): + @staticmethod + def forward(ctx, inputs, codebook): + with torch.no_grad(): + embedding_size = codebook.size(1) + inputs_size = inputs.size() + inputs_flatten = inputs.view(-1, embedding_size) + + codebook_sqr = torch.sum(codebook ** 2, dim=1) + inputs_sqr = torch.sum(inputs_flatten ** 2, dim=1, keepdim=True) + + # Compute the distances to the codebook + distances = torch.addmm(codebook_sqr + inputs_sqr, inputs_flatten, codebook.t(), alpha=-2.0, beta=1.0) + + _, indices_flatten = torch.min(distances, dim=1) + indices = indices_flatten.view(*inputs_size[:-1]) + ctx.mark_non_differentiable(indices) + + return indices + + @staticmethod + def backward(ctx, grad_output): + raise RuntimeError( + 'Trying to call `.grad()` on graph containing ' + '`VectorQuantization`. The function `VectorQuantization` ' + 'is not differentiable. Use `VectorQuantizationStraightThrough` ' + 'if you want a straight-through estimator of the gradient.' + ) + + +class VectorQuantizationStraightThrough(autograd.Function): + @staticmethod + def forward(ctx, inputs, codebook): + indices = vector_quantization(inputs, codebook) + indices_flatten = indices.view(-1) + ctx.save_for_backward(indices_flatten, codebook) + ctx.mark_non_differentiable(indices_flatten) + + codes_flatten = torch.index_select(codebook, dim=0, index=indices_flatten) + codes = codes_flatten.view_as(inputs) + + return codes, indices_flatten + + @staticmethod + def backward(ctx, grad_output, grad_indices): + grad_inputs, grad_codebook = None, None + + if ctx.needs_input_grad[0]: + # Straight-through estimator + grad_inputs = grad_output.clone() + + if ctx.needs_input_grad[1]: + # Gradient wrt. 
the codebook + indices, codebook = ctx.saved_tensors + embedding_size = codebook.size(1) + + grad_output_flatten = (grad_output.contiguous().view(-1, embedding_size)) + grad_codebook = torch.zeros_like(codebook) + grad_codebook.index_add_(0, indices, grad_output_flatten) + + return grad_inputs, grad_codebook + + +vector_quantization = VectorQuantization.apply +vector_quantization_straight_through = VectorQuantizationStraightThrough.apply + + +class VQEmbedding(nn.Module): + """ Vector-Quantised code embedding for the latent variables """ + + def __init__(self, k: int, d: int): + super().__init__() + self.k = k + self.d = d + self.embedding = nn.Parameter(torch.empty((self.k, self.d))) + + def reset_weights(self): + """ Initialize weights of the embedding """ + self.embedding.data.uniform_(-1.0/self.k, 1.0/self.k) + + def extra_repr(self) -> str: + return f"k={self.k}, d={self.d}" + + def forward(self, z_e_x): + z_e_x_ = z_e_x.permute(0, 2, 3, 1).contiguous() + latents = vector_quantization(z_e_x_, self.embedding) + return latents + + def straight_through(self, z_e_x): + z_e_x_ = z_e_x.permute(0, 2, 3, 1).contiguous() + z_q_x_, indices = vector_quantization_straight_through(z_e_x_, self.embedding.detach()) + z_q_x = z_q_x_.permute(0, 3, 1, 2).contiguous() + + z_q_x_bar_flatten = torch.index_select(self.embedding, dim=0, index=indices) + z_q_x_bar_ = z_q_x_bar_flatten.view_as(z_e_x_) + z_q_x_bar = z_q_x_bar_.permute(0, 3, 1, 2).contiguous() + + return z_q_x, z_q_x_bar + + +class VQVAE(GradientModel): + """ + Implementation of Neural Discrete Representation Learning (https://arxiv.org/abs/1711.00937) + Vector-Quantised Variational-AutoEncoder (VQ-VAE) + """ + + def __init__(self, img_rows, img_cols, img_channels, channels=None, k: int = 512, d: int = 256, + beta: float = 1.0): + super().__init__() + + if channels is None: + channels = [16, 32, 32] + + layer_series = [ + (3, 1, 1), + (3, 1, 2), + (3, 1, 2), + ] + + self.codebook = VQEmbedding(k, d) + + self.final_width = net_util.convolutional_layer_series(img_rows, layer_series) + self.final_height = net_util.convolutional_layer_series(img_cols, layer_series) + self.channels = channels + + self.beta = beta + self.k = k + self.d = d + + self.encoder = nn.Sequential( + nn.Conv2d(in_channels=img_channels, out_channels=channels[0], kernel_size=(3, 3), padding=1), + nn.SELU(True), + nn.LayerNorm([ + channels[0], + net_util.convolutional_layer_series(img_rows, layer_series[:1]), + net_util.convolutional_layer_series(img_cols, layer_series[:1]), + ]), + nn.Conv2d(in_channels=channels[0], out_channels=channels[1], kernel_size=(3, 3), stride=2, padding=1), + nn.SELU(True), + nn.LayerNorm([ + channels[1], + net_util.convolutional_layer_series(img_rows, layer_series[:2]), + net_util.convolutional_layer_series(img_cols, layer_series[:2]), + ]), + nn.Conv2d(in_channels=channels[1], out_channels=channels[2], kernel_size=(3, 3), stride=2, padding=1), + nn.SELU(True), + nn.LayerNorm([ + channels[2], + net_util.convolutional_layer_series(img_rows, layer_series), + net_util.convolutional_layer_series(img_cols, layer_series), + ]), + nn.Conv2d(in_channels=channels[2], out_channels=self.d, kernel_size=(3, 3), stride=1, padding=1), + nn.SELU(True), + nn.LayerNorm([ + self.d, + net_util.convolutional_layer_series(img_rows, layer_series), + net_util.convolutional_layer_series(img_cols, layer_series), + ]), + ) + + self.decoder = nn.Sequential( + nn.ConvTranspose2d(in_channels=self.d, out_channels=channels[2], kernel_size=(3, 3), stride=1, padding=1), + # 
nn.Linear(d, self.final_width * self.final_height * channels[2]), + # nn.ReLU(True), + nn.SELU(True), + nn.LayerNorm([ + channels[2], + net_util.convolutional_layer_series(img_rows, layer_series), + net_util.convolutional_layer_series(img_cols, layer_series), + ]), + nn.ConvTranspose2d( + in_channels=channels[2], out_channels=channels[1], kernel_size=3, stride=2, padding=1, output_padding=1 + ), + # nn.ReLU(True), + nn.SELU(True), + nn.LayerNorm([ + channels[1], + net_util.convolutional_layer_series(img_rows, layer_series[:2]), + net_util.convolutional_layer_series(img_cols, layer_series[:2]), + ]), + nn.ConvTranspose2d( + in_channels=channels[1], out_channels=channels[0], kernel_size=3, stride=2, padding=1, output_padding=1 + ), + # nn.ReLU(True), + nn.SELU(True), + nn.LayerNorm([ + channels[0], + net_util.convolutional_layer_series(img_rows, layer_series[:1]), + net_util.convolutional_layer_series(img_cols, layer_series[:1]), + ]), + nn.ConvTranspose2d(in_channels=channels[0], out_channels=img_channels, kernel_size=3, padding=1), + nn.Sigmoid() + ) + + def reset_weights(self): + self.codebook.reset_weights() + + for m in it.chain(self.encoder, self.decoder): + if isinstance(m, nn.Conv2d): + self._weight_initializer(m) + elif isinstance(m, nn.ConvTranspose2d): + self._weight_initializer(m) + elif isinstance(m, nn.Linear): + self._weight_initializer(m) + + @staticmethod + def _weight_initializer(tensor): + init.xavier_uniform_(tensor.weight, gain=init.calculate_gain('relu')) + init.constant_(tensor.bias, 0.0) + + def encode(self, x): + z_e_x = self.encoder(x) + latents = self.codebook(z_e_x) + return latents + + def decode(self, latents): + z_q_x = self.codebook.embedding(latents).permute(0, 3, 1, 2) # (B, D, H, W) + x_tilde = self.decoder(z_q_x) + return x_tilde + + def forward(self, x): + z_e_x = self.encoder(x) + z_q_x_st, z_q_x = self.codebook.straight_through(z_e_x) + x_tilde = self.decoder(z_q_x_st) + return x_tilde + + def calculate_gradient(self, data: dict) -> dict: + """ + Calculate gradient for given batch of supervised learning. 
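+        The loss is the VQ-VAE objective: reconstruction loss plus the
+        vector-quantization (codebook) loss plus beta times the commitment loss.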
+ Returns a dictionary of metrics + """ + input_data = data['x'] + target_data = data['y'] + + # x_tilde, z_e_x, z_q_x = self(input_data) + z_e_x = self.encoder(input_data) + z_q_x_st, z_q_x = self.codebook.straight_through(z_e_x) + x_tilde = self.decoder(z_q_x_st) + + # Reconstruction loss + loss_recons = F.mse_loss(x_tilde, target_data) + + # Vector quantization objective + loss_vq = F.mse_loss(z_q_x, z_e_x.detach()) + + # Commitment objective + loss_commit = F.mse_loss(z_e_x, z_q_x.detach()) + + loss = loss_recons + loss_vq + self.beta * loss_commit + + if self.training: + loss.backward() + + return { + 'loss': loss.item(), + + 'grad_norm': grad_norm, + 'reconstruction': loss_recons.item(), + 'loss_vq': loss_vq.item(), + 'loss_commit': loss_commit.item() + } + + def metrics(self): + """ Set of metrics for this model """ + return [ + Loss(), + AveragingNamedMetric('reconstruction', scope="train"), + AveragingNamedMetric('loss_vq', scope="train"), + AveragingNamedMetric('loss_commit', scope="train"), + AveragingNamedMetric('grad_norm', scope="train") + ] + + +def create(img_rows, img_cols, img_channels, channels=None, k: int = 512, d: int = 256, + beta: float = 1.0): + """ Vel factory function """ + from vel.api import ModelFactory + + if channels is None: + channels = [16, 32, 32] + + def instantiate(**_): + return VQVAE( + img_rows, img_cols, img_channels, channels=channels, k=k, d=d, beta=beta + ) + + return ModelFactory.generic(instantiate) From 6934cb6ff4be48c2ba001237716c584d2524b0e0 Mon Sep 17 00:00:00 2001 From: Million Integrals Date: Thu, 3 Oct 2019 18:39:28 -0700 Subject: [PATCH 119/162] Implemented simple MNIST-GAN --- docs/Bibliography.md | 1 + examples-configs/gan/mnist/mnist_gan.yaml | 61 ++++++++ vel/api/info.py | 9 ++ vel/api/model.py | 1 + vel/api/optimizer.py | 83 ++++++++++- vel/model/gan/__init__.py | 0 vel/model/gan/simple_gan.py | 162 ++++++++++++++++++++++ vel/train/trainer.py | 3 +- 8 files changed, 317 insertions(+), 3 deletions(-) create mode 100644 examples-configs/gan/mnist/mnist_gan.yaml create mode 100644 vel/model/gan/__init__.py create mode 100644 vel/model/gan/simple_gan.py diff --git a/docs/Bibliography.md b/docs/Bibliography.md index 2490513a..31cfef82 100644 --- a/docs/Bibliography.md +++ b/docs/Bibliography.md @@ -153,6 +153,7 @@ in the following repositories (in alphabetical order): - https://github.com/Kaixhin/Rainbow - https://github.com/Khrylx/PyTorch-RL - https://github.com/LiyuanLucasLiu/RAdam +- https://github.com/eriklindernoren/PyTorch-GAN - https://github.com/fastai/fastai - https://github.com/lessw2020/Ranger-Deep-Learning-Optimizer - https://github.com/openai/baselines diff --git a/examples-configs/gan/mnist/mnist_gan.yaml b/examples-configs/gan/mnist/mnist_gan.yaml new file mode 100644 index 00000000..4de4ecce --- /dev/null +++ b/examples-configs/gan/mnist/mnist_gan.yaml @@ -0,0 +1,61 @@ +name: 'mnist_gan' + + +model: + name: vel.model.gan.simple_gan + img_rows: 28 + img_cols: 28 + img_channels: 1 + latent_dim: 128 + + +source: + name: vel.data.source.vision.mnist + + +loader: + name: vel.data.dataset_loader + batch_size: 128 +# num_workers: 4 +# pin_memory: true + + transformations: + - name: vel.data.transformation.to_array + # - name: vel.data.augmentation.random_scale + # tags: train + # size: 28 + # max_zoom: 1.1 + # - name: vel.data.augmentation.random_rotate + # tags: train + # deg: 15.0 + # - name: vel.data.augmentation.random_crop + # tags: train + # width: 28 + # height: 28 + # padding: 3 + # padding_mode: 'constant' + - name: 
vel.data.transformation.image_to_tensor + - name: vel.data.transformation.unsupervised + + +optimizer: + name: vel.optimizer.radam + lr: 2.0e-4 + eps: 1.0e-4 + + +#scheduler: +# name: vel.scheduler.multi_step +# gamma: 0.71968 # 10 * (-1/7) +# milestones: [ 1, 4, 13, 40, 121, 364, 1093, 3280] + + +commands: + augvis: + name: vel.command.augvis_command + samples: 10 + cases: 5 + + train: + name: vel.command.train_command + epochs: 200 diff --git a/vel/api/info.py b/vel/api/info.py index 1f0a765b..e76cec24 100644 --- a/vel/api/info.py +++ b/vel/api/info.py @@ -99,6 +99,15 @@ def __init__(self, global_epoch_idx, metrics): self._reset_metrics() self.metrics_by_name = {m.name: m for m in self.metrics} + def __contains__(self, metric): + if ':' in metric: + # TODO(jerry) There's got to be a better way to do it + metric_name = metric.split(':')[-1] + else: + metric_name = metric + + return metric_name in self.metrics_by_name + @torch.no_grad() def calculate(self, batch_info): """ Calculate metric values """ diff --git a/vel/api/model.py b/vel/api/model.py index 693d6a46..4946b7bb 100644 --- a/vel/api/model.py +++ b/vel/api/model.py @@ -76,6 +76,7 @@ def validate(self, data: dict) -> dict: raise NotImplementedError + class GradientModel(ValidatedModel): """ Model that calculates a single gradient and optimizes it """ diff --git a/vel/api/optimizer.py b/vel/api/optimizer.py index 82f56825..bed2a75b 100644 --- a/vel/api/optimizer.py +++ b/vel/api/optimizer.py @@ -1,11 +1,15 @@ -import typing +import collections import itertools as it +import typing from torch.nn.utils import clip_grad_norm_ from torch.optim.optimizer import Optimizer -from vel.metric import DefaultAveragingNamedMetric + from vel.api.callback import Callback from vel.api.scheduler import SchedulerFactory +from vel.exception import VelException +from vel.metric import DefaultAveragingNamedMetric +from vel.util.datastructure import flatten_dict class VelOptimizer: @@ -101,6 +105,72 @@ def create_scheduler(self, scheduler_factory: SchedulerFactory, last_epoch: int return [scheduler_factory.instantiate(self.optimizer, last_epoch=last_epoch)] +class VelMultiOptimizer(VelOptimizer): + """ Optimizer that wraps several individual optimizers """ + + def __init__(self, optimizers: typing.Dict[str, VelOptimizer], canonical_name: typing.Optional[str] = None): + self.optimizers = optimizers + + # Canonical, chosen optimizer + self.canonical_name = list(optimizers.keys())[0] + + self.initial_lrs = { + name: optimizer.get_lr() + for name, optimizer in self.optimizers.items() + } + + def __getitem__(self, item): + return self.optimizers[item] + + def get_lr(self) -> float: + return self.optimizers[self.canonical_name].get_lr() + + def set_lr(self, lr: float): + canonical_lr = self.initial_lrs[self.canonical_name] + + for name, optimizer in self.optimizers.items(): + opt_lr = self.initial_lrs[name] / canonical_lr * lr + optimizer.set_lr(opt_lr) + + def state_dict(self) -> dict: + output = {} + + for name, optimizer in self.optimizers.items(): + output[name] = optimizer.state_dict() + + def load_state_dict(self, state_dict: dict) -> None: + for name, optimizer in self.optimizers.items(): + optimizer.load_state_dict(state_dict[name]) + + def zero_grad(self) -> None: + for optimizer in self.optimizers.values(): + optimizer.zero_grad() + + def step(self, closure=None) -> dict: + output = {} + + for name, optimizer in self.optimizers.items(): + metrics = optimizer.step() + flatten_dict(metrics, output, name) + + return output + + def 
create_scheduler(self, scheduler_factory: SchedulerFactory, last_epoch: int = -1) -> [Callback]: + """ Create a scheduler instance for this optimizer """ + return [ + scheduler_factory.instantiate(optimizer, last_epoch=last_epoch) + for optimizer in self.optimizers.values() + ] + + def add_param_group(self, param_group: dict) -> None: + raise VelException("Unsupported operation") + + def metrics(self) -> list: + """ Set of metrics for this model """ + # TODO(jerry): aggregate metrics + return [] + + class OptimizerFactory: """ Base class for optimizer factories """ @@ -109,3 +179,12 @@ def instantiate(self, parameters) -> VelOptimizer: def instantiate_parameter_groups(self, parameters) -> VelOptimizer: raise NotImplementedError + + def instantiate_multi(self, parameter_dict: dict) -> VelMultiOptimizer: + od = collections.OrderedDict() + + for name, value in parameter_dict.items(): + od[name] = self.instantiate(value) + + return VelMultiOptimizer(od) + diff --git a/vel/model/gan/__init__.py b/vel/model/gan/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/vel/model/gan/simple_gan.py b/vel/model/gan/simple_gan.py new file mode 100644 index 00000000..78d516eb --- /dev/null +++ b/vel/model/gan/simple_gan.py @@ -0,0 +1,162 @@ +""" +Simple GAN code is based on https://github.com/eriklindernoren/PyTorch-GAN/blob/master/implementations/gan/gan.py +""" +import collections +import numpy as np +import torch +import torch.nn as nn + +from vel.api import OptimizedModel, ModelFactory, VelOptimizer, OptimizerFactory +from vel.api.optimizer import VelMultiOptimizer +from vel.metric import AveragingNamedMetric + + +class Generator(nn.Module): + def __init__(self, img_shape, latent_dim): + super(Generator, self).__init__() + + self.img_shape = img_shape + self.latent_dim = latent_dim + + def block(in_feat, out_feat, normalize=True): + layers = [nn.Linear(in_feat, out_feat)] + if normalize: + layers.append(nn.BatchNorm1d(out_feat, 0.8)) + layers.append(nn.LeakyReLU(0.2, inplace=True)) + return layers + + self.model = nn.Sequential( + *block(latent_dim, 128, normalize=False), + *block(128, 256), + *block(256, 512), + *block(512, 1024), + nn.Linear(1024, int(np.prod(img_shape))), + nn.Tanh() + ) + + def forward(self, z): + img = self.model(z) + img = img.view(img.size(0), *self.img_shape) + return img + + +class Discriminator(nn.Module): + def __init__(self, img_shape, latent_dim): + super(Discriminator, self).__init__() + + self.model = nn.Sequential( + nn.Linear(int(np.prod(img_shape)), 512), + nn.LeakyReLU(0.2, inplace=True), + nn.Linear(512, 256), + nn.LeakyReLU(0.2, inplace=True), + nn.Linear(256, 1), + nn.Sigmoid(), + ) + + def forward(self, img): + img_flat = img.view(img.size(0), -1) + validity = self.model(img_flat) + + return validity + + +class SimpleGAN(OptimizedModel): + """ + Implements simple Generative Adversarial Network in the spirit of the original paper + "Generative Adversarial Networks" https://arxiv.org/abs/1406.2661 + """ + + def __init__(self, img_rows, img_cols, img_channels, latent_dim): + super().__init__() + + self.image_shape = (img_channels, img_rows, img_cols) + self.latent_dim = latent_dim + + self.generator = Generator(img_shape=self.image_shape, latent_dim=self.latent_dim) + self.discriminator = Discriminator(img_shape=self.image_shape, latent_dim=self.latent_dim) + + self.adversarial_loss = nn.BCELoss() + + def create_optimizer(self, optimizer_factory: OptimizerFactory) -> VelMultiOptimizer: + """ Create optimizer for the purpose of optimizing this model 
""" + gen_parameters = filter(lambda p: p.requires_grad, self.generator.parameters()) + disc_parameters = filter(lambda p: p.requires_grad, self.discriminator.parameters()) + + return optimizer_factory.instantiate_multi(collections.OrderedDict([ + ('generator', gen_parameters), + ('discriminator', disc_parameters) + ])) + + def optimize(self, data: dict, optimizer: VelMultiOptimizer) -> dict: + """ + Perform one step of optimization of the model + :returns a dictionary of metrics + """ + optimizer_G = optimizer['generator'] + optimizer_D = optimizer['discriminator'] + + input_data = data['x'] + + # Adversarial ground truths + valid = torch.ones(input_data.size(0), 1).to(input_data.device) + fake = torch.zeros(input_data.size(0), 1).to(input_data.device) + + optimizer_G.zero_grad() + + # Sample noise as generator input + z = torch.randn(input_data.size(0), self.latent_dim).to(input_data.device) + + # Generate a batch of images + gen_imgs = self.generator(z) + + # Loss measures generator's ability to fool the discriminator + g_loss = self.adversarial_loss(self.discriminator(gen_imgs), valid) + + g_loss.backward() + optimizer_G.step() + + # --------------------- + # Train Discriminator + # --------------------- + + optimizer_D.zero_grad() + + # Measure discriminator's ability to classify real from generated samples + real_loss = self.adversarial_loss(self.discriminator(input_data), valid) + fake_loss = self.adversarial_loss(self.discriminator(gen_imgs.detach()), fake) + d_loss = (real_loss + fake_loss) / 2 + + d_loss.backward() + optimizer_D.step() + + return { + 'gen_loss': g_loss.item(), + 'disc_loss': d_loss.item() + } + + def validate(self, data: dict) -> dict: + """ + Perform one step of model inference without optimization + :returns a dictionary of metrics + """ + return { + 'gen_loss': 0.0, + 'disc_loss': 0.0 + } + + def metrics(self): + """ Set of metrics for this model """ + return [ + AveragingNamedMetric('gen_loss', scope="train"), + AveragingNamedMetric('disc_loss', scope="train"), + ] + + +def create(img_rows, img_cols, img_channels, latent_dim): + """ Vel factory function """ + def instantiate(**_): + return SimpleGAN( + img_rows, img_cols, img_channels, latent_dim=latent_dim + ) + + return ModelFactory.generic(instantiate) diff --git a/vel/train/trainer.py b/vel/train/trainer.py index 6222067c..7d151b18 100644 --- a/vel/train/trainer.py +++ b/vel/train/trainer.py @@ -73,7 +73,8 @@ def train_epoch(self, epoch_info, loader: DatasetLoader, interactive=True): batch_info.update(metrics) batch_info.on_batch_end('train') - iterator.set_postfix(loss=epoch_info.result_accumulator.intermediate_value('loss')) + if 'loss' in epoch_info.result_accumulator: + iterator.set_postfix(loss=epoch_info.result_accumulator.intermediate_value('loss')) def validation_epoch(self, epoch_info, loader: DatasetLoader, interactive=True): """ Run a single evaluation epoch """ From 9d35c4f17dc6d99182d4077612a4bd493b7faf88 Mon Sep 17 00:00:00 2001 From: Jerry Tworek Date: Thu, 3 Oct 2019 22:20:15 -0700 Subject: [PATCH 120/162] Requirements update. 
--- requirements.txt | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) diff --git a/requirements.txt b/requirements.txt index 9629fe23..8a6ce769 100644 --- a/requirements.txt +++ b/requirements.txt @@ -6,7 +6,7 @@ # atari-py==0.2.6 # via gym atomicwrites==1.3.0 # via pytest -attrs==19.1.0 +attrs==19.2.0 box2d-py==2.3.8 # via gym certifi==2019.9.11 # via requests chardet==3.0.4 # via requests @@ -17,7 +17,7 @@ future==0.17.1 # via pyglet gym[atari,box2d,classic_control]==0.14.0 idna==2.8 # via requests importlib-metadata==0.23 # via pluggy, pytest -joblib==0.13.2 # via scikit-learn +joblib==0.14.0 # via scikit-learn jsonpatch==1.24 # via visdom jsonpointer==2.0 # via jsonpatch kiwisolver==1.1.0 # via matplotlib @@ -27,13 +27,13 @@ numpy==1.17.2 opencv-python==4.1.1.26 packaging==19.2 # via pytest pandas==0.25.1 -pillow==6.1.0 # via gym, torchvision, visdom +pillow==6.2.0 # via gym, torchvision, visdom pluggy==0.13.0 # via pytest py==1.8.0 # via pytest pyglet==1.3.2 # via gym pymongo==3.9.0 pyparsing==2.4.2 # via matplotlib, packaging -pytest==5.1.2 +pytest==5.2.0 python-dateutil==2.8.0 # via matplotlib, pandas pytz==2019.2 # via pandas pyyaml==5.1.2 @@ -48,11 +48,8 @@ torchtext==0.4.0 torchvision==0.4.0 tornado==6.0.3 # via visdom tqdm==4.36.1 -urllib3==1.25.4 # via requests +urllib3==1.25.6 # via requests visdom==0.1.8.9 wcwidth==0.1.7 # via pytest websocket-client==0.56.0 # via visdom zipp==0.6.0 # via importlib-metadata - -# The following packages are considered to be unsafe in a requirements file: -# setuptools==41.2.0 # via kiwisolver From 23e0b21de1b6783b48c87084e9913f4b5cccc702 Mon Sep 17 00:00:00 2001 From: Million Integrals Date: Sat, 5 Oct 2019 20:33:26 -0700 Subject: [PATCH 121/162] Updated roadmap slightly. --- README.md | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/README.md b/README.md index 60563e3f..57237231 100644 --- a/README.md +++ b/README.md @@ -206,14 +206,17 @@ Very likely to be included: Possible to be included: -- Popart reward normalization -- Parameter Space Noise for Exploration -- Hindsight experience replay - Generative adversarial networks +For version 0.5 I'll again be looking to expand widely on the spectrum of available models in the framework, +as well as I'll try to support **multi-gpu** training by data parallelism. + +Work in progress roadmap: -Code quality: -- Factor action noise back into the policy +- Popart reward normalization +- PixelCNN +- MADE: Masked Autoencoder for Distribution Estimation +- Variational AutoEncoder with Inverse Autoregressive Flow # Directories From f9f594274325c867ac831bb6110c02e30d73ac11 Mon Sep 17 00:00:00 2001 From: Million Integrals Date: Sun, 6 Oct 2019 21:35:44 -0700 Subject: [PATCH 122/162] Refactored RNN language modelling examples. 
--- .velproject.yaml | 2 + ...kespeare_gru.yaml => gen_shakespeare.yaml} | 38 ++++-- .../gen_shakespeare_gru_embedding.yaml | 49 ------- .../nlp/generation/gen_shakespeare_lstm.yaml | 49 ------- .../gen_shakespeare_lstm_embedding.yaml | 49 ------- requirements.in | 10 +- requirements.txt | 4 +- vel/api/model.py | 26 ++-- vel/api/size_hint.py | 8 ++ vel/command/rnn/generate_text.py | 5 +- vel/command/summary_command.py | 19 +-- vel/data/text_character_loader.py | 10 +- vel/model/nlp/__init__.py | 0 vel/model/nlp/language_model.py | 79 +++++++++++ .../rnn/multilayer_rnn_sequence_model.py | 2 +- vel/module/input/embedding.py | 43 ------ vel/module/input/flatten.py | 4 +- vel/module/input/one_hot_encoding.py | 16 +-- vel/module/input/sequence.py | 21 --- vel/net/layer/arch/parallel.py | 2 +- vel/net/layer/dropout.py | 54 ++++++++ vel/net/layer/input/image_to_tensor.py | 2 +- vel/net/layer/input/normalize.py | 2 +- vel/net/layer/mlp.py | 19 ++- vel/net/layer/nlp/__init__.py | 0 vel/net/layer/nlp/alphabet_embedding.py | 52 ++++++++ vel/net/layer/nlp/alphabet_one_hot_encode.py | 47 +++++++ vel/net/layer/rnn.py | 126 ++++++++++++++++++ vel/net/layer/util/concat.py | 2 +- vel/net/layer/util/repeat.py | 2 +- vel/net/layer_base.py | 10 +- vel/net/modular.py | 37 ++++- vel/rl/layer/double_nature_cnn.py | 2 +- vel/rl/layer/double_noisy_nature_cnn.py | 2 +- vel/rl/layer/nature_cnn.py | 2 +- vel/rl/layer/nature_cnn_small.py | 2 +- vel/rl/layer/rnn_cell.py | 2 +- vel/util/summary.py | 87 ------------ 38 files changed, 487 insertions(+), 399 deletions(-) rename examples-configs/nlp/generation/{gen_shakespeare_gru.yaml => gen_shakespeare.yaml} (52%) delete mode 100644 examples-configs/nlp/generation/gen_shakespeare_gru_embedding.yaml delete mode 100644 examples-configs/nlp/generation/gen_shakespeare_lstm.yaml delete mode 100644 examples-configs/nlp/generation/gen_shakespeare_lstm_embedding.yaml create mode 100644 vel/model/nlp/__init__.py create mode 100644 vel/model/nlp/language_model.py delete mode 100644 vel/module/input/embedding.py delete mode 100644 vel/module/input/sequence.py create mode 100644 vel/net/layer/dropout.py create mode 100644 vel/net/layer/nlp/__init__.py create mode 100644 vel/net/layer/nlp/alphabet_embedding.py create mode 100644 vel/net/layer/nlp/alphabet_one_hot_encode.py create mode 100644 vel/net/layer/rnn.py delete mode 100644 vel/util/summary.py diff --git a/.velproject.yaml b/.velproject.yaml index 8127a339..5e25ba22 100644 --- a/.velproject.yaml +++ b/.velproject.yaml @@ -29,4 +29,6 @@ visdom_settings: global_commands: list: name: vel.command.list_command + summary: + name: vel.command.summary_command diff --git a/examples-configs/nlp/generation/gen_shakespeare_gru.yaml b/examples-configs/nlp/generation/gen_shakespeare.yaml similarity index 52% rename from examples-configs/nlp/generation/gen_shakespeare_gru.yaml rename to examples-configs/nlp/generation/gen_shakespeare.yaml index 2ae82918..ac7bd121 100644 --- a/examples-configs/nlp/generation/gen_shakespeare_gru.yaml +++ b/examples-configs/nlp/generation/gen_shakespeare.yaml @@ -1,4 +1,4 @@ -name: 'gen_shakespeare_gru' +name: 'gen_shakespeare' source: @@ -15,16 +15,30 @@ loader: model: - name: vel.model.rnn.multilayer_rnn_sequence_model - - input_block: - name: vel.module.input.one_hot_encoding - alphabet_size: 68 # Size of the alphabet + 1 - - hidden_layers: [512, 512, 512] - output_dim: 68 # Size of the alphabet + 1 - dropout: 0.5 - rnn_type: 'gru' + name: vel.model.nlp.language_model + + net: + name: vel.net.modular + 
layers: + - name: vel.net.layer.nlp.alphabet_embedding + dim: 512 + - name: vel.net.layer.rnn + hidden_size: 512 + rnn_type: 'lstm' + - name: vel.net.layer.dropout + p: 0.3 + - name: vel.net.layer.rnn + hidden_size: 512 + rnn_type: 'lstm' + dropout: 0.5 + - name: vel.net.layer.dropout + p: 0.3 + - name: vel.net.layer.rnn + hidden_size: 512 + rnn_type: 'lstm' + dropout: 0.5 + - name: vel.net.layer.dropout + p: 0.3 optimizer: @@ -46,5 +60,3 @@ commands: temperature: !param temperature = 0.8 - - diff --git a/examples-configs/nlp/generation/gen_shakespeare_gru_embedding.yaml b/examples-configs/nlp/generation/gen_shakespeare_gru_embedding.yaml deleted file mode 100644 index 70e1a961..00000000 --- a/examples-configs/nlp/generation/gen_shakespeare_gru_embedding.yaml +++ /dev/null @@ -1,49 +0,0 @@ -name: 'gen_shakespeare_gru_embedding' - - -source: - name: vel.data.source.nlp.text_url - # Andrej Karpathy built a small (4.4mb) file with combined all works of Shakespeare - url: 'https://cs.stanford.edu/people/karpathy/char-rnn/shakespeare_input.txt' - local_dir: './rnn_shakespeare' - - -loader: - name: vel.data.text_character_loader - sequence_length: 128 - batch_size: 64 - - -model: - name: vel.model.rnn.multilayer_rnn_sequence_model - - input_block: - name: vel.module.input.embedding - alphabet_size: 68 # Size of the alphabet + 1 - output_dim: 512 # Embedding dimension - - hidden_layers: [512, 512, 512] - output_dim: 68 # Size of the alphabet + 1 - dropout: 0.5 - rnn_type: 'gru' - - -optimizer: - name: vel.optimizer.adam - lr: 1.0e-3 - epsilon: 1.0e-5 - - -commands: - train: - name: vel.command.train_command - max_grad_norm: 0.5 - epochs: 20 - - generate: - name: vel.command.rnn.generate_text - start_letter: !param start_letter = 'A' - length: !param length = 500 - temperature: !param temperature = 0.8 - - diff --git a/examples-configs/nlp/generation/gen_shakespeare_lstm.yaml b/examples-configs/nlp/generation/gen_shakespeare_lstm.yaml deleted file mode 100644 index 0f06d487..00000000 --- a/examples-configs/nlp/generation/gen_shakespeare_lstm.yaml +++ /dev/null @@ -1,49 +0,0 @@ -name: 'gen_shakespeare_lstm' - - -source: - name: vel.data.source.nlp.text_url - # Andrej Karpathy built a small (4.4mb) file with combined all works of Shakespeare - url: 'https://cs.stanford.edu/people/karpathy/char-rnn/shakespeare_input.txt' - local_dir: './rnn_shakespeare' - - -loader: - name: vel.data.text_character_loader - sequence_length: 128 - batch_size: 64 - - -model: - name: vel.model.rnn.multilayer_rnn_sequence_model - - input_block: - name: vel.module.input.one_hot_encoding - alphabet_size: 68 # Size of the alphabet + 1 - - hidden_layers: [512, 512, 512] - output_dim: 68 # Size of the alphabet + 1 - dropout: 0.5 - rnn_type: 'lstm' - - -optimizer: - name: vel.optimizer.adam - lr: 1.0e-3 - epsilon: 1.0e-5 - - -commands: - train: - name: vel.command.train_command - max_grad_norm: 0.5 - epochs: 20 - - generate: - name: vel.command.rnn.generate_text - start_letter: !param start_letter = 'A' - length: !param length = 500 - temperature: !param temperature = 0.8 - - - diff --git a/examples-configs/nlp/generation/gen_shakespeare_lstm_embedding.yaml b/examples-configs/nlp/generation/gen_shakespeare_lstm_embedding.yaml deleted file mode 100644 index c84af0d0..00000000 --- a/examples-configs/nlp/generation/gen_shakespeare_lstm_embedding.yaml +++ /dev/null @@ -1,49 +0,0 @@ -name: 'gen_shakespeare_lstm_embedding' - - -source: - name: vel.data.source.nlp.text_url - # Andrej Karpathy built a small (4.4mb) file with combined 
all works of Shakespeare - url: 'https://cs.stanford.edu/people/karpathy/char-rnn/shakespeare_input.txt' - local_dir: './rnn_shakespeare' - - -loader: - name: vel.data.text_character_loader - sequence_length: 128 - batch_size: 64 - - -model: - name: vel.model.rnn.multilayer_rnn_sequence_model - - input_block: - name: vel.module.input.embedding - alphabet_size: 68 # Size of the alphabet + 1 - output_dim: 512 # Embedding dimension - - hidden_layers: [512, 512, 512] - output_dim: 68 # Size of the alphabet + 1 - dropout: 0.5 - rnn_type: 'lstm' - - -optimizer: - name: vel.optimizer.adam - lr: 1.0e-3 - epsilon: 1.0e-5 - - -commands: - train: - name: vel.command.train_command - max_grad_norm: 0.5 - epochs: 20 - - generate: - name: vel.command.rnn.generate_text - start_letter: !param start_letter = 'A' - length: !param length = 500 - temperature: !param temperature = 0.8 - - diff --git a/requirements.in b/requirements.in index 6ffb91bd..70eebbb8 100644 --- a/requirements.in +++ b/requirements.in @@ -1,17 +1,17 @@ attrs cloudpickle +dnspython +gym[atari,box2d,classic_control] matplotlib numpy opencv-python pandas +pymongo +pytest pyyaml scikit-learn -torch~=1.2 torchtext torchvision +torch~=1.2 tqdm visdom -pymongo -dnspython -gym[atari,box2d,classic_control] -pytest diff --git a/requirements.txt b/requirements.txt index 9629fe23..508db5cd 100644 --- a/requirements.txt +++ b/requirements.txt @@ -2,7 +2,7 @@ # This file is autogenerated by pip-compile # To update, run: # -# pip-compile +# pip-compile requirements.in # atari-py==0.2.6 # via gym atomicwrites==1.3.0 # via pytest @@ -55,4 +55,4 @@ websocket-client==0.56.0 # via visdom zipp==0.6.0 # via importlib-metadata # The following packages are considered to be unsafe in a requirements file: -# setuptools==41.2.0 # via kiwisolver +# setuptools==41.4.0 # via kiwisolver diff --git a/vel/api/model.py b/vel/api/model.py index 4946b7bb..052bd1c3 100644 --- a/vel/api/model.py +++ b/vel/api/model.py @@ -1,11 +1,10 @@ import torch -import torch.nn as nn import vel.util.module_util as mu from vel.api.optimizer import VelOptimizer, OptimizerFactory from vel.metric.loss_metric import Loss -from vel.util.summary import summary + from .network import Network @@ -36,17 +35,13 @@ def train(self, mode=True): return self - def summary(self, input_size=None): + def summary(self): """ Print a model summary """ - - if input_size is None: - print(self) - print("-" * 100) - number = sum(p.numel() for p in self.parameters()) - print("Number of model parameters: {:,}".format(number)) - print("-" * 100) - else: - summary(self, input_size) + print(self) + print("-" * 100) + number = sum(p.numel() for p in self.parameters()) + print("Number of model parameters: {:,}".format(number)) + print("-" * 100) class OptimizedModel(Model): @@ -76,7 +71,6 @@ def validate(self, data: dict) -> dict: raise NotImplementedError - class GradientModel(ValidatedModel): """ Model that calculates a single gradient and optimizes it """ @@ -120,7 +114,11 @@ def metrics(self) -> list: return [Loss()] def calculate_gradient(self, data: dict) -> dict: - y_hat = self(data['x']) + if self.is_stateful: + y_hat, _ = self(data['x']) + else: + y_hat = self(data['x']) + loss_value = self.loss_value(data['x'], data['y'], y_hat) if self.training: diff --git a/vel/api/size_hint.py b/vel/api/size_hint.py index 8263c0a5..b1e4fecb 100644 --- a/vel/api/size_hint.py +++ b/vel/api/size_hint.py @@ -19,6 +19,14 @@ def shape(self, idx=1) -> typing.Tuple[int]: """ Get shape of size hint, except for a number of 
dimensions (batch dimensions """ return self[idx:] + def append(self, element: int) -> 'SizeHint': + """ Return a copy of this size hint, with new element added """ + return SizeHint(*(list(self) + [element])) + + def drop_last(self) -> 'SizeHint': + """ Return a copy of this size hint, with last element dropped """ + return SizeHint(*self[:-1]) + def __repr__(self): internal = ", ".join([self._inner_repr(s) for s in self]) return f"{self.__class__.__name__}({internal})" diff --git a/vel/command/rnn/generate_text.py b/vel/command/rnn/generate_text.py index 99b56647..bf14f074 100644 --- a/vel/command/rnn/generate_text.py +++ b/vel/command/rnn/generate_text.py @@ -6,6 +6,7 @@ import torch.distributions as dist from vel.api import TrainingInfo +from vel.util.tensor_util import to_device class GenerateTextCommand: @@ -41,12 +42,12 @@ def run(self): generated_text = [current_char] - state = model.zero_state(1).to(device) + state = to_device(model.zero_state(1), device) char_tensor = torch.from_numpy(np.array([current_char_encoded])).view(1, 1).to(device) for _ in tqdm.trange(self.length): - prob_logits, state = model.forward_state(char_tensor, state) + prob_logits, state = model(char_tensor, state) # Apply temperature to the logits prob_logits = F.log_softmax(prob_logits.view(-1).div(self.temperature), dim=0) diff --git a/vel/command/summary_command.py b/vel/command/summary_command.py index 37393b84..51ab11f7 100644 --- a/vel/command/summary_command.py +++ b/vel/command/summary_command.py @@ -1,21 +1,14 @@ -from vel.api import Source - - class ModelSummary: """ Just print model summary """ - def __init__(self, model, source: Source): - self.model = model - self.source = source + def __init__(self, model): + self.model_factory = model def run(self, *args): """ Print model summary """ - if self.source is None: - self.model.summary() - else: - x_data, y_data = next(iter(self.source.train_loader)) - self.model.summary(input_size=x_data.shape[1:]) + model = self.model_factory.instantiate() + model.summary() -def create(model, source=None): +def create(model): """ Vel factory function """ - return ModelSummary(model, source) + return ModelSummary(model) diff --git a/vel/data/text_character_loader.py b/vel/data/text_character_loader.py index 92f9d405..5eafc95b 100644 --- a/vel/data/text_character_loader.py +++ b/vel/data/text_character_loader.py @@ -109,6 +109,11 @@ def __init__(self, source, sequence_length: int, batch_size: int): def __getitem__(self, item): return self._loaders[item] + @property + def alphabet_size(self): + """ Size of the text alphabet """ + return len(self.alphabet) + @property def loader(self): """ Get a dict of loaders """ @@ -127,8 +132,3 @@ def create(source: Source, sequence_length: int = 64, batch_size: int = 64): sequence_length=sequence_length, batch_size=batch_size ) - - - - - diff --git a/vel/model/nlp/__init__.py b/vel/model/nlp/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/vel/model/nlp/language_model.py b/vel/model/nlp/language_model.py new file mode 100644 index 00000000..55b96adb --- /dev/null +++ b/vel/model/nlp/language_model.py @@ -0,0 +1,79 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F + +from vel.api import LossFunctionModel, ModelFactory, Network, BackboneNetwork, SizeHints, SizeHint + + +class LanguageModel(LossFunctionModel): + """ Language model - autoregressive generative model for text """ + + def __init__(self, alphabet_size: int, net: BackboneNetwork): + super().__init__() + + self.net = net + 
self.alphabet_size = alphabet_size + self.output_dim = self.alphabet_size + 1 + + self.net = net + self.output_layer = nn.Linear( + in_features=self.net.size_hints().assert_single().last(), + out_features=self.alphabet_size+1 + ) + + @property + def is_stateful(self) -> bool: + """ If the model has a state that needs to be fed between individual observations """ + return self.net.is_stateful + + def zero_state(self, batch_size): + """ Potential state for the model """ + return self.net.zero_state(batch_size) + + def forward(self, input_data: torch.Tensor, state=None) -> torch.Tensor: + r"""Defines the computation performed at every call. + + Should be overridden by all subclasses. + + .. note:: + Although the recipe for forward pass needs to be defined within + this function, one should call the :class:`Module` instance afterwards + instead of this since the former takes care of running the + registered hooks while the latter silently ignores them. + """ + if self.net.is_stateful: + output, new_state = self.net(input_data, state=state) + else: + output = self.net(input_data) + new_state = state + + return F.log_softmax(self.output_layer(output), dim=-1), new_state + + def loss_value(self, x_data, y_true, y_pred) -> torch.tensor: + """ Calculate a value of loss function """ + y_pred = y_pred.view(-1, y_pred.size(2)) + y_true = y_true.view(-1).to(torch.long) + return F.nll_loss(y_pred, y_true) + + +class LanguageModelFactory(ModelFactory): + def __init__(self, alphabet_size: int, net_factory: ModelFactory): + self.alphabet_size = alphabet_size + self.net_factory = net_factory + + def instantiate(self, **extra_args) -> Network: + size_hint = SizeHints(SizeHint(None, None)) + net = self.net_factory.instantiate(alphabet_size=self.alphabet_size, size_hint=size_hint) + + return LanguageModel( + alphabet_size=self.alphabet_size, + net=net + ) + + +def create(loader, net: ModelFactory): + """ Vel factory function """ + return LanguageModelFactory( + alphabet_size=loader.alphabet_size, + net_factory=net + ) diff --git a/vel/model/rnn/multilayer_rnn_sequence_model.py b/vel/model/rnn/multilayer_rnn_sequence_model.py index 959741a5..3f5c332a 100644 --- a/vel/model/rnn/multilayer_rnn_sequence_model.py +++ b/vel/model/rnn/multilayer_rnn_sequence_model.py @@ -4,7 +4,7 @@ import torch.nn.functional as F import torch.nn as nn -from vel.api import LossFunctionModel, ModelFactory, LinearBackboneModel +from vel.api import LossFunctionModel, ModelFactory from vel.module.rnn_layer import RnnLayer diff --git a/vel/module/input/embedding.py b/vel/module/input/embedding.py deleted file mode 100644 index 97927ff1..00000000 --- a/vel/module/input/embedding.py +++ /dev/null @@ -1,43 +0,0 @@ -import torch.nn as nn - -from vel.api import Network, LanguageSource, ModelFactory - - -class EmbeddingInput(Network): - """ Learnable Embedding input layer """ - - def __init__(self, alphabet_size: int, output_dim: int, pretrained: bool = False, frozen: bool = False, - source: LanguageSource = None): - super().__init__() - - self._output_dim = output_dim - self._alphabet_size = alphabet_size - self._pretrained = pretrained - self._frozen = frozen - self._source = source - - self.layer = nn.Embedding(self._alphabet_size, self._output_dim) - - def reset_weights(self): - if self._pretrained: - self.layer.weight.data.copy_(self._source.fields['text'].vocab.vectors) - - if self._frozen: - self.layer.weight.requires_grad = False - - @property - def output_dim(self) -> int: - """ Final dimension of model output """ - return 
self._output_dim - - def forward(self, input_data): - return self.layer(input_data) - - -def create(alphabet_size: int, output_dim: int, pretrained: bool = False, frozen: bool = False, - source: LanguageSource = None): - """ Vel factory function """ - def instantiate(**_): - return EmbeddingInput(alphabet_size, output_dim, pretrained=pretrained, frozen=frozen, source=source) - - return ModelFactory.generic(instantiate) diff --git a/vel/module/input/flatten.py b/vel/module/input/flatten.py index 5e140cc0..c69e9f02 100644 --- a/vel/module/input/flatten.py +++ b/vel/module/input/flatten.py @@ -1,10 +1,10 @@ from vel.module.layers import Flatten -from vel.api import ModelFactory, BackboneNetwork +from vel.api import Network -class FlattenInput(BackboneNetwork): +class FlattenInput(Network): """ Sequence input """ def __init__(self): super().__init__() diff --git a/vel/module/input/one_hot_encoding.py b/vel/module/input/one_hot_encoding.py index 125bdb47..15f4e961 100644 --- a/vel/module/input/one_hot_encoding.py +++ b/vel/module/input/one_hot_encoding.py @@ -1,8 +1,8 @@ -from vel.api import LinearBackboneModel, ModelFactory +from vel.api import Network from vel.module.layers import OneHotEncode -class OneHotEncodingInput(LinearBackboneModel): +class OneHotEncodingInput(Network): """ One-hot encoding input layer """ def __init__(self, alphabet_size: int): @@ -12,18 +12,6 @@ def __init__(self, alphabet_size: int): self.layer = OneHotEncode(self._alphabet_size) - @property - def output_dim(self) -> int: - """ Final dimension of model output """ - return self._alphabet_size - def forward(self, input_data): return self.layer(input_data) - -def create(alphabet_size: int): - """ Vel factory function """ - def instantiate(**_): - return OneHotEncodingInput(alphabet_size) - - return ModelFactory.generic(instantiate) diff --git a/vel/module/input/sequence.py b/vel/module/input/sequence.py deleted file mode 100644 index 51c50c82..00000000 --- a/vel/module/input/sequence.py +++ /dev/null @@ -1,21 +0,0 @@ -import torch.nn as nn - -from vel.api import ModelFactory, BackboneModel - - -class SequenceInput(BackboneModel): - """ Sequence input """ - def __init__(self, modules): - super().__init__() - self.model = nn.Sequential(*modules) - - def forward(self, input_data): - return self.model(input_data) - - -def create(modules): - """ Vel factory function """ - def instantiate(**_): - return SequenceInput([f.instantiate() for f in modules]) - - return ModelFactory.generic(instantiate) diff --git a/vel/net/layer/arch/parallel.py b/vel/net/layer/arch/parallel.py index de592f2a..c25cc0ec 100644 --- a/vel/net/layer/arch/parallel.py +++ b/vel/net/layer/arch/parallel.py @@ -34,7 +34,7 @@ def name_base(self) -> str: """ Base of layer name """ return "parallel" - def instantiate(self, name: str, direct_input: SizeHints, context: dict) -> Layer: + def instantiate(self, name: str, direct_input: SizeHints, context: dict, extra_args: dict) -> Layer: hints = direct_input.assert_tuple(len(self.layers)) layers = [] diff --git a/vel/net/layer/dropout.py b/vel/net/layer/dropout.py new file mode 100644 index 00000000..3d6df1f9 --- /dev/null +++ b/vel/net/layer/dropout.py @@ -0,0 +1,54 @@ +import torch.nn.functional as F +from vel.api import SizeHints +from vel.net.layer_base import Layer, LayerFactory + + +class DropoutLayer(Layer): + """ + During training, randomly zeroes some of the elements of the input + tensor with probability :attr:`p` using samples from a Bernoulli + distribution. 
+ + See :class:`~torch.nn.Dropout` for details. + """ + def __init__(self, name: str, input_size: SizeHints, p: float): + super().__init__(name) + + self.p = p + self.input_size = input_size + + def forward(self, direct, state: dict = None, context: dict = None): + return F.dropout(direct, p=self.p, training=self.training) + + def size_hints(self) -> SizeHints: + """ Size hints for this network """ + return self.input_size + + def extra_repr(self) -> str: + """Set the extra representation of the module""" + return "p={:.2f}".format(self.p) + + +class DropoutLayerFactory(LayerFactory): + """ Factory class for the Dropout layer """ + + def __init__(self, p: float): + self.p = p + + @property + def name_base(self) -> str: + """ Base of layer name """ + return "dropout" + + def instantiate(self, name: str, direct_input: SizeHints, context: dict, extra_args: dict) -> Layer: + """ Create a given layer object """ + return DropoutLayer( + name=name, + input_size=direct_input, + p=self.p + ) + + +def create(p: float): + """ Vel factory function """ + return DropoutLayerFactory(p) diff --git a/vel/net/layer/input/image_to_tensor.py b/vel/net/layer/input/image_to_tensor.py index 4924c33f..1f5adb9c 100644 --- a/vel/net/layer/input/image_to_tensor.py +++ b/vel/net/layer/input/image_to_tensor.py @@ -34,7 +34,7 @@ def name_base(self) -> str: """ Base of layer name """ return "image_to_tensor" - def instantiate(self, name: str, direct_input: SizeHints, context: dict) -> Layer: + def instantiate(self, name: str, direct_input: SizeHints, context: dict, extra_args: dict) -> Layer: """ Create a given layer object """ if self.shape is None: shape = direct_input.assert_single().shape() diff --git a/vel/net/layer/input/normalize.py b/vel/net/layer/input/normalize.py index 8da64fa7..91766a38 100644 --- a/vel/net/layer/input/normalize.py +++ b/vel/net/layer/input/normalize.py @@ -33,7 +33,7 @@ def name_base(self) -> str: """ Base of layer name """ return "image_to_tensor" - def instantiate(self, name: str, direct_input: SizeHints, context: dict) -> Layer: + def instantiate(self, name: str, direct_input: SizeHints, context: dict, extra_args: dict) -> Layer: """ Create a given layer object """ # Potential improvement here is to use either direct input or size parameter if self.shape is None: diff --git a/vel/net/layer/mlp.py b/vel/net/layer/mlp.py index 1be0d57b..d2f57b49 100644 --- a/vel/net/layer/mlp.py +++ b/vel/net/layer/mlp.py @@ -18,17 +18,18 @@ class MLP(Layer): """ Simple Multi-Layer-Perceptron network """ - def __init__(self, name: str, input_length: int, hidden_layers: typing.List[int], activation: str = 'tanh', + def __init__(self, name: str, input_size: SizeHints, hidden_layers: typing.List[int], activation: str = 'tanh', normalization: typing.Optional[str] = None): super().__init__(name) - self.input_length = input_length + self.input_size = input_size + self.input_length = input_size.assert_single().last() self.hidden_layers = hidden_layers self.activation = activation self.normalization = normalization layer_objects = [] - layer_sizes = zip([input_length] + hidden_layers, hidden_layers) + layer_sizes = zip([self.input_length] + hidden_layers, hidden_layers) for input_size, output_size in layer_sizes: layer_objects.append(nn.Linear(input_size, output_size)) @@ -39,9 +40,12 @@ def __init__(self, name: str, input_length: int, hidden_layers: typing.List[int] layer_objects.append(net_util.activation(activation)()) self.model = nn.Sequential(*layer_objects) - self.hidden_units = hidden_layers[-1] if 
hidden_layers else input_length + self.hidden_units = hidden_layers[-1] if hidden_layers else self.input_length + + self.output_size = input_size.assert_single().drop_last().append(self.hidden_units) def reset_weights(self): + """ Call proper initializers for the weights """ for m in self.modules(): if isinstance(m, nn.Linear): # init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') @@ -52,7 +56,8 @@ def forward(self, direct, state: dict = None, context: dict = None): return self.model(direct.float()) def size_hints(self) -> SizeHints: - return SizeHints(SizeHint(None, self.hidden_units)) + """ Size hints for this network """ + return SizeHints(self.output_size) class MLPFactory(LayerFactory): @@ -67,11 +72,11 @@ def name_base(self) -> str: """ Base of layer name """ return "mlp" - def instantiate(self, name: str, direct_input: SizeHints, context: dict) -> Layer: + def instantiate(self, name: str, direct_input: SizeHints, context: dict, extra_args: dict) -> Layer: """ Create a given layer object """ return MLP( name=name, - input_length=direct_input.assert_single().last(), + input_size=direct_input, hidden_layers=self.hidden_layers, activation=self.activation, normalization=self.normalization diff --git a/vel/net/layer/nlp/__init__.py b/vel/net/layer/nlp/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/vel/net/layer/nlp/alphabet_embedding.py b/vel/net/layer/nlp/alphabet_embedding.py new file mode 100644 index 00000000..2fec003a --- /dev/null +++ b/vel/net/layer/nlp/alphabet_embedding.py @@ -0,0 +1,52 @@ +import torch.nn as nn + +from vel.api import SizeHints +from vel.net.layer_base import Layer, LayerFactory + + +class AlphabetEmbeddingLayer(Layer): + """ + Encode incoming tensor encoded using certain alphabet into one-hot encoding + """ + def __init__(self, name: str, alphabet_size: int, dim: int, input_shape: SizeHints): + super().__init__(name) + + self.alphabet_size = alphabet_size + self.dim = dim + self.output_size = SizeHints(input_shape.assert_single().append(self.dim)) + + self.layer = nn.Embedding(self.alphabet_size + 1, self.dim) + + def forward(self, direct, state: dict = None, context: dict = None): + return self.layer(direct) + + def size_hints(self) -> SizeHints: + """ Size hints for this network """ + return self.output_size + + +class AlphabetEmbeddingLayerFactory(LayerFactory): + """ Factory class for the AlphabetOneHotEncode layer """ + + def __init__(self, dim: int): + self.dim = dim + + @property + def name_base(self) -> str: + """ Base of layer name """ + return "alphabet_embedding" + + def instantiate(self, name: str, direct_input: SizeHints, context: dict, extra_args: dict) -> Layer: + alphabet_size = extra_args['alphabet_size'] + + return AlphabetEmbeddingLayer( + name=name, + alphabet_size=alphabet_size, + dim=self.dim, + input_shape=direct_input + ) + + +def create(dim: int): + """ Vel factory function """ + return AlphabetEmbeddingLayerFactory(dim) diff --git a/vel/net/layer/nlp/alphabet_one_hot_encode.py b/vel/net/layer/nlp/alphabet_one_hot_encode.py new file mode 100644 index 00000000..b26745c5 --- /dev/null +++ b/vel/net/layer/nlp/alphabet_one_hot_encode.py @@ -0,0 +1,47 @@ +from vel.api import SizeHints +from vel.net.layer_base import Layer, LayerFactory + +from vel.util.tensor_util import one_hot_encoding + + +class AlphabetOneHotEncodeLayer(Layer): + """ + Encode incoming tensor encoded using certain alphabet into one-hot encoding + """ + def __init__(self, name: str, alphabet_size: int, input_shape: SizeHints): + 
super().__init__(name) + + self.alphabet_size = alphabet_size + self.output_size = SizeHints(input_shape.assert_single().append(self.alphabet_size + 1)) + + def forward(self, direct, state: dict = None, context: dict = None): + return one_hot_encoding(direct, num_labels=self.alphabet_size + 1) + + def size_hints(self) -> SizeHints: + """ Size hints for this network """ + return self.output_size + + +class AlphabetOneHotEncodeLayerFactory(LayerFactory): + """ Factory class for the AlphabetoneHotEncode layer """ + + def __init__(self): + pass + + @property + def name_base(self) -> str: + """ Base of layer name """ + return "alphabet_one_hot_encode" + + def instantiate(self, name: str, direct_input: SizeHints, context: dict, extra_args: dict) -> Layer: + alphabet_size = extra_args['alphabet_size'] + return AlphabetOneHotEncodeLayer( + name=name, + alphabet_size=alphabet_size, + input_shape=direct_input + ) + + +def create(): + """ Vel factory function """ + return AlphabetOneHotEncodeLayerFactory() diff --git a/vel/net/layer/rnn.py b/vel/net/layer/rnn.py new file mode 100644 index 00000000..7cbf9e13 --- /dev/null +++ b/vel/net/layer/rnn.py @@ -0,0 +1,126 @@ +import torch +import torch.nn as nn +import torch.nn.init as init + +from vel.api import SizeHints +from vel.net.layer_base import Layer, LayerFactory + + +class RnnLayer(Layer): + """ Single Recurrent Layer """ + def __init__(self, name: str, input_size: SizeHints, hidden_size: int, rnn_type: str, + bias: bool = True, bidirectional: bool = False, nonlinearity: str = 'tanh'): + super().__init__(name) + + self.input_size = input_size + self.input_length = input_size.assert_single().last() + self.hidden_size = hidden_size + self.rnn_type = rnn_type + + self.bias = bias + self.bidirectional = bidirectional + self.nonlinearity = nonlinearity + + if self.rnn_type == 'rnn': + self.rnn_cell = nn.RNN( + input_size=self.input_length, hidden_size=hidden_size, bias=bias, nonlinearity=nonlinearity, + bidirectional=bidirectional, batch_first=True + ) + elif self.rnn_type == 'lstm': + self.rnn_cell = nn.LSTM( + input_size=self.input_length, hidden_size=hidden_size, bias=bias, + bidirectional=bidirectional, batch_first=True + ) + elif self.rnn_type == 'gru': + self.rnn_cell = nn.GRU( + input_size=self.input_length, hidden_size=hidden_size, bias=bias, + bidirectional=bidirectional, batch_first=True + ) + + self.output_size = input_size.assert_single().drop_last().append(self.hidden_size) + + def reset_weights(self): + """ Call proper initializers for the weights """ + init.xavier_normal_(self.rnn_cell.weight_hh) + init.xavier_normal_(self.rnn_cell.weight_ih) + init.zeros_(self.rnn_cell.bias_ih) + init.zeros_(self.rnn_cell.bias_hh) + + @property + def is_stateful(self) -> bool: + """ If the model has a state that needs to be fed between individual observations """ + return True + + @property + def state_dim(self) -> int: + """ Dimension of model state """ + if self.rnn_type == 'lstm': + return 2 * self.hidden_size + else: + return self.hidden_size + + def zero_state(self, batch_size): + """ Potential state for the model """ + return {self.name: torch.zeros(batch_size, self.state_dim)} + + def forward(self, input_data, state: dict, context: dict = None): + """ Forward propagation of a single layer """ + + if self.rnn_type == 'lstm': + state_tensor = state[self.name].unsqueeze(0) + hidden_state, cell_state = torch.split(state_tensor, self.hidden_size, dim=2) + output, (hidden_state, cell_state) = self.rnn_cell( + input_data, (hidden_state.contiguous(), 
cell_state.contiguous()) + ) + new_state = torch.cat([hidden_state, cell_state], dim=2) + return output, {self.name: new_state[0]} + else: + state_tensor = state[self.name].unsqueeze(0) + output, new_state = self.rnn_cell(input_data, state_tensor) + return output, {self.name: new_state[0]} + + def size_hints(self) -> SizeHints: + """ Size hints for this network """ + return SizeHints(self.output_size) + + +class RnnLayerFactory(LayerFactory): + """ Factory class for the RnnLayer """ + + def __init__(self, hidden_size: int, rnn_type: str, bias: bool = True, bidirectional: bool = False, + nonlinearity: str = 'tanh'): + self.hidden_size = hidden_size + self.rnn_type = rnn_type + + self.bias = bias + self.bidirectional = bidirectional + self.nonlinearity = nonlinearity + + @property + def name_base(self) -> str: + """ Base of layer name """ + return "rnn" + + def instantiate(self, name: str, direct_input: SizeHints, context: dict, extra_args: dict) -> Layer: + """ Create instance of 'RnnLayer' """ + return RnnLayer( + name=name, + input_size=direct_input, + hidden_size=self.hidden_size, + rnn_type=self.rnn_type, + bias=self.bias, + bidirectional=self.bidirectional, + nonlinearity=self.nonlinearity + ) + + +def create(hidden_size: int, rnn_type: str, bias: bool = True, bidirectional: bool = False, + nonlinearity: str = 'tanh'): + """ Vel factory function """ + return RnnLayerFactory( + hidden_size=hidden_size, + rnn_type=rnn_type, + bias=bias, + bidirectional=bidirectional, + nonlinearity=nonlinearity + ) diff --git a/vel/net/layer/util/concat.py b/vel/net/layer/util/concat.py index d80e2d57..7d2d7b57 100644 --- a/vel/net/layer/util/concat.py +++ b/vel/net/layer/util/concat.py @@ -29,7 +29,7 @@ def name_base(self) -> str: """ Base of layer name """ return "concat" - def instantiate(self, name: str, direct_input: SizeHints, context: dict) -> Layer: + def instantiate(self, name: str, direct_input: SizeHints, context: dict, extra_args: dict) -> Layer: inputs = direct_input.assert_tuple() result = [] diff --git a/vel/net/layer/util/repeat.py b/vel/net/layer/util/repeat.py index 32ca7ede..9fda8050 100644 --- a/vel/net/layer/util/repeat.py +++ b/vel/net/layer/util/repeat.py @@ -26,7 +26,7 @@ def name_base(self) -> str: """ Base of layer name """ return "repeat_tensor" - def instantiate(self, name: str, direct_input: SizeHints, context: dict) -> Layer: + def instantiate(self, name: str, direct_input: SizeHints, context: dict, extra_args: dict) -> Layer: return RepeatTensor( name=name, times=self.times, diff --git a/vel/net/layer_base.py b/vel/net/layer_base.py index 62dcabcb..d1be235f 100644 --- a/vel/net/layer_base.py +++ b/vel/net/layer_base.py @@ -1,6 +1,4 @@ -import typing - -from vel.api import BackboneNetwork, SizeHints, SizeHint +from vel.api import BackboneNetwork, SizeHints class Layer(BackboneNetwork): @@ -8,10 +6,6 @@ def __init__(self, name: str): super().__init__() self.name = name - def state_size_hints(self) -> typing.Dict[str, SizeHint]: - """ Size hints for state part of this network """ - return {} - def forward(self, direct, state: dict = None, context: dict = None): """ Forward propagation of a single layer """ raise NotImplementedError @@ -25,7 +19,7 @@ def name_base(self) -> str: """ Base of layer name """ raise NotImplementedError - def instantiate(self, name: str, direct_input: SizeHints, context: dict) -> Layer: + def instantiate(self, name: str, direct_input: SizeHints, context: dict, extra_args: dict) -> Layer: """ Create a given layer object """ raise NotImplementedError 
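
The `extra_args` dict added to `LayerFactory.instantiate` above is how per-model settings reach individual layers: `LanguageModelFactory` passes `alphabet_size=...` into the net factory, and (as the modular.py change below shows) those keyword arguments are forwarded to every layer factory, which is how `AlphabetEmbeddingLayerFactory` and `AlphabetOneHotEncodeLayerFactory` pick up the alphabet size. A minimal sketch of a custom layer following the same pattern might look like the code below; `ScaleLayer`, `ScaleLayerFactory` and the `factor` key are hypothetical illustrations, not part of this patch.

from vel.api import SizeHints
from vel.net.layer_base import Layer, LayerFactory


class ScaleLayer(Layer):
    """ Hypothetical layer that multiplies its input by a constant factor """

    def __init__(self, name: str, input_size: SizeHints, factor: float):
        super().__init__(name)
        self.input_size = input_size
        self.factor = factor

    def forward(self, direct, state: dict = None, context: dict = None):
        # Stateless element-wise transformation, so the shape is preserved
        return direct * self.factor

    def size_hints(self) -> SizeHints:
        """ Size hints for this network """
        return self.input_size


class ScaleLayerFactory(LayerFactory):
    """ Factory class for the hypothetical Scale layer """

    @property
    def name_base(self) -> str:
        """ Base of layer name """
        return "scale"

    def instantiate(self, name: str, direct_input: SizeHints, context: dict, extra_args: dict) -> Layer:
        # 'factor' would arrive through the extra_args dict that the modular network
        # factory forwards from its instantiate(**extra_args) call (hypothetical key)
        return ScaleLayer(name=name, input_size=direct_input, factor=extra_args.get('factor', 1.0))


def create():
    """ Vel factory function """
    return ScaleLayerFactory()

Such a factory would be listed under `layers:` in a modular-network config just like `vel.net.layer.dropout` or `vel.net.layer.rnn` in the gen_shakespeare.yaml example above.
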
diff --git a/vel/net/modular.py b/vel/net/modular.py index c23bf0a6..517d2c2c 100644 --- a/vel/net/modular.py +++ b/vel/net/modular.py @@ -3,10 +3,34 @@ import torch.nn as nn from vel.api import BackboneNetwork, ModelFactory, SizeHints +from vel.util.tensor_util import to_device from .layer_base import LayerFactory -def instantiate_layers(layers: [LayerFactory], size_hint: SizeHints) -> nn.Module: +class ModularSequential(nn.Module): + """ Modification of nn.Sequential for the purpose of modular networks """ + def __init__(self, layers: collections.OrderedDict): + super().__init__() + + self._layers = [] + + for key, module in layers.items(): + self.add_module(key, module) + self._layers.append(module) + + def __len__(self): + return len(self._layers) + + def __getitem__(self, item): + return self._layers[item] + + def forward(self, direct, state: dict = None, context: dict = None): + for layer in self._layers: + direct = layer(direct, state=state, context=context) + return direct + + +def instantiate_layers(layers: [LayerFactory], size_hint: SizeHints, extra_args: dict) -> nn.Module: """ Instantiate list of layer factories into PyTorch Module """ module_dict = collections.OrderedDict() context = {} @@ -15,12 +39,12 @@ def instantiate_layers(layers: [LayerFactory], size_hint: SizeHints) -> nn.Modul counter = idx + 1 name = "{}_{:04d}".format(layer_factory.name_base, counter) - layer = layer_factory.instantiate(name=name, direct_input=size_hint, context=context) + layer = layer_factory.instantiate(name=name, direct_input=size_hint, context=context, extra_args=extra_args) size_hint = layer.size_hints() module_dict[name] = layer - return nn.Sequential(module_dict) + return ModularSequential(module_dict) class ModularNetwork(BackboneNetwork): @@ -94,12 +118,15 @@ def reset_state(self, state, dones): """ Reset the state after the episode has been terminated """ raise NotImplementedError - def forward(self, input_data, state): + def forward(self, input_data, state=None): data = input_data context = {} output_state = {} + if state is None: + state = to_device(self.zero_state(input_data.size(0)), input_data.device) + for layer in self.layers: if layer.is_stateful: data, new_state = layer(data, state=state, context=context) @@ -120,7 +147,7 @@ def instantiate(self, size_hint=None, **extra_args) -> BackboneNetwork: if size_hint is None: size_hint = SizeHints() - layers = instantiate_layers(self.layers, size_hint=size_hint) + layers = instantiate_layers(self.layers, size_hint=size_hint, extra_args=extra_args) is_stateful = any(l.is_stateful for l in layers) if is_stateful: diff --git a/vel/rl/layer/double_nature_cnn.py b/vel/rl/layer/double_nature_cnn.py index 2e269783..3f78db01 100644 --- a/vel/rl/layer/double_nature_cnn.py +++ b/vel/rl/layer/double_nature_cnn.py @@ -110,7 +110,7 @@ def name_base(self) -> str: """ Base of layer name """ return "double_nature_cnn" - def instantiate(self, name: str, direct_input: SizeHints, context: dict) -> Layer: + def instantiate(self, name: str, direct_input: SizeHints, context: dict, extra_args: dict) -> Layer: (b, c, w, h) = direct_input.assert_single(4) return DoubleNatureCnn( diff --git a/vel/rl/layer/double_noisy_nature_cnn.py b/vel/rl/layer/double_noisy_nature_cnn.py index acade064..f0740be8 100644 --- a/vel/rl/layer/double_noisy_nature_cnn.py +++ b/vel/rl/layer/double_noisy_nature_cnn.py @@ -128,7 +128,7 @@ def name_base(self) -> str: """ Base of layer name """ return "double_noisy_nature_cnn" - def instantiate(self, name: str, direct_input: SizeHints, 
context: dict) -> Layer: + def instantiate(self, name: str, direct_input: SizeHints, context: dict, extra_args: dict) -> Layer: (b, c, w, h) = direct_input.assert_single(4) return DoubleNoisyNatureCnn( diff --git a/vel/rl/layer/nature_cnn.py b/vel/rl/layer/nature_cnn.py index f2503a62..b9845a16 100644 --- a/vel/rl/layer/nature_cnn.py +++ b/vel/rl/layer/nature_cnn.py @@ -94,7 +94,7 @@ def name_base(self) -> str: """ Base of layer name """ return "nature_cnn" - def instantiate(self, name: str, direct_input: SizeHints, context: dict) -> Layer: + def instantiate(self, name: str, direct_input: SizeHints, context: dict, extra_args: dict) -> Layer: (b, c, w, h) = direct_input.assert_single(4) return NatureCnn( diff --git a/vel/rl/layer/nature_cnn_small.py b/vel/rl/layer/nature_cnn_small.py index c9ac77bb..e8bc8928 100644 --- a/vel/rl/layer/nature_cnn_small.py +++ b/vel/rl/layer/nature_cnn_small.py @@ -88,7 +88,7 @@ def name_base(self) -> str: """ Base of layer name """ return "nature_cnn_small" - def instantiate(self, name: str, direct_input: SizeHints, context: dict) -> Layer: + def instantiate(self, name: str, direct_input: SizeHints, context: dict, extra_args: dict) -> Layer: (b, c, w, h) = direct_input.assert_single(4) return NatureCnnSmall( diff --git a/vel/rl/layer/rnn_cell.py b/vel/rl/layer/rnn_cell.py index 678b003f..a509072d 100644 --- a/vel/rl/layer/rnn_cell.py +++ b/vel/rl/layer/rnn_cell.py @@ -82,7 +82,7 @@ def __init__(self, hidden_size: int, rnn_type: str, bias: bool = True, nonlinear def name_base(self) -> str: return "rnn_cell" - def instantiate(self, name: str, direct_input: SizeHints, context: dict) -> Layer: + def instantiate(self, name: str, direct_input: SizeHints, context: dict, extra_args: dict) -> Layer: input_size = direct_input.assert_single().last() return RnnCell( diff --git a/vel/util/summary.py b/vel/util/summary.py deleted file mode 100644 index d7608601..00000000 --- a/vel/util/summary.py +++ /dev/null @@ -1,87 +0,0 @@ -""" -Code based on: https://github.com/sksq96/pytorch-summary/blob/master/torchsummary/torchsummary.py -""" -import torch -import torch.nn as nn -from torch.autograd import Variable - -from collections import OrderedDict - - -def summary(model, input_size): - """ Print summary of the model """ - def register_hook(module): - def hook(module, input, output): - class_name = str(module.__class__).split('.')[-1].split("'")[0] - module_idx = len(summary) - - m_key = '%s-%i' % (class_name, module_idx + 1) - summary[m_key] = OrderedDict() - summary[m_key]['input_shape'] = list(input[0].size()) - summary[m_key]['input_shape'][0] = -1 - if isinstance(output, (list, tuple)): - summary[m_key]['output_shape'] = [[-1] + list(o.size())[1:] for o in output] - else: - summary[m_key]['output_shape'] = list(output.size()) - summary[m_key]['output_shape'][0] = -1 - - params = 0 - if hasattr(module, 'weight') and hasattr(module.weight, 'size'): - params += torch.prod(torch.LongTensor(list(module.weight.size()))) - summary[m_key]['trainable'] = module.weight.requires_grad - if hasattr(module, 'bias') and hasattr(module.bias, 'size'): - params += torch.prod(torch.LongTensor(list(module.bias.size()))) - summary[m_key]['nb_params'] = params - - if (not isinstance(module, nn.Sequential) and - not isinstance(module, nn.ModuleList) and - not (module == model)): - hooks.append(module.register_forward_hook(hook)) - - if torch.cuda.is_available(): - dtype = torch.cuda.FloatTensor - model = model.cuda() - else: - dtype = torch.FloatTensor - model = model.cpu() - - # check if there 
are multiple inputs to the network - if isinstance(input_size[0], (list, tuple)): - x = [Variable(torch.rand(2, *in_size)).type(dtype) for in_size in input_size] - else: - x = Variable(torch.rand(2, *input_size)).type(dtype) - - # print(type(x[0])) - # create properties - summary = OrderedDict() - hooks = [] - # register hook - model.apply(register_hook) - # make a forward pass - # print(x.shape) - model(x) - # remove these hooks - for h in hooks: - h.remove() - - print('----------------------------------------------------------------') - line_new = '{:>20} {:>25} {:>15}'.format('Layer (type)', 'Output Shape', 'Param #') - print(line_new) - print('================================================================') - total_params = 0 - trainable_params = 0 - for layer in summary: - # input_shape, output_shape, trainable, nb_params - line_new = '{:>20} {:>25} {:>15}'.format(layer, str(summary[layer]['output_shape']), - '{0:,}'.format(summary[layer]['nb_params'])) - total_params += summary[layer]['nb_params'] - if 'trainable' in summary[layer]: - if summary[layer]['trainable']: - trainable_params += summary[layer]['nb_params'] - print(line_new) - print('================================================================') - print('Total params: {0:,}'.format(total_params)) - print('Trainable params: {0:,}'.format(trainable_params)) - print('Non-trainable params: {0:,}'.format(total_params - trainable_params)) - print('----------------------------------------------------------------') - # return summary From 0d5e6b153547a0615ac9f85b3cd63e86f032a640 Mon Sep 17 00:00:00 2001 From: Million Integrals Date: Sun, 6 Oct 2019 21:38:56 -0700 Subject: [PATCH 123/162] Deleting abandoned files. --- vel/data/augmentation/tta/__init__.py | 0 vel/data/augmentation/tta/train_tta.py | 107 ---- vel/rl/test/__init__.py | 0 vel/rl/test/test_integration.py | 679 ------------------------- 4 files changed, 786 deletions(-) delete mode 100644 vel/data/augmentation/tta/__init__.py delete mode 100644 vel/data/augmentation/tta/train_tta.py delete mode 100644 vel/rl/test/__init__.py delete mode 100644 vel/rl/test/test_integration.py diff --git a/vel/data/augmentation/tta/__init__.py b/vel/data/augmentation/tta/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/vel/data/augmentation/tta/train_tta.py b/vel/data/augmentation/tta/train_tta.py deleted file mode 100644 index 78621428..00000000 --- a/vel/data/augmentation/tta/train_tta.py +++ /dev/null @@ -1,107 +0,0 @@ -# import torch -# import torch.utils.data as tdata -# -# import vel.api.data as wdata -# -# -# class TrainTTALoader: -# def __init__(self, n_augmentations, batch_size, data_source, augmentations, num_workers): -# self.n_augmentations = n_augmentations -# self.data_source = data_source -# self.augmentations = augmentations -# -# self.val_ds = wdata.DataFlow(self.data_source, augmentations, tag='val') -# self.train_ds = wdata.DataFlow(self.data_source, augmentations, tag='train') -# -# self.val_loader = tdata.DataLoader( -# self.val_ds, batch_size=batch_size, shuffle=False, num_workers=num_workers -# ) -# -# self.train_loader = tdata.DataLoader( -# self.train_ds, batch_size=batch_size, shuffle=False, num_workers=num_workers -# ) -# -# def __len__(self): -# return (1 + self.n_augmentations) * len(self.val_loader) -# -# def __iter__(self): -# iterlist = [iter(self.val_loader)] -# -# for _ in range(self.n_augmentations): -# iterlist.append(iter(self.train_loader)) -# -# for _ in range(len(self.val_loader)): -# for iterator in iterlist: -# yield 
next(iterator) -# -# -# class TrainTTAAccumulator: -# def __init__(self, metric_accumulator, n_augmentations, data_source): -# self.metric_accumulator = metric_accumulator -# -# self.source_elements = len(data_source) -# self.n_augmentations = n_augmentations -# -# self.data = None -# self.target = None -# -# self.accumulated_output = [] -# self.accumulated_context = [] -# -# self.index = 0 -# -# # def calculate(self, data, target, output, context): -# def calculate(self, data_dict): -# """ Accumulate results """ -# data = data_dict['data'] -# target = data_dict['target'] -# output = data_dict['output'] -# -# if self.index == 0: -# self.data = data -# -# self.target = target -# -# self.accumulated_output.append(output) -# self.accumulated_context.append(context) -# -# self.index += 1 -# -# if self.index == (1 + self.n_augmentations): -# new_output = torch.mean(torch.stack(self.accumulated_output, dim=-1), dim=-1) -# new_context = { -# k: torch.mean(torch.stack([c[k] for c in self.accumulated_context], dim=-1), dim=-1) -# for k in context.keys() -# } -# -# self.metric_accumulator.calculate(self.data, self.target, new_output, new_context) -# -# self.index = 0 -# self.data = None -# self.target = None -# self.accumulated_output = [] -# self.accumulated_context = [] -# -# -# class TrainTTA: -# """ Test time augmentation that generates additional samples according to the training set augmentations """ -# def __init__(self, n_augmentations): -# self.n_augmentations = n_augmentations -# -# def loader(self, data_source, augmentations, batch_size, num_workers): -# """ Return loader for the test-time-augmentation set """ -# return TrainTTALoader( -# n_augmentations=self.n_augmentations, -# batch_size=batch_size, -# data_source=data_source, -# augmentations=augmentations, -# num_workers=num_workers -# ) -# -# def accumulator(self, metric_accumulator, val_source): -# """ Reset internal state """ -# return TrainTTAAccumulator(metric_accumulator, self.n_augmentations, val_source) -# -# -# def create(n_augmentations): -# return TrainTTA(n_augmentations) diff --git a/vel/rl/test/__init__.py b/vel/rl/test/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/vel/rl/test/test_integration.py b/vel/rl/test/test_integration.py deleted file mode 100644 index cc488751..00000000 --- a/vel/rl/test/test_integration.py +++ /dev/null @@ -1,679 +0,0 @@ -# import torch -# import torch.optim as optim -# -# from vel.module.input.image_to_tensor import ImageToTensorFactory -# from vel.module.input.normalize_observations import NormalizeObservationsFactory -# from vel.rl.buffer.circular_replay_buffer import CircularReplayBuffer -# from vel.rl.buffer.prioritized_circular_replay_buffer import PrioritizedCircularReplayBuffer -# from vel.rl.command.rl_train_command import FrameTracker -# from vel.rl.env_roller.step_env_roller import StepEnvRoller -# from vel.rl.env_roller.trajectory_replay_env_roller import TrajectoryReplayEnvRoller -# from vel.rl.env_roller.transition_replay_env_roller import TransitionReplayEnvRoller -# from vel.rl.metrics import EpisodeRewardMetric -# from vel.rl.module.noise.eps_greedy import EpsGreedy -# from vel.rl.module.noise.ou_noise import OuNoise -# from vel.function.linear import LinearSchedule -# from vel.function.linear_and_constant import LinearAndConstantSchedule -# from vel.util.random import set_seed -# -# from vel.rl.env.classic_atari import ClassicAtariEnv -# from vel.rl.env.mujoco import MujocoEnv -# from vel.rl.vecenv.subproc import SubprocVecEnvWrapper -# from 
vel.rl.vecenv.dummy import DummyVecEnvWrapper -# -# from vel.rl.policy.stochastic_policy import StochasticPolicyFactory -# # from vel.rl.model.q_stochastic_policy_model import QStochasticPolicyModelFactory -# # from vel.rl.model.q_model import QModelFactory -# # from vel.rl.model.deterministic_policy_model import DeterministicPolicyModelFactory -# # from vel.rl.model.stochastic_policy_model_separate import StochasticPolicyModelSeparateFactory -# -# from vel.rl.backbone.nature_cnn import NatureCnnFactory -# from vel.rl.backbone.mlp import MLPFactory -# -# from vel.rl.reinforcer.on_policy_iteration_reinforcer import ( -# OnPolicyIterationReinforcer, OnPolicyIterationReinforcerSettings -# ) -# -# from vel.rl.reinforcer.buffered_off_policy_iteration_reinforcer import ( -# BufferedOffPolicyIterationReinforcer, BufferedOffPolicyIterationReinforcerSettings -# ) -# -# from vel.rl.reinforcer.buffered_mixed_policy_iteration_reinforcer import ( -# BufferedMixedPolicyIterationReinforcer, BufferedMixedPolicyIterationReinforcerSettings -# ) -# -# from vel.rl.algo.dqn import DeepQLearning -# from vel.rl.algo.policy_gradient.a2c import A2CPolicyGradient -# from vel.rl.algo.policy_gradient.ppo import PpoPolicyGradient -# from vel.rl.algo.policy_gradient.trpo import TrpoPolicyGradient -# from vel.rl.algo.policy_gradient.acer import AcerPolicyGradient -# from vel.rl.algo.policy_gradient.ddpg import DeepDeterministicPolicyGradient -# -# from vel.api.info import TrainingInfo, EpochInfo -# -# -# CPU_DEVICE = torch.device('cpu') -# -# -# def test_a2c_breakout(): -# """ -# Simple 1 iteration of a2c breakout -# """ -# seed = 1001 -# -# # Set random seed in python std lib, numpy and pytorch -# set_seed(seed) -# -# # Create 16 environments evaluated in parallel in sub processess with all usual DeepMind wrappers -# # These are just helper functions for that -# vec_env = SubprocVecEnvWrapper( -# ClassicAtariEnv('BreakoutNoFrameskip-v4'), frame_history=4 -# ).instantiate(parallel_envs=16, seed=seed) -# -# # Again, use a helper to create a model -# # But because model is owned by the reinforcer, model should not be accessed using this variable -# # but from reinforcer.model property -# policy = StochasticPolicyFactory( -# input_block=ImageToTensorFactory(), -# backbone=NatureCnnFactory(input_width=84, input_height=84, input_channels=4) -# ).instantiate(action_space=vec_env.action_space) -# -# # Reinforcer - an object managing the learning process -# reinforcer = OnPolicyIterationReinforcer( -# device=CPU_DEVICE, -# settings=OnPolicyIterationReinforcerSettings( -# batch_size=256, -# number_of_steps=5 -# ), -# policy=policy, -# algo=A2CPolicyGradient( -# entropy_coefficient=0.01, -# value_coefficient=0.5, -# discount_factor=0.99, -# max_grad_norm=0.5 -# ), -# env_roller=StepEnvRoller( -# environment=vec_env, -# policy=policy, -# device=CPU_DEVICE -# ) -# ) -# -# # Model optimizer -# optimizer = optim.RMSprop(reinforcer.policy.parameters(), lr=7.0e-4, eps=1e-3) -# -# # Overall information store for training information -# training_info = TrainingInfo( -# metrics=[ -# EpisodeRewardMetric('episode_rewards'), # Calculate average reward from episode -# ], -# callbacks=[] # Print live metrics every epoch to standard output -# ) -# -# # A bit of training initialization bookkeeping... 
-# training_info.initialize() -# reinforcer.initialize_training(training_info) -# training_info.on_train_begin() -# -# # Let's make 100 batches per epoch to average metrics nicely -# num_epochs = 1 -# -# # Normal handrolled training loop -# for i in range(1, num_epochs+1): -# epoch_info = EpochInfo( -# training_info=training_info, -# global_epoch_idx=i, -# batches_per_epoch=1, -# optimizer=optimizer -# ) -# -# reinforcer.train_epoch(epoch_info, interactive=False) -# -# training_info.on_train_end() -# -# -# def test_ppo_breakout(): -# """ -# Simple 1 iteration of ppo breakout -# """ -# device = torch.device('cpu') -# seed = 1001 -# -# # Set random seed in python std lib, numpy and pytorch -# set_seed(seed) -# -# # Create 16 environments evaluated in parallel in sub processess with all usual DeepMind wrappers -# # These are just helper functions for that -# vec_env = SubprocVecEnvWrapper( -# ClassicAtariEnv('BreakoutNoFrameskip-v4'), frame_history=4 -# ).instantiate(parallel_envs=8, seed=seed) -# -# # Again, use a helper to create a model -# # But because model is owned by the reinforcer, model should not be accessed using this variable -# # but from reinforcer.model property -# policy = StochasticPolicyFactory( -# input_block=ImageToTensorFactory(), -# backbone=NatureCnnFactory(input_width=84, input_height=84, input_channels=4) -# ).instantiate(action_space=vec_env.action_space) -# -# # Reinforcer - an object managing the learning process -# reinforcer = OnPolicyIterationReinforcer( -# device=device, -# settings=OnPolicyIterationReinforcerSettings( -# number_of_steps=12, -# batch_size=4, -# experience_replay=2, -# ), -# policy=policy, -# algo=PpoPolicyGradient( -# entropy_coefficient=0.01, -# value_coefficient=0.5, -# max_grad_norm=0.5, -# cliprange=LinearSchedule(0.1, 0.0), -# discount_factor=0.99, -# normalize_advantage=True -# ), -# env_roller=StepEnvRoller( -# environment=vec_env, -# policy=policy, -# device=device, -# ) -# ) -# -# # Model optimizer -# # optimizer = optim.RMSprop(reinforcer.model.parameters(), lr=7.0e-4, eps=1e-3) -# optimizer = optim.Adam(reinforcer.policy.parameters(), lr=2.5e-4, eps=1e-5) -# -# # Overall information store for training information -# training_info = TrainingInfo( -# metrics=[ -# EpisodeRewardMetric('episode_rewards'), # Calculate average reward from episode -# ], -# callbacks=[ -# FrameTracker(100_000) -# ] # Print live metrics every epoch to standard output -# ) -# -# # A bit of training initialization bookkeeping... 
-# training_info.initialize() -# reinforcer.initialize_training(training_info) -# training_info.on_train_begin() -# -# # Let's make 100 batches per epoch to average metrics nicely -# num_epochs = 1 -# -# # Normal handrolled training loop -# for i in range(1, num_epochs+1): -# epoch_info = EpochInfo( -# training_info=training_info, -# global_epoch_idx=i, -# batches_per_epoch=1, -# optimizer=optimizer -# ) -# -# reinforcer.train_epoch(epoch_info, interactive=False) -# -# training_info.on_train_end() - - -# def test_dqn_breakout(): -# """ -# Simple 1 iteration of DQN breakout -# """ -# device = torch.device('cpu') -# seed = 1001 -# -# # Set random seed in python std lib, numpy and pytorch -# set_seed(seed) -# -# # Only single environment for DQN -# vec_env = DummyVecEnvWrapper( -# ClassicAtariEnv('BreakoutNoFrameskip-v4'), frame_history=4 -# ).instantiate(parallel_envs=1, seed=seed) -# -# # Again, use a helper to create a model -# # But because model is owned by the reinforcer, model should not be accessed using this variable -# # but from reinforcer.model property -# model_factory = QModelFactory( -# input_block=ImageToTensorFactory(), -# backbone=NatureCnnFactory(input_width=84, input_height=84, input_channels=4) -# ) -# -# # Reinforcer - an object managing the learning process -# reinforcer = BufferedOffPolicyIterationReinforcer( -# device=device, -# settings=BufferedOffPolicyIterationReinforcerSettings( -# rollout_steps=4, -# training_steps=1, -# ), -# environment=vec_env, -# algo=DeepQLearning( -# model_factory=model_factory, -# double_dqn=False, -# target_update_frequency=10_000, -# discount_factor=0.99, -# max_grad_norm=0.5 -# ), -# model=model_factory.instantiate(action_space=vec_env.action_space), -# env_roller=TransitionReplayEnvRoller( -# environment=vec_env, -# device=device, -# replay_buffer=CircularReplayBuffer( -# buffer_capacity=100, -# buffer_initial_size=100, -# num_envs=vec_env.num_envs, -# observation_space=vec_env.observation_space, -# action_space=vec_env.action_space, -# frame_stack_compensation=True, -# frame_history=4 -# ), -# action_noise=EpsGreedy( -# epsilon=LinearAndConstantSchedule( -# initial_value=1.0, final_value=0.1, end_of_interpolation=0.1 -# ), -# environment=vec_env -# ) -# ) -# ) -# -# # Model optimizer -# optimizer = optim.RMSprop(reinforcer.model.parameters(), lr=2.5e-4, alpha=0.95, momentum=0.95, eps=1e-3) -# -# # Overall information store for training information -# training_info = TrainingInfo( -# metrics=[ -# EpisodeRewardMetric('episode_rewards'), # Calculate average reward from episode -# ], -# callbacks=[ -# FrameTracker(100_000) -# ] # Print live metrics every epoch to standard output -# ) -# -# # A bit of training initialization bookkeeping... 
-# training_info.initialize() -# reinforcer.initialize_training(training_info) -# training_info.on_train_begin() -# -# # Let's make 100 batches per epoch to average metrics nicely -# num_epochs = 1 -# -# # Normal handrolled training loop -# for i in range(1, num_epochs+1): -# epoch_info = EpochInfo( -# training_info=training_info, -# global_epoch_idx=i, -# batches_per_epoch=1, -# optimizer=optimizer -# ) -# -# reinforcer.train_epoch(epoch_info, interactive=False) -# -# training_info.on_train_end() -# -# -# def test_prioritized_dqn_breakout(): -# """ -# Simple 1 iteration of DQN prioritized replay breakout -# """ -# device = torch.device('cpu') -# seed = 1001 -# -# # Set random seed in python std lib, numpy and pytorch -# set_seed(seed) -# -# # Only single environment for DQN -# vec_env = DummyVecEnvWrapper( -# ClassicAtariEnv('BreakoutNoFrameskip-v4'), frame_history=4 -# ).instantiate(parallel_envs=1, seed=seed) -# -# # Again, use a helper to create a model -# # But because model is owned by the reinforcer, model should not be accessed using this variable -# # but from reinforcer.model property -# model_factory = QModelFactory( -# input_block=ImageToTensorFactory(), -# backbone=NatureCnnFactory(input_width=84, input_height=84, input_channels=4) -# ) -# -# # Reinforcer - an object managing the learning process -# reinforcer = BufferedOffPolicyIterationReinforcer( -# device=device, -# settings=BufferedOffPolicyIterationReinforcerSettings( -# rollout_steps=4, -# training_steps=1, -# ), -# environment=vec_env, -# algo=DeepQLearning( -# model_factory=model_factory, -# double_dqn=False, -# target_update_frequency=10_000, -# discount_factor=0.99, -# max_grad_norm=0.5 -# ), -# model=model_factory.instantiate(action_space=vec_env.action_space), -# env_roller=TransitionReplayEnvRoller( -# environment=vec_env, -# device=device, -# replay_buffer=PrioritizedCircularReplayBuffer( -# buffer_capacity=100, -# buffer_initial_size=100, -# num_envs=vec_env.num_envs, -# observation_space=vec_env.observation_space, -# action_space=vec_env.action_space, -# priority_exponent=0.6, -# priority_weight=LinearSchedule( -# initial_value=0.4, -# final_value=1.0 -# ), -# priority_epsilon=1.0e-6, -# frame_stack_compensation=True, -# frame_history=4 -# ), -# action_noise=EpsGreedy( -# epsilon=LinearAndConstantSchedule( -# initial_value=1.0, final_value=0.1, end_of_interpolation=0.1 -# ), -# environment=vec_env -# ) -# ) -# ) -# -# # Model optimizer -# optimizer = optim.RMSprop(reinforcer.model.parameters(), lr=2.5e-4, alpha=0.95, momentum=0.95, eps=1e-3) -# -# # Overall information store for training information -# training_info = TrainingInfo( -# metrics=[ -# EpisodeRewardMetric('episode_rewards'), # Calculate average reward from episode -# ], -# callbacks=[ -# FrameTracker(100_000) -# ] # Print live metrics every epoch to standard output -# ) -# -# # A bit of training initialization bookkeeping... 
-# training_info.initialize() -# reinforcer.initialize_training(training_info) -# training_info.on_train_begin() -# -# # Let's make 100 batches per epoch to average metrics nicely -# num_epochs = 1 -# -# # Normal handrolled training loop -# for i in range(1, num_epochs+1): -# epoch_info = EpochInfo( -# training_info=training_info, -# global_epoch_idx=i, -# batches_per_epoch=1, -# optimizer=optimizer -# ) -# -# reinforcer.train_epoch(epoch_info, interactive=False) -# -# training_info.on_train_end() -# -# -# def test_ddpg_bipedal_walker(): -# """ -# 1 iteration of DDPG bipedal walker environment -# """ -# device = torch.device('cpu') -# seed = 1001 -# -# # Set random seed in python std lib, numpy and pytorch -# set_seed(seed) -# -# # Only single environment for DDPG -# -# vec_env = DummyVecEnvWrapper( -# MujocoEnv('BipedalWalker-v2') -# ).instantiate(parallel_envs=1, seed=seed) -# -# # Again, use a helper to create a model -# # But because model is owned by the reinforcer, model should not be accessed using this variable -# # but from reinforcer.model property -# model_factory = DeterministicPolicyModelFactory( -# input_block=NormalizeObservationsFactory(input_shape=24), -# policy_backbone=MLPFactory(input_length=24, hidden_layers=[64, 64], normalization='layer'), -# value_backbone=MLPFactory(input_length=28, hidden_layers=[64, 64], normalization='layer') -# ) -# -# # Reinforcer - an object managing the learning process -# reinforcer = BufferedOffPolicyIterationReinforcer( -# device=device, -# settings=BufferedOffPolicyIterationReinforcerSettings( -# rollout_steps=4, -# training_steps=1, -# ), -# environment=vec_env, -# algo=DeepDeterministicPolicyGradient( -# model_factory=model_factory, -# tau=0.01, -# discount_factor=0.99, -# max_grad_norm=0.5 -# ), -# model=model_factory.instantiate(action_space=vec_env.action_space), -# env_roller=TransitionReplayEnvRoller( -# environment=vec_env, -# device=device, -# action_noise=OuNoise(std_dev=0.2, environment=vec_env), -# replay_buffer=CircularReplayBuffer( -# buffer_capacity=100, -# buffer_initial_size=100, -# num_envs=vec_env.num_envs, -# observation_space=vec_env.observation_space, -# action_space=vec_env.action_space -# ), -# normalize_returns=True, -# discount_factor=0.99 -# ), -# ) -# -# # Model optimizer -# optimizer = optim.Adam(reinforcer.model.parameters(), lr=2.5e-4, eps=1e-4) -# -# # Overall information store for training information -# training_info = TrainingInfo( -# metrics=[ -# EpisodeRewardMetric('episode_rewards'), # Calculate average reward from episode -# ], -# callbacks=[ -# FrameTracker(100_000) -# ] # Print live metrics every epoch to standard output -# ) -# -# # A bit of training initialization bookkeeping... 
-# training_info.initialize() -# reinforcer.initialize_training(training_info) -# training_info.on_train_begin() -# -# # Let's make 100 batches per epoch to average metrics nicely -# num_epochs = 1 -# -# # Normal handrolled training loop -# for i in range(1, num_epochs+1): -# epoch_info = EpochInfo( -# training_info=training_info, -# global_epoch_idx=i, -# batches_per_epoch=1, -# optimizer=optimizer -# ) -# -# reinforcer.train_epoch(epoch_info, interactive=False) -# -# training_info.on_train_end() -# -# -# def test_trpo_bipedal_walker(): -# """ -# 1 iteration of TRPO on bipedal walker -# """ -# device = torch.device('cpu') -# seed = 1001 -# -# # Set random seed in python std lib, numpy and pytorch -# set_seed(seed) -# -# vec_env = DummyVecEnvWrapper( -# MujocoEnv('BipedalWalker-v2', normalize_returns=True), -# ).instantiate(parallel_envs=8, seed=seed) -# -# # Again, use a helper to create a model -# # But because model is owned by the reinforcer, model should not be accessed using this variable -# # but from reinforcer.model property -# model_factory = StochasticPolicyModelSeparateFactory( -# input_block=NormalizeObservationsFactory(input_shape=24), -# policy_backbone=MLPFactory(input_length=24, hidden_layers=[32, 32]), -# value_backbone=MLPFactory(input_length=24, hidden_layers=[32]) -# ) -# -# # Reinforcer - an object managing the learning process -# reinforcer = OnPolicyIterationReinforcer( -# device=device, -# settings=OnPolicyIterationReinforcerSettings( -# number_of_steps=12, -# ), -# model=model_factory.instantiate(action_space=vec_env.action_space), -# algo=TrpoPolicyGradient( -# max_kl=0.01, -# cg_iters=10, -# line_search_iters=10, -# improvement_acceptance_ratio=0.1, -# cg_damping=0.1, -# vf_iters=5, -# entropy_coef=0.0, -# discount_factor=0.99, -# max_grad_norm=0.5, -# gae_lambda=1.0 -# ), -# env_roller=StepEnvRoller( -# environment=vec_env, -# device=device, -# ) -# ) -# -# # Model optimizer -# optimizer = optim.Adam(reinforcer.model.parameters(), lr=1.0e-3, eps=1e-4) -# -# # Overall information store for training information -# training_info = TrainingInfo( -# metrics=[ -# EpisodeRewardMetric('episode_rewards'), # Calculate average reward from episode -# ], -# callbacks=[ -# FrameTracker(100_000) -# ] # Print live metrics every epoch to standard output -# ) -# -# # A bit of training initialization bookkeeping... 
-# training_info.initialize() -# reinforcer.initialize_training(training_info) -# training_info.on_train_begin() -# -# # Let's make 100 batches per epoch to average metrics nicely -# num_epochs = 1 -# -# # Normal handrolled training loop -# for i in range(1, num_epochs+1): -# epoch_info = EpochInfo( -# training_info=training_info, -# global_epoch_idx=i, -# batches_per_epoch=1, -# optimizer=optimizer -# ) -# -# reinforcer.train_epoch(epoch_info, interactive=False) -# -# training_info.on_train_end() -# -# -# def test_acer_breakout(): -# """ -# 1 iteration of ACER on breakout environment -# """ -# device = torch.device('cpu') -# seed = 1001 -# -# # Set random seed in python std lib, numpy and pytorch -# set_seed(seed) -# -# # Create 16 environments evaluated in parallel in sub processess with all usual DeepMind wrappers -# # These are just helper functions for that -# vec_env = SubprocVecEnvWrapper( -# ClassicAtariEnv('BreakoutNoFrameskip-v4'), frame_history=4 -# ).instantiate(parallel_envs=16, seed=seed) -# -# # Again, use a helper to create a model -# # But because model is owned by the reinforcer, model should not be accessed using this variable -# # but from reinforcer.model property -# model_factory = QStochasticPolicyModelFactory( -# input_block=ImageToTensorFactory(), -# backbone=NatureCnnFactory(input_width=84, input_height=84, input_channels=4) -# ) -# -# # Reinforcer - an object managing the learning process -# reinforcer = BufferedMixedPolicyIterationReinforcer( -# device=device, -# settings=BufferedMixedPolicyIterationReinforcerSettings( -# experience_replay=2, -# number_of_steps=12, -# stochastic_experience_replay=False -# ), -# model=model_factory.instantiate(action_space=vec_env.action_space), -# env=vec_env, -# algo=AcerPolicyGradient( -# model_factory=model_factory, -# entropy_coefficient=0.01, -# q_coefficient=0.5, -# rho_cap=10.0, -# retrace_rho_cap=1.0, -# trust_region=True, -# trust_region_delta=1.0, -# discount_factor=0.99, -# max_grad_norm=10.0, -# ), -# env_roller=TrajectoryReplayEnvRoller( -# environment=vec_env, -# device=device, -# replay_buffer=CircularReplayBuffer( -# buffer_capacity=100, -# buffer_initial_size=100, -# num_envs=vec_env.num_envs, -# action_space=vec_env.action_space, -# observation_space=vec_env.observation_space, -# frame_stack_compensation=True, -# frame_history=4, -# ) -# ), -# ) -# -# # Model optimizer -# optimizer = optim.RMSprop(reinforcer.model.parameters(), lr=7.0e-4, eps=1e-3, alpha=0.99) -# -# # Overall information store for training information -# training_info = TrainingInfo( -# metrics=[ -# EpisodeRewardMetric('episode_rewards'), # Calculate average reward from episode -# ], -# callbacks=[] # Print live metrics every epoch to standard output -# ) -# -# # A bit of training initialization bookkeeping... -# training_info.initialize() -# reinforcer.initialize_training(training_info) -# training_info.on_train_begin() -# -# # Let's make 100 batches per epoch to average metrics nicely -# num_epochs = 1 -# -# # Normal handrolled training loop -# for i in range(1, num_epochs+1): -# epoch_info = EpochInfo( -# training_info=training_info, -# global_epoch_idx=i, -# batches_per_epoch=1, -# optimizer=optimizer -# ) -# -# reinforcer.train_epoch(epoch_info, interactive=False) -# -# training_info.on_train_end() From dfaef137289c51509a8d04851a8a41d287852f77 Mon Sep 17 00:00:00 2001 From: Million Integrals Date: Sun, 6 Oct 2019 21:40:37 -0700 Subject: [PATCH 124/162] Renaming, Network -> VModule. 
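For orientation, a minimal sketch (not part of this patch) of what a custom backbone looks like against the renamed API. It assumes the SizeHint(batch_dim, feature_dim) constructor used elsewhere in this series and the vel.api exports shown in the diff below; the TinyMlpBackbone class, its parameters, and its initialization scheme are hypothetical illustrations, not code from the repository.

    import torch.nn as nn

    from vel.api import BackboneModule, SizeHint, SizeHints


    class TinyMlpBackbone(BackboneModule):
        """ Hypothetical two-layer MLP backbone built on the renamed VModule/BackboneModule base """

        def __init__(self, input_length: int, hidden_units: int):
            super().__init__()
            self.hidden_units = hidden_units
            self.model = nn.Sequential(
                nn.Linear(input_length, hidden_units),
                nn.ReLU(),
            )

        def reset_weights(self):
            # VModule exposes this hook so the training loop can re-initialize parameters
            for layer in self.model:
                if isinstance(layer, nn.Linear):
                    nn.init.xavier_uniform_(layer.weight)
                    nn.init.zeros_(layer.bias)

        def size_hints(self) -> SizeHints:
            # A BackboneModule reports its output size so downstream heads can be wired up
            return SizeHints(SizeHint(None, self.hidden_units))

        def forward(self, x):
            return self.model(x)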
--- vel/api/__init__.py | 2 +- vel/api/model.py | 4 ++-- vel/api/model_factory.py | 4 ++-- vel/api/{network.py => vmodule.py} | 4 ++-- vel/model/nlp/language_model.py | 6 +++--- vel/module/input/flatten.py | 4 ++-- vel/module/input/image_to_tensor.py | 4 ++-- vel/module/input/normalize_observations.py | 4 ++-- vel/module/input/one_hot_encoding.py | 4 ++-- vel/net/layer_base.py | 4 ++-- vel/net/modular.py | 8 ++++---- vel/rl/module/actor_critic_policy.py | 8 ++++---- vel/rl/module/noise/eps_greedy.py | 4 ++-- vel/rl/module/noise/ou_noise.py | 4 ++-- vel/rl/module/q_policy.py | 6 +++--- vel/rl/module/q_stochastic_policy.py | 6 +++--- vel/rl/module/rainbow_policy.py | 6 +++--- vel/rl/module/stochastic_policy.py | 6 +++--- vel/rl/module/stochastic_rnn_policy.py | 6 +++--- vel/rl/policy/a2c.py | 4 ++-- vel/rl/policy/a2c_rnn.py | 4 ++-- vel/rl/policy/acer.py | 4 ++-- vel/rl/policy/ddpg.py | 4 ++-- vel/rl/policy/dqn.py | 4 ++-- vel/rl/policy/ppo.py | 4 ++-- vel/rl/policy/ppo_rnn.py | 4 ++-- vel/rl/policy/rainbow.py | 4 ++-- vel/rl/policy/trpo.py | 6 +++--- 28 files changed, 66 insertions(+), 66 deletions(-) rename vel/api/{network.py => vmodule.py} (92%) diff --git a/vel/api/__init__.py b/vel/api/__init__.py index 9ddf09d8..ceb95211 100644 --- a/vel/api/__init__.py +++ b/vel/api/__init__.py @@ -1,7 +1,7 @@ from .callback import Callback from .info import BatchInfo, EpochInfo, TrainingInfo from .size_hint import SizeHint, SizeHints -from .network import Network, BackboneNetwork +from .vmodule import VModule, BackboneModule from .model import ( Model, ValidatedModel, OptimizedModel, GradientModel, LossFunctionModel ) diff --git a/vel/api/model.py b/vel/api/model.py index 052bd1c3..ac3fe547 100644 --- a/vel/api/model.py +++ b/vel/api/model.py @@ -6,10 +6,10 @@ from vel.metric.loss_metric import Loss -from .network import Network +from .vmodule import VModule -class Model(Network): +class Model(VModule): """ Class representing full neural network model, generally used to solve some problem """ def metrics(self) -> list: diff --git a/vel/api/model_factory.py b/vel/api/model_factory.py index 0015f006..a79ad097 100644 --- a/vel/api/model_factory.py +++ b/vel/api/model_factory.py @@ -1,11 +1,11 @@ -from .network import Network +from .vmodule import VModule from vel.internal.generic_factory import GenericFactory class ModelFactory: """ Factory class for models """ - def instantiate(self, **extra_args) -> Network: + def instantiate(self, **extra_args) -> VModule: raise NotImplementedError @staticmethod diff --git a/vel/api/network.py b/vel/api/vmodule.py similarity index 92% rename from vel/api/network.py rename to vel/api/vmodule.py index b6c8b97d..1374258e 100644 --- a/vel/api/network.py +++ b/vel/api/vmodule.py @@ -3,7 +3,7 @@ from .size_hint import SizeHints -class Network(nn.Module): +class VModule(nn.Module): """ Vel wrapper over nn.Module offering a few internally useful utilities """ def reset_weights(self): @@ -20,7 +20,7 @@ def zero_state(self, batch_size): return None -class BackboneNetwork(Network): +class BackboneModule(VModule): """ Network, whose output feeds into other models. Needs to provide size hints. 
""" def size_hints(self) -> SizeHints: diff --git a/vel/model/nlp/language_model.py b/vel/model/nlp/language_model.py index 55b96adb..bd295a3b 100644 --- a/vel/model/nlp/language_model.py +++ b/vel/model/nlp/language_model.py @@ -2,13 +2,13 @@ import torch.nn as nn import torch.nn.functional as F -from vel.api import LossFunctionModel, ModelFactory, Network, BackboneNetwork, SizeHints, SizeHint +from vel.api import LossFunctionModel, ModelFactory, VModule, BackboneModule, SizeHints, SizeHint class LanguageModel(LossFunctionModel): """ Language model - autoregressive generative model for text """ - def __init__(self, alphabet_size: int, net: BackboneNetwork): + def __init__(self, alphabet_size: int, net: BackboneModule): super().__init__() self.net = net @@ -61,7 +61,7 @@ def __init__(self, alphabet_size: int, net_factory: ModelFactory): self.alphabet_size = alphabet_size self.net_factory = net_factory - def instantiate(self, **extra_args) -> Network: + def instantiate(self, **extra_args) -> VModule: size_hint = SizeHints(SizeHint(None, None)) net = self.net_factory.instantiate(alphabet_size=self.alphabet_size, size_hint=size_hint) diff --git a/vel/module/input/flatten.py b/vel/module/input/flatten.py index c69e9f02..9462f689 100644 --- a/vel/module/input/flatten.py +++ b/vel/module/input/flatten.py @@ -1,10 +1,10 @@ from vel.module.layers import Flatten -from vel.api import Network +from vel.api import VModule -class FlattenInput(Network): +class FlattenInput(VModule): """ Sequence input """ def __init__(self): super().__init__() diff --git a/vel/module/input/image_to_tensor.py b/vel/module/input/image_to_tensor.py index b02a3229..3ca95d3c 100644 --- a/vel/module/input/image_to_tensor.py +++ b/vel/module/input/image_to_tensor.py @@ -1,6 +1,6 @@ import torch -from vel.api import Network +from vel.api import VModule def image_to_tensor(image: torch.Tensor) -> torch.Tensor: @@ -15,7 +15,7 @@ def image_to_tensor(image: torch.Tensor) -> torch.Tensor: return result -class ImageToTensor(Network): +class ImageToTensor(VModule): """ Convert simple image to tensor. 
diff --git a/vel/module/input/normalize_observations.py b/vel/module/input/normalize_observations.py index 52dc8de9..a1965a0e 100644 --- a/vel/module/input/normalize_observations.py +++ b/vel/module/input/normalize_observations.py @@ -1,9 +1,9 @@ import torch -from vel.api import Network +from vel.api import VModule -class NormalizeObservations(Network): +class NormalizeObservations(VModule): """ Normalize a vector of observations """ def __init__(self, input_shape, epsilon=1e-6): diff --git a/vel/module/input/one_hot_encoding.py b/vel/module/input/one_hot_encoding.py index 15f4e961..6cc83b74 100644 --- a/vel/module/input/one_hot_encoding.py +++ b/vel/module/input/one_hot_encoding.py @@ -1,8 +1,8 @@ -from vel.api import Network +from vel.api import VModule from vel.module.layers import OneHotEncode -class OneHotEncodingInput(Network): +class OneHotEncodingInput(VModule): """ One-hot encoding input layer """ def __init__(self, alphabet_size: int): diff --git a/vel/net/layer_base.py b/vel/net/layer_base.py index d1be235f..1738e694 100644 --- a/vel/net/layer_base.py +++ b/vel/net/layer_base.py @@ -1,7 +1,7 @@ -from vel.api import BackboneNetwork, SizeHints +from vel.api import BackboneModule, SizeHints -class Layer(BackboneNetwork): +class Layer(BackboneModule): def __init__(self, name: str): super().__init__() self.name = name diff --git a/vel/net/modular.py b/vel/net/modular.py index 517d2c2c..d448d628 100644 --- a/vel/net/modular.py +++ b/vel/net/modular.py @@ -2,7 +2,7 @@ import torch.nn as nn -from vel.api import BackboneNetwork, ModelFactory, SizeHints +from vel.api import BackboneModule, ModelFactory, SizeHints from vel.util.tensor_util import to_device from .layer_base import LayerFactory @@ -47,7 +47,7 @@ def instantiate_layers(layers: [LayerFactory], size_hint: SizeHints, extra_args: return ModularSequential(module_dict) -class ModularNetwork(BackboneNetwork): +class ModularNetwork(BackboneModule): """ Network that is built from layers """ def __init__(self, layers: nn.Module): @@ -82,7 +82,7 @@ def forward(self, input_data, state=None): return self.layers(input_data, context=context) -class StatefulModularNetwork(BackboneNetwork): +class StatefulModularNetwork(BackboneModule): """ Modular network handling the state between the episodes """ def __init__(self, layers: nn.Module): @@ -142,7 +142,7 @@ class ModularNetworkFactory(ModelFactory): def __init__(self, layers: [LayerFactory]): self.layers = layers - def instantiate(self, size_hint=None, **extra_args) -> BackboneNetwork: + def instantiate(self, size_hint=None, **extra_args) -> BackboneModule: """ Create either stateful or not modular network instance """ if size_hint is None: size_hint = SizeHints() diff --git a/vel/rl/module/actor_critic_policy.py b/vel/rl/module/actor_critic_policy.py index dc6ae23f..bbfc45de 100644 --- a/vel/rl/module/actor_critic_policy.py +++ b/vel/rl/module/actor_critic_policy.py @@ -3,18 +3,18 @@ import gym import torch -from vel.api import Network, BackboneNetwork +from vel.api import VModule, BackboneModule from vel.rl.module.head.deterministic_action_head import DeterministicActionHead from vel.rl.module.head.deterministic_critic_head import DeterministicCriticHead -class ActorCriticPolicy(Network): +class ActorCriticPolicy(VModule): """ Deterministic Policy Gradient - model """ - def __init__(self, input_net: BackboneNetwork, policy_net: BackboneNetwork, - value_net: BackboneNetwork, action_space: gym.Space): + def __init__(self, input_net: BackboneModule, policy_net: BackboneModule, + value_net: 
BackboneModule, action_space: gym.Space): super().__init__() self.input_net = input_net diff --git a/vel/rl/module/noise/eps_greedy.py b/vel/rl/module/noise/eps_greedy.py index becf13e1..328c140e 100644 --- a/vel/rl/module/noise/eps_greedy.py +++ b/vel/rl/module/noise/eps_greedy.py @@ -3,12 +3,12 @@ import torch -from vel.api import Schedule, Network +from vel.api import Schedule, VModule from vel.internal.generic_factory import GenericFactory from vel.function.constant import ConstantSchedule -class EpsGreedy(Network): +class EpsGreedy(VModule): """ Epsilon-greedy action selection """ def __init__(self, action_space: gym.Space): super().__init__() diff --git a/vel/rl/module/noise/ou_noise.py b/vel/rl/module/noise/ou_noise.py index 721b8772..10c154f0 100644 --- a/vel/rl/module/noise/ou_noise.py +++ b/vel/rl/module/noise/ou_noise.py @@ -2,12 +2,12 @@ import numpy as np import torch -from vel.api import Network +from vel.api import VModule from vel.util.process import OrnsteinUhlenbeckNoiseProcess from vel.internal.generic_factory import GenericFactory -class OuNoise(Network): +class OuNoise(VModule): """ Ornstein–Uhlenbeck noise process for action noise """ def __init__(self, std_dev: float, action_space: gym.Space): diff --git a/vel/rl/module/q_policy.py b/vel/rl/module/q_policy.py index 7e6e8925..6c18a235 100644 --- a/vel/rl/module/q_policy.py +++ b/vel/rl/module/q_policy.py @@ -1,17 +1,17 @@ import gym -from vel.api import Network, BackboneNetwork +from vel.api import VModule, BackboneModule from vel.rl.module.head.q_head import QHead from vel.rl.module.head.q_dueling_head import QDuelingHead -class QPolicy(Network): +class QPolicy(VModule): """ Simple deterministic greedy action-value model. Supports only discrete action spaces (ones that can be enumerated) """ - def __init__(self, net: BackboneNetwork, action_space: gym.Space, dueling_dqn=False): + def __init__(self, net: BackboneModule, action_space: gym.Space, dueling_dqn=False): super().__init__() self.dueling_dqn = dueling_dqn diff --git a/vel/rl/module/q_stochastic_policy.py b/vel/rl/module/q_stochastic_policy.py index d98b1ac3..29f12107 100644 --- a/vel/rl/module/q_stochastic_policy.py +++ b/vel/rl/module/q_stochastic_policy.py @@ -1,17 +1,17 @@ import gym -from vel.api import BackboneNetwork, Network +from vel.api import BackboneModule, VModule from vel.rl.module.head.stochastic_action_head import make_stockastic_action_head from vel.rl.module.head.q_head import QHead -class QStochasticPolicy(Network): +class QStochasticPolicy(VModule): """ A policy model with an action-value critic head (instead of more common state-value critic head). Supports only discrete action spaces (ones that can be enumerated) """ - def __init__(self, net: BackboneNetwork, action_space: gym.Space): + def __init__(self, net: BackboneModule, action_space: gym.Space): super().__init__() assert isinstance(action_space, gym.spaces.Discrete) diff --git a/vel/rl/module/rainbow_policy.py b/vel/rl/module/rainbow_policy.py index 8e709758..2b4fe914 100644 --- a/vel/rl/module/rainbow_policy.py +++ b/vel/rl/module/rainbow_policy.py @@ -1,11 +1,11 @@ import gym import torch -from vel.api import Network, BackboneNetwork +from vel.api import VModule, BackboneModule from vel.rl.module.head.q_distributional_noisy_dueling_head import QDistributionalNoisyDuelingHead -class RainbowPolicy(Network): +class RainbowPolicy(VModule): """ A deterministic greedy action-value model. 
Includes following commonly known modifications: @@ -14,7 +14,7 @@ class RainbowPolicy(Network): - Noisy Nets """ - def __init__(self, net: BackboneNetwork, action_space: gym.Space, vmin: float, vmax: float, + def __init__(self, net: BackboneModule, action_space: gym.Space, vmin: float, vmax: float, atoms: int = 1, initial_std_dev: float = 0.4, factorized_noise: bool = True): super().__init__() diff --git a/vel/rl/module/stochastic_policy.py b/vel/rl/module/stochastic_policy.py index 47a22e3f..d11ac0a6 100644 --- a/vel/rl/module/stochastic_policy.py +++ b/vel/rl/module/stochastic_policy.py @@ -1,17 +1,17 @@ import gym -from vel.api import Network, BackboneNetwork +from vel.api import VModule, BackboneModule from vel.rl.module.head.stochastic_action_head import make_stockastic_action_head from vel.rl.module.head.value_head import ValueHead -class StochasticPolicy(Network): +class StochasticPolicy(VModule): """ Most generic policy gradient model class with a set of common actor-critic heads that share a single backbone """ - def __init__(self, net: BackboneNetwork, action_space: gym.Space): + def __init__(self, net: BackboneModule, action_space: gym.Space): super().__init__() self.net = net diff --git a/vel/rl/module/stochastic_rnn_policy.py b/vel/rl/module/stochastic_rnn_policy.py index ae2e17b2..94410c63 100644 --- a/vel/rl/module/stochastic_rnn_policy.py +++ b/vel/rl/module/stochastic_rnn_policy.py @@ -1,19 +1,19 @@ import gym -from vel.api import Network, BackboneNetwork +from vel.api import VModule, BackboneModule from vel.rl.module.head.stochastic_action_head import make_stockastic_action_head from vel.rl.module.head.value_head import ValueHead from vel.util.tensor_util import to_device -class StochasticRnnPolicy(Network): +class StochasticRnnPolicy(VModule): """ Most generic policy gradient model class with a set of common actor-critic heads that share a single backbone RNN version """ - def __init__(self, net: BackboneNetwork, action_space: gym.Space): + def __init__(self, net: BackboneModule, action_space: gym.Space): super().__init__() self.net = net diff --git a/vel/rl/policy/a2c.py b/vel/rl/policy/a2c.py index 68589fdd..8a96c277 100644 --- a/vel/rl/policy/a2c.py +++ b/vel/rl/policy/a2c.py @@ -5,7 +5,7 @@ from vel.metric.base import AveragingNamedMetric from vel.util.situational import gym_space_to_size_hint from vel.util.stats import explained_variance -from vel.api import ModelFactory, BatchInfo, BackboneNetwork +from vel.api import ModelFactory, BatchInfo, BackboneModule from vel.rl.api import RlPolicy, Rollout, Trajectories from vel.rl.discount_bootstrap import discount_bootstrap_gae @@ -14,7 +14,7 @@ class A2C(RlPolicy): """ Simplest policy gradient - calculate loss as an advantage of an actor versus value function """ - def __init__(self, net: BackboneNetwork, action_space: gym.Space, + def __init__(self, net: BackboneModule, action_space: gym.Space, entropy_coefficient, value_coefficient, discount_factor: float, gae_lambda=1.0): super().__init__(discount_factor) diff --git a/vel/rl/policy/a2c_rnn.py b/vel/rl/policy/a2c_rnn.py index 84b4ef50..8b3a6654 100644 --- a/vel/rl/policy/a2c_rnn.py +++ b/vel/rl/policy/a2c_rnn.py @@ -2,7 +2,7 @@ import torch import torch.nn.functional as F -from vel.api import ModelFactory, BatchInfo, BackboneNetwork +from vel.api import ModelFactory, BatchInfo, BackboneModule from vel.metric.base import AveragingNamedMetric from vel.rl.api import RlPolicy, Rollout, Trajectories from vel.rl.discount_bootstrap import discount_bootstrap_gae @@ -13,7 
+13,7 @@ class A2CRnn(RlPolicy): """ Simplest policy gradient - calculate loss as an advantage of an actor versus value function """ - def __init__(self, net: BackboneNetwork, action_space: gym.Space, + def __init__(self, net: BackboneModule, action_space: gym.Space, entropy_coefficient, value_coefficient, discount_factor: float, gae_lambda=1.0): super().__init__(discount_factor) diff --git a/vel/rl/policy/acer.py b/vel/rl/policy/acer.py index 3d30f733..dbed10ce 100644 --- a/vel/rl/policy/acer.py +++ b/vel/rl/policy/acer.py @@ -3,7 +3,7 @@ import torch import torch.nn.functional as F -from vel.api import BackboneNetwork, ModelFactory, BatchInfo, OptimizerFactory, VelOptimizer +from vel.api import BackboneModule, ModelFactory, BatchInfo, OptimizerFactory, VelOptimizer from vel.metric.base import AveragingNamedMetric from vel.rl.api import Trajectories, RlPolicy, Rollout from vel.rl.module.q_stochastic_policy import QStochasticPolicy @@ -18,7 +18,7 @@ def select_indices(tensor, indices): class ACER(RlPolicy): """ Actor-Critic with Experience Replay - policy gradient calculations """ - def __init__(self, net: BackboneNetwork, target_net: typing.Optional[BackboneNetwork], action_space: gym.Space, + def __init__(self, net: BackboneModule, target_net: typing.Optional[BackboneModule], action_space: gym.Space, discount_factor: float, trust_region: bool = True, entropy_coefficient: float = 0.01, q_coefficient: float = 0.5, rho_cap: float = 10.0, retrace_rho_cap: float = 1.0, average_model_alpha: float = 0.99, trust_region_delta: float = 1.0): diff --git a/vel/rl/policy/ddpg.py b/vel/rl/policy/ddpg.py index 0e011ecb..915fdc3e 100644 --- a/vel/rl/policy/ddpg.py +++ b/vel/rl/policy/ddpg.py @@ -8,7 +8,7 @@ import vel.util.module_util as mu -from vel.api import BackboneNetwork, BatchInfo, ModelFactory, OptimizerFactory, VelOptimizer, SizeHints +from vel.api import BackboneModule, BatchInfo, ModelFactory, OptimizerFactory, VelOptimizer, SizeHints from vel.metric.base import AveragingNamedMetric from vel.rl.api import RlPolicy, Rollout from vel.rl.module.actor_critic_policy import ActorCriticPolicy @@ -19,7 +19,7 @@ class DDPG(RlPolicy): """ Deep Deterministic Policy Gradient (DDPG) - policy gradient calculations """ - def __init__(self, net: BackboneNetwork, target_net: BackboneNetwork, action_space: gym.Space, + def __init__(self, net: BackboneModule, target_net: BackboneModule, action_space: gym.Space, discount_factor: float, tau: float, noise_std_dev: float): super().__init__(discount_factor) diff --git a/vel/rl/policy/dqn.py b/vel/rl/policy/dqn.py index 9bc8dd64..c93bf013 100644 --- a/vel/rl/policy/dqn.py +++ b/vel/rl/policy/dqn.py @@ -6,7 +6,7 @@ import torch.nn.functional as F import torch.nn.utils -from vel.api import ModelFactory, BackboneNetwork, BatchInfo, Schedule, OptimizerFactory, VelOptimizer +from vel.api import ModelFactory, BackboneModule, BatchInfo, Schedule, OptimizerFactory, VelOptimizer from vel.function.constant import ConstantSchedule from vel.metric import AveragingNamedMetric from vel.rl.api import RlPolicy, Rollout @@ -18,7 +18,7 @@ class DQN(RlPolicy): """ Deep Q-Learning algorithm """ - def __init__(self, net: BackboneNetwork, target_net: BackboneNetwork, action_space: gym.Space, + def __init__(self, net: BackboneModule, target_net: BackboneModule, action_space: gym.Space, epsilon: typing.Union[float, Schedule], discount_factor: float, double_dqn: bool, dueling_dqn: bool, target_update_frequency: int): super().__init__(discount_factor) diff --git a/vel/rl/policy/ppo.py 
b/vel/rl/policy/ppo.py index 4e68445b..9ae38ba8 100644 --- a/vel/rl/policy/ppo.py +++ b/vel/rl/policy/ppo.py @@ -3,7 +3,7 @@ import numbers -from vel.api import BatchInfo, ModelFactory, BackboneNetwork +from vel.api import BatchInfo, ModelFactory, BackboneModule from vel.util.situational import gym_space_to_size_hint from vel.util.stats import explained_variance from vel.function.constant import ConstantSchedule @@ -16,7 +16,7 @@ class PPO(RlPolicy): """ Proximal Policy Optimization - https://arxiv.org/abs/1707.06347 """ - def __init__(self, net: BackboneNetwork, action_space: gym.Space, + def __init__(self, net: BackboneModule, action_space: gym.Space, entropy_coefficient, value_coefficient, cliprange, discount_factor: float, normalize_advantage: bool = True, gae_lambda: float = 1.0): super().__init__(discount_factor) diff --git a/vel/rl/policy/ppo_rnn.py b/vel/rl/policy/ppo_rnn.py index fc28e2f1..0ac60339 100644 --- a/vel/rl/policy/ppo_rnn.py +++ b/vel/rl/policy/ppo_rnn.py @@ -3,7 +3,7 @@ import gym import torch -from vel.api import BatchInfo, ModelFactory, BackboneNetwork +from vel.api import BatchInfo, ModelFactory, BackboneModule from vel.function.constant import ConstantSchedule from vel.metric.base import AveragingNamedMetric from vel.rl.api import RlPolicy, Rollout, Trajectories @@ -15,7 +15,7 @@ class PPORnn(RlPolicy): """ Proximal Policy Optimization - https://arxiv.org/abs/1707.06347 """ - def __init__(self, net: BackboneNetwork, action_space: gym.Space, + def __init__(self, net: BackboneModule, action_space: gym.Space, entropy_coefficient, value_coefficient, cliprange, discount_factor: float, normalize_advantage: bool = True, gae_lambda: float = 1.0): super().__init__(discount_factor) diff --git a/vel/rl/policy/rainbow.py b/vel/rl/policy/rainbow.py index f8693131..ed9be961 100644 --- a/vel/rl/policy/rainbow.py +++ b/vel/rl/policy/rainbow.py @@ -2,7 +2,7 @@ import torch import torch.nn.utils -from vel.api import ModelFactory, BackboneNetwork, BatchInfo, OptimizerFactory, VelOptimizer +from vel.api import ModelFactory, BackboneModule, BatchInfo, OptimizerFactory, VelOptimizer from vel.metric import AveragingNamedMetric from vel.rl.api import RlPolicy, Rollout from vel.rl.module.rainbow_policy import RainbowPolicy @@ -12,7 +12,7 @@ class Rainbow(RlPolicy): """ Deep Q-Learning algorithm """ - def __init__(self, net: BackboneNetwork, target_net: BackboneNetwork, action_space: gym.Space, + def __init__(self, net: BackboneModule, target_net: BackboneModule, action_space: gym.Space, discount_factor: float, target_update_frequency: int, vmin: float, vmax: float, atoms: int = 1, initial_std_dev: float = 0.4, factorized_noise: bool = True): super().__init__(discount_factor) diff --git a/vel/rl/policy/trpo.py b/vel/rl/policy/trpo.py index 0e4cba3a..c1fae215 100644 --- a/vel/rl/policy/trpo.py +++ b/vel/rl/policy/trpo.py @@ -8,7 +8,7 @@ import torch.nn.functional as F import torch.nn.utils -from vel.api import BatchInfo, VelOptimizer, OptimizerFactory, ModelFactory, BackboneNetwork +from vel.api import BatchInfo, VelOptimizer, OptimizerFactory, ModelFactory, BackboneModule from vel.util.stats import explained_variance from vel.metric.base import AveragingNamedMetric @@ -59,10 +59,10 @@ def conjugate_gradient_method(matrix_vector_operator, loss_gradient, nsteps, rdo class TRPO(RlPolicy): """ Trust Region Policy Optimization - https://arxiv.org/abs/1502.05477 """ - def __init__(self, policy_net: BackboneNetwork, value_net: BackboneNetwork, action_space: gym.Space, + def __init__(self, 
policy_net: BackboneModule, value_net: BackboneModule, action_space: gym.Space, max_kl, cg_iters, line_search_iters, cg_damping, entropy_coefficient, vf_iters, discount_factor, gae_lambda, improvement_acceptance_ratio, - input_net: typing.Optional[BackboneNetwork] = None, + input_net: typing.Optional[BackboneModule] = None, ): super().__init__(discount_factor) From 0c3bd10f2a6a61acbb2ebc61b572060c2371d7a7 Mon Sep 17 00:00:00 2001 From: Million Integrals Date: Sun, 6 Oct 2019 21:42:34 -0700 Subject: [PATCH 125/162] Renaming ModelFactory -> ModuleFactory. --- vel/api/__init__.py | 2 +- vel/api/model_factory.py | 8 ++++---- vel/command/phase_train_command.py | 2 +- vel/command/train_command.py | 2 +- vel/model/autoencoder/cnn_autoencoder.py | 4 ++-- vel/model/gan/simple_gan.py | 4 ++-- vel/model/imagenet/resnet34.py | 4 ++-- vel/model/latent/cnn_iwae.py | 4 ++-- vel/model/latent/cnn_vae.py | 4 ++-- vel/model/latent/fc_iwae.py | 4 ++-- vel/model/latent/fc_vae.py | 4 ++-- vel/model/latent/vq_vae.py | 4 ++-- vel/model/nlp/language_model.py | 8 ++++---- .../rnn/multilayer_rnn_sequence_classification.py | 6 +++--- vel/model/rnn/multilayer_rnn_sequence_model.py | 6 +++--- vel/model/vision/cifar10_cnn_01.py | 4 ++-- vel/model/vision/cifar_resnet_v1.py | 4 ++-- vel/model/vision/cifar_resnet_v2.py | 4 ++-- vel/model/vision/cifar_resnext.py | 4 ++-- vel/model/vision/mnist_cnn_01.py | 4 ++-- vel/net/modular.py | 4 ++-- vel/rl/command/enjoy.py | 4 ++-- vel/rl/command/evaluate_env_command.py | 6 +++--- vel/rl/command/record_movie_command.py | 4 ++-- vel/rl/layer/nature_cnn_rnn.py | 4 ++-- vel/rl/policy/a2c.py | 6 +++--- vel/rl/policy/a2c_rnn.py | 6 +++--- vel/rl/policy/acer.py | 4 ++-- vel/rl/policy/ddpg.py | 12 ++++++------ vel/rl/policy/dqn.py | 8 ++++---- vel/rl/policy/ppo.py | 6 +++--- vel/rl/policy/ppo_rnn.py | 6 +++--- vel/rl/policy/rainbow.py | 8 ++++---- vel/rl/policy/trpo.py | 12 ++++++------ .../buffered_mixed_policy_iteration_reinforcer.py | 4 ++-- .../buffered_off_policy_iteration_reinforcer.py | 4 ++-- vel/rl/reinforcer/on_policy_iteration_reinforcer.py | 4 ++-- 37 files changed, 94 insertions(+), 94 deletions(-) diff --git a/vel/api/__init__.py b/vel/api/__init__.py index ceb95211..4df70bd3 100644 --- a/vel/api/__init__.py +++ b/vel/api/__init__.py @@ -6,7 +6,7 @@ Model, ValidatedModel, OptimizedModel, GradientModel, LossFunctionModel ) from .model_config import ModelConfig -from .model_factory import ModelFactory +from .model_factory import ModuleFactory from .optimizer import OptimizerFactory, VelOptimizer, VelOptimizerProxy from .schedule import Schedule from .scheduler import SchedulerFactory diff --git a/vel/api/model_factory.py b/vel/api/model_factory.py index a79ad097..2920877c 100644 --- a/vel/api/model_factory.py +++ b/vel/api/model_factory.py @@ -1,15 +1,15 @@ -from .vmodule import VModule from vel.internal.generic_factory import GenericFactory +from .vmodule import VModule -class ModelFactory: - """ Factory class for models """ +class ModuleFactory: + """ Factory for modules """ def instantiate(self, **extra_args) -> VModule: raise NotImplementedError @staticmethod - def generic(closure, **kwargs) -> 'ModelFactory': + def generic(closure, **kwargs) -> 'ModuleFactory': """ Return a generic model factory """ # noinspection PyTypeChecker return GenericFactory(closure, kwargs) diff --git a/vel/command/phase_train_command.py b/vel/command/phase_train_command.py index 0fb91197..2670fcac 100644 --- a/vel/command/phase_train_command.py +++ b/vel/command/phase_train_command.py @@ -14,7 
+14,7 @@ class PhaseTrainCommand: """ Training command - learn according to a set of phases """ - def __init__(self, model_config: api.ModelConfig, model_factory: api.ModelFactory, loader: data.DatasetLoader, + def __init__(self, model_config: api.ModelConfig, model_factory: api.ModuleFactory, loader: data.DatasetLoader, storage: api.Storage, phases: typing.List[train.TrainPhase], callbacks=None, restart=True): self.model_config = model_config diff --git a/vel/command/train_command.py b/vel/command/train_command.py index a504f9c7..9e94450c 100644 --- a/vel/command/train_command.py +++ b/vel/command/train_command.py @@ -12,7 +12,7 @@ class SimpleTrainCommand: """ Very simple training command - just run the supplied generators """ - def __init__(self, epochs: int, model_config: api.ModelConfig, model_factory: api.ModelFactory, + def __init__(self, epochs: int, model_config: api.ModelConfig, model_factory: api.ModuleFactory, optimizer_factory: api.OptimizerFactory, scheduler_factory: typing.Optional[api.SchedulerFactory], loader: data.DatasetLoader, storage: api.Storage, callbacks: typing.Optional[typing.List[api.Callback]]): diff --git a/vel/model/autoencoder/cnn_autoencoder.py b/vel/model/autoencoder/cnn_autoencoder.py index 0bb3197e..51ca0907 100644 --- a/vel/model/autoencoder/cnn_autoencoder.py +++ b/vel/model/autoencoder/cnn_autoencoder.py @@ -6,7 +6,7 @@ import vel.util.network as net_util -from vel.api import LossFunctionModel, ModelFactory +from vel.api import LossFunctionModel, ModuleFactory from vel.metric.loss_metric import Loss from vel.module.layers import Flatten, Reshape @@ -104,4 +104,4 @@ def instantiate(**_): img_rows, img_cols, img_channels, channels=channels, representation_length=representation_length ) - return ModelFactory.generic(instantiate) + return ModuleFactory.generic(instantiate) diff --git a/vel/model/gan/simple_gan.py b/vel/model/gan/simple_gan.py index 78d516eb..0823888b 100644 --- a/vel/model/gan/simple_gan.py +++ b/vel/model/gan/simple_gan.py @@ -6,7 +6,7 @@ import torch import torch.nn as nn -from vel.api import OptimizedModel, ModelFactory, VelOptimizer, OptimizerFactory +from vel.api import OptimizedModel, ModuleFactory, VelOptimizer, OptimizerFactory from vel.api.optimizer import VelMultiOptimizer from vel.metric import AveragingNamedMetric @@ -159,4 +159,4 @@ def instantiate(**_): img_rows, img_cols, img_channels, latent_dim=latent_dim ) - return ModelFactory.generic(instantiate) + return ModuleFactory.generic(instantiate) diff --git a/vel/model/imagenet/resnet34.py b/vel/model/imagenet/resnet34.py index f9901eba..3537a226 100644 --- a/vel/model/imagenet/resnet34.py +++ b/vel/model/imagenet/resnet34.py @@ -5,7 +5,7 @@ import vel.module.layers as layers import vel.util.module_util as mu -from vel.api import LossFunctionModel, ModelFactory, OptimizerFactory, VelOptimizer +from vel.api import LossFunctionModel, ModuleFactory, OptimizerFactory, VelOptimizer # Because of concat pooling it's 2x 512 @@ -108,4 +108,4 @@ def create(fc_layers=None, dropout=None, pretrained=True): def instantiate(**_): return Resnet34(fc_layers, dropout, pretrained) - return ModelFactory.generic(instantiate) + return ModuleFactory.generic(instantiate) diff --git a/vel/model/latent/cnn_iwae.py b/vel/model/latent/cnn_iwae.py index a6ee1d7f..6827b0af 100644 --- a/vel/model/latent/cnn_iwae.py +++ b/vel/model/latent/cnn_iwae.py @@ -8,7 +8,7 @@ import vel.util.network as net_util -from vel.api import ModelFactory +from vel.api import ModuleFactory from vel.module.layers import Flatten, 
Reshape from vel.model.latent.iwae import IWAE @@ -155,4 +155,4 @@ def instantiate(**_): img_rows, img_cols, img_channels, k=k, channels=channels, representation_length=representation_length ) - return ModelFactory.generic(instantiate) + return ModuleFactory.generic(instantiate) diff --git a/vel/model/latent/cnn_vae.py b/vel/model/latent/cnn_vae.py index 491cdb70..71582449 100644 --- a/vel/model/latent/cnn_vae.py +++ b/vel/model/latent/cnn_vae.py @@ -8,7 +8,7 @@ import vel.util.network as net_util -from vel.api import ModelFactory +from vel.api import ModuleFactory from vel.module.layers import Flatten, Reshape from vel.model.latent.vae_base import VaeBase @@ -155,4 +155,4 @@ def instantiate(**_): img_rows, img_cols, img_channels, channels=channels, representation_length=representation_length ) - return ModelFactory.generic(instantiate) + return ModuleFactory.generic(instantiate) diff --git a/vel/model/latent/fc_iwae.py b/vel/model/latent/fc_iwae.py index 7e7a44da..43e47d5e 100644 --- a/vel/model/latent/fc_iwae.py +++ b/vel/model/latent/fc_iwae.py @@ -4,7 +4,7 @@ import torch.nn.functional as F import torch.nn.init as init -from vel.api import ModelFactory +from vel.api import ModuleFactory from vel.module.layers import Flatten, Reshape from vel.model.latent.iwae import IWAE @@ -102,4 +102,4 @@ def instantiate(**_): analytical_kl_div=analytical_kl_div ) - return ModelFactory.generic(instantiate) + return ModuleFactory.generic(instantiate) diff --git a/vel/model/latent/fc_vae.py b/vel/model/latent/fc_vae.py index fbad9e29..4ae9323a 100644 --- a/vel/model/latent/fc_vae.py +++ b/vel/model/latent/fc_vae.py @@ -4,7 +4,7 @@ import torch.nn.functional as F import torch.nn.init as init -from vel.api import ModelFactory +from vel.api import ModuleFactory from vel.module.layers import Flatten, Reshape from vel.model.latent.vae_base import VaeBase @@ -103,4 +103,4 @@ def instantiate(**_): analytical_kl_div=analytical_kl_div ) - return ModelFactory.generic(instantiate) + return ModuleFactory.generic(instantiate) diff --git a/vel/model/latent/vq_vae.py b/vel/model/latent/vq_vae.py index 5f3d4677..f28dbf50 100644 --- a/vel/model/latent/vq_vae.py +++ b/vel/model/latent/vq_vae.py @@ -293,7 +293,7 @@ def metrics(self): def create(img_rows, img_cols, img_channels, channels=None, k: int = 512, d: int = 256, beta: float = 1.0): """ Vel factory function """ - from vel.api import ModelFactory + from vel.api import ModuleFactory if channels is None: channels = [16, 32, 32] @@ -303,4 +303,4 @@ def instantiate(**_): img_rows, img_cols, img_channels, channels=channels, k=k, d=d, beta=beta ) - return ModelFactory.generic(instantiate) + return ModuleFactory.generic(instantiate) diff --git a/vel/model/nlp/language_model.py b/vel/model/nlp/language_model.py index bd295a3b..e27835b9 100644 --- a/vel/model/nlp/language_model.py +++ b/vel/model/nlp/language_model.py @@ -2,7 +2,7 @@ import torch.nn as nn import torch.nn.functional as F -from vel.api import LossFunctionModel, ModelFactory, VModule, BackboneModule, SizeHints, SizeHint +from vel.api import LossFunctionModel, ModuleFactory, VModule, BackboneModule, SizeHints, SizeHint class LanguageModel(LossFunctionModel): @@ -56,8 +56,8 @@ def loss_value(self, x_data, y_true, y_pred) -> torch.tensor: return F.nll_loss(y_pred, y_true) -class LanguageModelFactory(ModelFactory): - def __init__(self, alphabet_size: int, net_factory: ModelFactory): +class LanguageModelFactory(ModuleFactory): + def __init__(self, alphabet_size: int, net_factory: ModuleFactory): self.alphabet_size 
= alphabet_size self.net_factory = net_factory @@ -71,7 +71,7 @@ def instantiate(self, **extra_args) -> VModule: ) -def create(loader, net: ModelFactory): +def create(loader, net: ModuleFactory): """ Vel factory function """ return LanguageModelFactory( alphabet_size=loader.alphabet_size, diff --git a/vel/model/rnn/multilayer_rnn_sequence_classification.py b/vel/model/rnn/multilayer_rnn_sequence_classification.py index 20f40706..82953b5b 100644 --- a/vel/model/rnn/multilayer_rnn_sequence_classification.py +++ b/vel/model/rnn/multilayer_rnn_sequence_classification.py @@ -6,7 +6,7 @@ import vel.util.module_util as mu -from vel.api import LossFunctionModel, ModelFactory, LinearBackboneModel, OptimizerFactory, VelOptimizer +from vel.api import LossFunctionModel, ModuleFactory, LinearBackboneModel, OptimizerFactory, VelOptimizer from vel.metric.accuracy import Accuracy from vel.metric.loss_metric import Loss from vel.module.rnn_layer import RnnLayer @@ -150,7 +150,7 @@ def metrics(self) -> list: return [Loss(), Accuracy()] -def create(input_block: ModelFactory, rnn_type: str, output_dim: int, +def create(input_block: ModuleFactory, rnn_type: str, output_dim: int, rnn_layers: typing.List[int], rnn_dropout: float = 0.0, bidirectional: bool = False, linear_layers: typing.List[int] = None, linear_dropout: float = 0.0): """ Vel factory function """ @@ -164,4 +164,4 @@ def instantiate(**_): linear_layers=linear_layers, linear_dropout=linear_dropout ) - return ModelFactory.generic(instantiate) + return ModuleFactory.generic(instantiate) diff --git a/vel/model/rnn/multilayer_rnn_sequence_model.py b/vel/model/rnn/multilayer_rnn_sequence_model.py index 3f5c332a..70f98c75 100644 --- a/vel/model/rnn/multilayer_rnn_sequence_model.py +++ b/vel/model/rnn/multilayer_rnn_sequence_model.py @@ -4,7 +4,7 @@ import torch.nn.functional as F import torch.nn as nn -from vel.api import LossFunctionModel, ModelFactory +from vel.api import LossFunctionModel, ModuleFactory from vel.module.rnn_layer import RnnLayer @@ -110,7 +110,7 @@ def loss_value(self, x_data, y_true, y_pred): return F.nll_loss(y_pred, y_true) -def create(input_block: ModelFactory, rnn_type: str, hidden_layers: typing.List[int], +def create(input_block: ModuleFactory, rnn_type: str, hidden_layers: typing.List[int], output_dim: int, dropout=0.0): """ Vel factory function """ def instantiate(**_): @@ -119,4 +119,4 @@ def instantiate(**_): dropout=dropout ) - return ModelFactory.generic(instantiate) + return ModuleFactory.generic(instantiate) diff --git a/vel/model/vision/cifar10_cnn_01.py b/vel/model/vision/cifar10_cnn_01.py index 3f3551af..da9e2c57 100644 --- a/vel/model/vision/cifar10_cnn_01.py +++ b/vel/model/vision/cifar10_cnn_01.py @@ -8,7 +8,7 @@ import torch.nn.init as init import torch.nn.functional as F -from vel.api import LossFunctionModel, ModelFactory +from vel.api import LossFunctionModel, ModuleFactory from vel.metric.loss_metric import Loss from vel.metric.accuracy import Accuracy @@ -92,4 +92,4 @@ def create(img_rows, img_cols, img_channels, num_classes): """ Vel factory function """ def instantiate(**_): return Net(img_rows, img_cols, img_channels, num_classes) - return ModelFactory.generic(instantiate) + return ModuleFactory.generic(instantiate) diff --git a/vel/model/vision/cifar_resnet_v1.py b/vel/model/vision/cifar_resnet_v1.py index ab8fac25..4a520c65 100644 --- a/vel/model/vision/cifar_resnet_v1.py +++ b/vel/model/vision/cifar_resnet_v1.py @@ -6,7 +6,7 @@ import torch.nn as nn import torch.nn.functional as F -from vel.api import 
LossFunctionModel, ModelFactory +from vel.api import LossFunctionModel, ModuleFactory from vel.module.resnet_v1 import Bottleneck, BasicBlock @@ -89,4 +89,4 @@ def create(blocks, mode='basic', inplanes=16, divisor=4, num_classes=1000): def instantiate(**_): return ResNetV1(block_dict[mode], blocks, inplanes=inplanes, divisor=divisor, num_classes=num_classes) - return ModelFactory.generic(instantiate) + return ModuleFactory.generic(instantiate) diff --git a/vel/model/vision/cifar_resnet_v2.py b/vel/model/vision/cifar_resnet_v2.py index 66e96fb6..eef5ab38 100644 --- a/vel/model/vision/cifar_resnet_v2.py +++ b/vel/model/vision/cifar_resnet_v2.py @@ -6,7 +6,7 @@ import torch.nn as nn import torch.nn.functional as F -from vel.api import LossFunctionModel, ModelFactory +from vel.api import LossFunctionModel, ModuleFactory from vel.module.resnet_v2 import Bottleneck, BasicBlock @@ -91,4 +91,4 @@ def create(blocks, mode='basic', inplanes=16, divisor=4, num_classes=1000): def instantiate(**_): return ResNetV2(block_dict[mode], blocks, inplanes=inplanes, divisor=divisor, num_classes=num_classes) - return ModelFactory.generic(instantiate) + return ModuleFactory.generic(instantiate) diff --git a/vel/model/vision/cifar_resnext.py b/vel/model/vision/cifar_resnext.py index edb6d8a2..d3ce97a3 100644 --- a/vel/model/vision/cifar_resnext.py +++ b/vel/model/vision/cifar_resnext.py @@ -6,7 +6,7 @@ import torch.nn as nn import torch.nn.functional as F -from vel.api import LossFunctionModel, ModelFactory +from vel.api import LossFunctionModel, ModuleFactory from vel.module.resnext import ResNeXtBottleneck @@ -91,4 +91,4 @@ def instantiate(**_): cardinality=cardinality, divisor=divisor, num_classes=num_classes ) - return ModelFactory.generic(instantiate) + return ModuleFactory.generic(instantiate) diff --git a/vel/model/vision/mnist_cnn_01.py b/vel/model/vision/mnist_cnn_01.py index 513f33c0..df8c5ada 100644 --- a/vel/model/vision/mnist_cnn_01.py +++ b/vel/model/vision/mnist_cnn_01.py @@ -9,7 +9,7 @@ import torch.nn.functional as F -from vel.api import LossFunctionModel, ModelFactory +from vel.api import LossFunctionModel, ModuleFactory from vel.metric.loss_metric import Loss from vel.metric.accuracy import Accuracy @@ -77,4 +77,4 @@ def create(img_rows, img_cols, img_channels, num_classes): def instantiate(**_): return Net(img_rows, img_cols, img_channels, num_classes) - return ModelFactory.generic(instantiate) + return ModuleFactory.generic(instantiate) diff --git a/vel/net/modular.py b/vel/net/modular.py index d448d628..c17fd81a 100644 --- a/vel/net/modular.py +++ b/vel/net/modular.py @@ -2,7 +2,7 @@ import torch.nn as nn -from vel.api import BackboneModule, ModelFactory, SizeHints +from vel.api import BackboneModule, ModuleFactory, SizeHints from vel.util.tensor_util import to_device from .layer_base import LayerFactory @@ -137,7 +137,7 @@ def forward(self, input_data, state=None): return data, output_state -class ModularNetworkFactory(ModelFactory): +class ModularNetworkFactory(ModuleFactory): """ Factory class for the modular network """ def __init__(self, layers: [LayerFactory]): self.layers = layers diff --git a/vel/rl/command/enjoy.py b/vel/rl/command/enjoy.py index 14da7fa2..2cef2bf2 100644 --- a/vel/rl/command/enjoy.py +++ b/vel/rl/command/enjoy.py @@ -3,14 +3,14 @@ import typing import time -from vel.api import ModelConfig, TrainingInfo, Storage, ModelFactory +from vel.api import ModelConfig, TrainingInfo, Storage, ModuleFactory from vel.rl.api import VecEnvFactory class EnjoyCommand: """ Play 
render("human") in a loop for a human to enjoy """ - def __init__(self, model_config: ModelConfig, model_factory: ModelFactory, vec_env_factory: VecEnvFactory, + def __init__(self, model_config: ModelConfig, model_factory: ModuleFactory, vec_env_factory: VecEnvFactory, storage: Storage, fps: float, sample_args: typing.Optional[dict]): self.model_config = model_config self.model_factory = model_factory diff --git a/vel/rl/command/evaluate_env_command.py b/vel/rl/command/evaluate_env_command.py index 3bf0eec7..0a27aec6 100644 --- a/vel/rl/command/evaluate_env_command.py +++ b/vel/rl/command/evaluate_env_command.py @@ -4,14 +4,14 @@ import tqdm import typing -from vel.api import ModelConfig, TrainingInfo, Storage, ModelFactory +from vel.api import ModelConfig, TrainingInfo, Storage, ModuleFactory from vel.rl.api import VecEnvFactory class EvaluateEnvCommand: """ Record environment playthrough as a game """ - def __init__(self, model_config: ModelConfig, env_factory: VecEnvFactory, model_factory: ModelFactory, - storage: Storage, parallel_envs: int, action_noise: typing.Optional[ModelFactory], takes: int, + def __init__(self, model_config: ModelConfig, env_factory: VecEnvFactory, model_factory: ModuleFactory, + storage: Storage, parallel_envs: int, action_noise: typing.Optional[ModuleFactory], takes: int, sample_args: dict = None): self.model_config = model_config self.model_factory = model_factory diff --git a/vel/rl/command/record_movie_command.py b/vel/rl/command/record_movie_command.py index a7a14d78..79598e30 100644 --- a/vel/rl/command/record_movie_command.py +++ b/vel/rl/command/record_movie_command.py @@ -7,13 +7,13 @@ import tqdm import typing -from vel.api import ModelConfig, TrainingInfo, Storage, ModelFactory +from vel.api import ModelConfig, TrainingInfo, Storage, ModuleFactory from vel.rl.api import VecEnvFactory class RecordMovieCommand: """ Record environment playthrough as a game """ - def __init__(self, model_config: ModelConfig, env_factory: VecEnvFactory, model_factory: ModelFactory, + def __init__(self, model_config: ModelConfig, env_factory: VecEnvFactory, model_factory: ModuleFactory, storage: Storage, videoname: str, takes: int, fps: int, sample_args: typing.Optional[dict] = None): self.model_config = model_config self.model_factory = model_factory diff --git a/vel/rl/layer/nature_cnn_rnn.py b/vel/rl/layer/nature_cnn_rnn.py index 699e7387..fd4864a2 100644 --- a/vel/rl/layer/nature_cnn_rnn.py +++ b/vel/rl/layer/nature_cnn_rnn.py @@ -1,4 +1,4 @@ -from vel.api import LinearBackboneModel, ModelFactory +from vel.api import LinearBackboneModel, ModuleFactory from vel.rl.backbone.nature_cnn import NatureCnn from vel.module.rnn_cell import RnnCell @@ -58,4 +58,4 @@ def instantiate(**_): rnn_type=rnn_type, cnn_output_dim=cnn_output_dim, hidden_units=hidden_units ) - return ModelFactory.generic(instantiate) + return ModuleFactory.generic(instantiate) diff --git a/vel/rl/policy/a2c.py b/vel/rl/policy/a2c.py index 8a96c277..ef4cb1d8 100644 --- a/vel/rl/policy/a2c.py +++ b/vel/rl/policy/a2c.py @@ -5,7 +5,7 @@ from vel.metric.base import AveragingNamedMetric from vel.util.situational import gym_space_to_size_hint from vel.util.stats import explained_variance -from vel.api import ModelFactory, BatchInfo, BackboneModule +from vel.api import ModuleFactory, BatchInfo, BackboneModule from vel.rl.api import RlPolicy, Rollout, Trajectories from vel.rl.discount_bootstrap import discount_bootstrap_gae @@ -112,7 +112,7 @@ def metrics(self) -> list: ] -class A2CFactory(ModelFactory): +class 
A2CFactory(ModuleFactory): """ Factory class for policy gradient models """ def __init__(self, net_factory, entropy_coefficient, value_coefficient, discount_factor, gae_lambda=1.0): self.net_factory = net_factory @@ -140,7 +140,7 @@ def instantiate(self, **extra_args): ) -def create(net: ModelFactory, entropy_coefficient, value_coefficient, discount_factor, gae_lambda=1.0): +def create(net: ModuleFactory, entropy_coefficient, value_coefficient, discount_factor, gae_lambda=1.0): """ Vel factory function """ return A2CFactory( net_factory=net, diff --git a/vel/rl/policy/a2c_rnn.py b/vel/rl/policy/a2c_rnn.py index 8b3a6654..5ec298de 100644 --- a/vel/rl/policy/a2c_rnn.py +++ b/vel/rl/policy/a2c_rnn.py @@ -2,7 +2,7 @@ import torch import torch.nn.functional as F -from vel.api import ModelFactory, BatchInfo, BackboneModule +from vel.api import ModuleFactory, BatchInfo, BackboneModule from vel.metric.base import AveragingNamedMetric from vel.rl.api import RlPolicy, Rollout, Trajectories from vel.rl.discount_bootstrap import discount_bootstrap_gae @@ -153,7 +153,7 @@ def metrics(self) -> list: ] -class A2CRnnFactory(ModelFactory): +class A2CRnnFactory(ModuleFactory): """ Factory class for policy gradient models """ def __init__(self, net_factory, entropy_coefficient, value_coefficient, discount_factor, gae_lambda=1.0): self.net_factory = net_factory @@ -181,7 +181,7 @@ def instantiate(self, **extra_args): ) -def create(net: ModelFactory, entropy_coefficient, value_coefficient, discount_factor, gae_lambda=1.0): +def create(net: ModuleFactory, entropy_coefficient, value_coefficient, discount_factor, gae_lambda=1.0): """ Vel factory function """ return A2CRnnFactory( net_factory=net, diff --git a/vel/rl/policy/acer.py b/vel/rl/policy/acer.py index dbed10ce..59003e45 100644 --- a/vel/rl/policy/acer.py +++ b/vel/rl/policy/acer.py @@ -3,7 +3,7 @@ import torch import torch.nn.functional as F -from vel.api import BackboneModule, ModelFactory, BatchInfo, OptimizerFactory, VelOptimizer +from vel.api import BackboneModule, ModuleFactory, BatchInfo, OptimizerFactory, VelOptimizer from vel.metric.base import AveragingNamedMetric from vel.rl.api import Trajectories, RlPolicy, Rollout from vel.rl.module.q_stochastic_policy import QStochasticPolicy @@ -247,7 +247,7 @@ def metrics(self) -> list: ] -class ACERFactory(ModelFactory): +class ACERFactory(ModuleFactory): """ Factory class for ACER policies """ def __init__(self, net_factory, trust_region: bool, entropy_coefficient: float, q_coefficient: float, discount_factor: float, rho_cap: float = 10.0, retrace_rho_cap: float = 1.0, diff --git a/vel/rl/policy/ddpg.py b/vel/rl/policy/ddpg.py index 915fdc3e..3f40d317 100644 --- a/vel/rl/policy/ddpg.py +++ b/vel/rl/policy/ddpg.py @@ -8,7 +8,7 @@ import vel.util.module_util as mu -from vel.api import BackboneModule, BatchInfo, ModelFactory, OptimizerFactory, VelOptimizer, SizeHints +from vel.api import BackboneModule, BatchInfo, ModuleFactory, OptimizerFactory, VelOptimizer, SizeHints from vel.metric.base import AveragingNamedMetric from vel.rl.api import RlPolicy, Rollout from vel.rl.module.actor_critic_policy import ActorCriticPolicy @@ -127,12 +127,12 @@ def metrics(self) -> list: ] -class DDPGFactory(ModelFactory): +class DDPGFactory(ModuleFactory): """ Factory for the DDPG policy """ - def __init__(self, actor_net: ModelFactory, critic_net: ModelFactory, + def __init__(self, actor_net: ModuleFactory, critic_net: ModuleFactory, discount_factor: float, tau: float, noise_std_dev: float, - input_net: 
typing.Optional[ModelFactory] = None): + input_net: typing.Optional[ModuleFactory] = None): self.actor_net_factory = actor_net self.critic_net_factory = critic_net self.input_net_factory = input_net @@ -182,9 +182,9 @@ def instantiate(self, **extra_args): ) -def create(actor_net: ModelFactory, critic_net: ModelFactory, +def create(actor_net: ModuleFactory, critic_net: ModuleFactory, discount_factor: float, tau: float, noise_std_dev: float, - input_net: typing.Optional[ModelFactory] = None + input_net: typing.Optional[ModuleFactory] = None ): """ Vel factory function """ return DDPGFactory( diff --git a/vel/rl/policy/dqn.py b/vel/rl/policy/dqn.py index c93bf013..b6a27aa0 100644 --- a/vel/rl/policy/dqn.py +++ b/vel/rl/policy/dqn.py @@ -6,7 +6,7 @@ import torch.nn.functional as F import torch.nn.utils -from vel.api import ModelFactory, BackboneModule, BatchInfo, Schedule, OptimizerFactory, VelOptimizer +from vel.api import ModuleFactory, BackboneModule, BatchInfo, Schedule, OptimizerFactory, VelOptimizer from vel.function.constant import ConstantSchedule from vel.metric import AveragingNamedMetric from vel.rl.api import RlPolicy, Rollout @@ -133,8 +133,8 @@ def metrics(self) -> list: ] -class DQNFactory(ModelFactory): - def __init__(self, net_factory: ModelFactory, epsilon: typing.Union[float, Schedule], discount_factor: float, +class DQNFactory(ModuleFactory): + def __init__(self, net_factory: ModuleFactory, epsilon: typing.Union[float, Schedule], discount_factor: float, target_update_frequency: int, double_dqn: bool = False, dueling_dqn: bool = False): self.net_factory = net_factory self.epsilon = epsilon @@ -165,7 +165,7 @@ def instantiate(self, **extra_args): ) -def create(net: ModelFactory, epsilon: typing.Union[float, Schedule], discount_factor: float, +def create(net: ModuleFactory, epsilon: typing.Union[float, Schedule], discount_factor: float, target_update_frequency: int, double_dqn: bool = False, dueling_dqn: bool = False): """ Vel factory function """ diff --git a/vel/rl/policy/ppo.py b/vel/rl/policy/ppo.py index 9ae38ba8..313407eb 100644 --- a/vel/rl/policy/ppo.py +++ b/vel/rl/policy/ppo.py @@ -3,7 +3,7 @@ import numbers -from vel.api import BatchInfo, ModelFactory, BackboneModule +from vel.api import BatchInfo, ModuleFactory, BackboneModule from vel.util.situational import gym_space_to_size_hint from vel.util.stats import explained_variance from vel.function.constant import ConstantSchedule @@ -152,7 +152,7 @@ def metrics(self) -> list: ] -class PPOFactory(ModelFactory): +class PPOFactory(ModuleFactory): """ Factory class for policy gradient models """ def __init__(self, net_factory, entropy_coefficient, value_coefficient, cliprange, discount_factor: float, normalize_advantage: bool = True, gae_lambda: float = 1.0): @@ -185,7 +185,7 @@ def instantiate(self, **extra_args): ) -def create(net: ModelFactory, entropy_coefficient, value_coefficient, cliprange, discount_factor: float, +def create(net: ModuleFactory, entropy_coefficient, value_coefficient, cliprange, discount_factor: float, normalize_advantage: bool = True, gae_lambda: float = 1.0): """ Vel factory function """ return PPOFactory( diff --git a/vel/rl/policy/ppo_rnn.py b/vel/rl/policy/ppo_rnn.py index 0ac60339..786d4825 100644 --- a/vel/rl/policy/ppo_rnn.py +++ b/vel/rl/policy/ppo_rnn.py @@ -3,7 +3,7 @@ import gym import torch -from vel.api import BatchInfo, ModelFactory, BackboneModule +from vel.api import BatchInfo, ModuleFactory, BackboneModule from vel.function.constant import ConstantSchedule from vel.metric.base 
import AveragingNamedMetric from vel.rl.api import RlPolicy, Rollout, Trajectories @@ -190,7 +190,7 @@ def metrics(self) -> list: ] -class PPORnnFactory(ModelFactory): +class PPORnnFactory(ModuleFactory): """ Factory class for policy gradient models """ def __init__(self, net_factory, entropy_coefficient, value_coefficient, cliprange, discount_factor: float, @@ -224,7 +224,7 @@ def instantiate(self, **extra_args): ) -def create(net: ModelFactory, +def create(net: ModuleFactory, entropy_coefficient, value_coefficient, cliprange, discount_factor: float, normalize_advantage: bool = True, gae_lambda: float = 1.0): """ Vel factory function """ diff --git a/vel/rl/policy/rainbow.py b/vel/rl/policy/rainbow.py index ed9be961..be7253c9 100644 --- a/vel/rl/policy/rainbow.py +++ b/vel/rl/policy/rainbow.py @@ -2,7 +2,7 @@ import torch import torch.nn.utils -from vel.api import ModelFactory, BackboneModule, BatchInfo, OptimizerFactory, VelOptimizer +from vel.api import ModuleFactory, BackboneModule, BatchInfo, OptimizerFactory, VelOptimizer from vel.metric import AveragingNamedMetric from vel.rl.api import RlPolicy, Rollout from vel.rl.module.rainbow_policy import RainbowPolicy @@ -203,8 +203,8 @@ def metrics(self) -> list: ] -class RainbowFactory(ModelFactory): - def __init__(self, net_factory: ModelFactory, discount_factor: float, target_update_frequency: int, +class RainbowFactory(ModuleFactory): + def __init__(self, net_factory: ModuleFactory, discount_factor: float, target_update_frequency: int, vmin: float, vmax: float, atoms: int = 1, initial_std_dev: float = 0.4, factorized_noise: bool = True): self.net_factory = net_factory self.discount_factor = discount_factor @@ -240,7 +240,7 @@ def instantiate(self, **extra_args): ) -def create(net: ModelFactory, discount_factor: float, target_update_frequency: int, +def create(net: ModuleFactory, discount_factor: float, target_update_frequency: int, vmin: float, vmax: float, atoms: int = 1, initial_std_dev: float = 0.4, factorized_noise: bool = True): """ Vel factory function """ return RainbowFactory( diff --git a/vel/rl/policy/trpo.py b/vel/rl/policy/trpo.py index c1fae215..8c6a554f 100644 --- a/vel/rl/policy/trpo.py +++ b/vel/rl/policy/trpo.py @@ -8,7 +8,7 @@ import torch.nn.functional as F import torch.nn.utils -from vel.api import BatchInfo, VelOptimizer, OptimizerFactory, ModelFactory, BackboneModule +from vel.api import BatchInfo, VelOptimizer, OptimizerFactory, ModuleFactory, BackboneModule from vel.util.stats import explained_variance from vel.metric.base import AveragingNamedMetric @@ -351,11 +351,11 @@ def metrics(self) -> list: ] -class TRPOFactory(ModelFactory): +class TRPOFactory(ModuleFactory): """ Factory class for policy gradient models """ - def __init__(self, policy_net: ModelFactory, value_net: ModelFactory, + def __init__(self, policy_net: ModuleFactory, value_net: ModuleFactory, max_kl, cg_iters, line_search_iters, cg_damping, entropy_coefficient, vf_iters, - discount_factor, gae_lambda, improvement_acceptance_ratio, input_net: typing.Optional[ModelFactory]): + discount_factor, gae_lambda, improvement_acceptance_ratio, input_net: typing.Optional[ModuleFactory]): self.policy_net = policy_net self.value_net = value_net self.input_net = input_net @@ -403,9 +403,9 @@ def instantiate(self, **extra_args): ) -def create(policy_net: ModelFactory, value_net: ModelFactory, +def create(policy_net: ModuleFactory, value_net: ModuleFactory, max_kl, cg_iters, line_search_iters, cg_damping, entropy_coefficient, vf_iters, - discount_factor, 
gae_lambda, improvement_acceptance_ratio, input_net: typing.Optional[ModelFactory]=None): + discount_factor, gae_lambda, improvement_acceptance_ratio, input_net: typing.Optional[ModuleFactory]=None): """ Vel factory function """ return TRPOFactory( diff --git a/vel/rl/reinforcer/buffered_mixed_policy_iteration_reinforcer.py b/vel/rl/reinforcer/buffered_mixed_policy_iteration_reinforcer.py index 92c30a5d..548d1663 100644 --- a/vel/rl/reinforcer/buffered_mixed_policy_iteration_reinforcer.py +++ b/vel/rl/reinforcer/buffered_mixed_policy_iteration_reinforcer.py @@ -4,7 +4,7 @@ import torch import tqdm -from vel.api import TrainingInfo, EpochInfo, BatchInfo, ModelFactory +from vel.api import TrainingInfo, EpochInfo, BatchInfo, ModuleFactory from vel.openai.baselines.common.vec_env import VecEnv from vel.rl.api import ( Reinforcer, ReinforcerFactory, VecEnvFactory, ReplayEnvRollerBase, ReplayEnvRollerFactoryBase, @@ -136,7 +136,7 @@ def off_policy_train_batch(self, batch_info: BatchInfo): class BufferedMixedPolicyIterationReinforcerFactory(ReinforcerFactory): """ Factory class for the PolicyGradientReplayBuffer factory """ - def __init__(self, settings, env_factory: VecEnvFactory, model_factory: ModelFactory, + def __init__(self, settings, env_factory: VecEnvFactory, model_factory: ModuleFactory, env_roller_factory: ReplayEnvRollerFactoryBase, parallel_envs: int, seed: int): self.settings = settings diff --git a/vel/rl/reinforcer/buffered_off_policy_iteration_reinforcer.py b/vel/rl/reinforcer/buffered_off_policy_iteration_reinforcer.py index 9deeb210..f3dd3310 100644 --- a/vel/rl/reinforcer/buffered_off_policy_iteration_reinforcer.py +++ b/vel/rl/reinforcer/buffered_off_policy_iteration_reinforcer.py @@ -3,7 +3,7 @@ import torch import tqdm -from vel.api import TrainingInfo, EpochInfo, BatchInfo, Model, ModelFactory +from vel.api import TrainingInfo, EpochInfo, BatchInfo, Model, ModuleFactory from vel.openai.baselines.common.vec_env import VecEnv from vel.rl.api import ( Reinforcer, ReinforcerFactory, ReplayEnvRollerBase, VecEnvFactory, ReplayEnvRollerFactoryBase, @@ -153,7 +153,7 @@ def train_on_replay_memory(self, batch_info): class BufferedOffPolicyIterationReinforcerFactory(ReinforcerFactory): """ Factory class for the DQN reinforcer """ - def __init__(self, settings, env_factory: VecEnvFactory, model_factory: ModelFactory, + def __init__(self, settings, env_factory: VecEnvFactory, model_factory: ModuleFactory, env_roller_factory: ReplayEnvRollerFactoryBase, parallel_envs: int, seed: int): self.settings = settings diff --git a/vel/rl/reinforcer/on_policy_iteration_reinforcer.py b/vel/rl/reinforcer/on_policy_iteration_reinforcer.py index 64af89e5..13b9853f 100644 --- a/vel/rl/reinforcer/on_policy_iteration_reinforcer.py +++ b/vel/rl/reinforcer/on_policy_iteration_reinforcer.py @@ -4,7 +4,7 @@ import torch import tqdm -from vel.api import ModelFactory, TrainingInfo, EpochInfo, BatchInfo +from vel.api import ModuleFactory, TrainingInfo, EpochInfo, BatchInfo from vel.rl.api import ( Reinforcer, ReinforcerFactory, VecEnvFactory, EnvRollerFactoryBase, EnvRollerBase, RlPolicy @@ -146,7 +146,7 @@ def train_batch(self, batch_info: BatchInfo) -> None: class OnPolicyIterationReinforcerFactory(ReinforcerFactory): """ Vel factory class for the PolicyGradientReinforcer """ - def __init__(self, settings, parallel_envs: int, env_factory: VecEnvFactory, model_factory: ModelFactory, + def __init__(self, settings, parallel_envs: int, env_factory: VecEnvFactory, model_factory: ModuleFactory, env_roller_factory: 
EnvRollerFactoryBase, seed: int): self.settings = settings self.parallel_envs = parallel_envs From 5a7a3f1926793ed0fa47a54a931f4b8a44bf14ae Mon Sep 17 00:00:00 2001 From: Million Integrals Date: Thu, 10 Oct 2019 16:59:59 -0700 Subject: [PATCH 126/162] Code lint update. --- .flake8 | 2 +- vel/api/optimizer.py | 1 - vel/api/size_hint.py | 2 +- vel/callback/sample_tracker.py | 1 - vel/data/augmentation/scale_min_size.py | 2 - vel/internal/provider.py | 3 +- vel/model/gan/simple_gan.py | 3 +- vel/model/latent/fc_iwae.py | 2 +- vel/model/latent/fc_vae.py | 2 +- vel/model/latent/vq_vae.py | 2 - .../rnn/multilayer_rnn_sequence_model.py | 122 ------------------ vel/module/input/flatten.py | 1 - vel/module/input/normalize_observations.py | 1 - vel/module/input/one_hot_encoding.py | 1 - vel/net/layer/mlp.py | 4 +- vel/net/layer_base.py | 1 - vel/rl/command/rl_train_command.py | 1 - vel/rl/layer/nature_cnn_rnn.py | 61 --------- vel/rl/module/actor_critic_policy.py | 1 - vel/rl/module/noise/eps_greedy.py | 1 - vel/rl/module/stochastic_policy.py | 1 - vel/rl/module/stochastic_rnn_policy.py | 3 - vel/rl/policy/acer.py | 2 +- vel/rl/policy/ppo.py | 1 - vel/rl/policy/ppo_rnn.py | 1 - vel/rl/policy/trpo.py | 8 +- vel/util/dataloader.py | 3 - vel/util/module_util.py | 2 - vel/util/situational.py | 1 - 29 files changed, 15 insertions(+), 221 deletions(-) delete mode 100644 vel/model/rnn/multilayer_rnn_sequence_model.py delete mode 100644 vel/rl/layer/nature_cnn_rnn.py diff --git a/.flake8 b/.flake8 index 25d4293b..d26ea701 100644 --- a/.flake8 +++ b/.flake8 @@ -1,3 +1,3 @@ [flake8] max-line-length = 120 -exclude = vel/openai, test, vel/api/__init__.py, vel/rl/api/__init__.py +exclude = vel/openai, test, vel/api/__init__.py, vel/rl/api/__init__.py, vel/data/__init__.py, vel/metric/__init__.py, vel/metric/base/__init__.py, vel/train/__init__.py, vel/optimizer/ranger.py, vel/optimizer/radam.py diff --git a/vel/api/optimizer.py b/vel/api/optimizer.py index bed2a75b..e9f97f83 100644 --- a/vel/api/optimizer.py +++ b/vel/api/optimizer.py @@ -187,4 +187,3 @@ def instantiate_multi(self, parameter_dict: dict) -> VelMultiOptimizer: od[name] = self.instantiate(value) return VelMultiOptimizer(od) - diff --git a/vel/api/size_hint.py b/vel/api/size_hint.py index b1e4fecb..c8e687a8 100644 --- a/vel/api/size_hint.py +++ b/vel/api/size_hint.py @@ -65,7 +65,7 @@ def __init__(self, size_hints: typing.Union[SizeHint, SizeTuple, SizeDict] = Non else: raise VelException("Invalid size hints: {}".format(self.size_hints)) - def assert_tuple(self, length : typing.Optional[int] = None) -> SizeTuple: + def assert_tuple(self, length: typing.Optional[int] = None) -> SizeTuple: """ Assert given size hints is a tuple """ assert self.type == self.TYPE_TUPLE, "Network needs to return a tuple" diff --git a/vel/callback/sample_tracker.py b/vel/callback/sample_tracker.py index a1c9d789..aadefd12 100644 --- a/vel/callback/sample_tracker.py +++ b/vel/callback/sample_tracker.py @@ -23,4 +23,3 @@ def write_state_dict(self, training_info: TrainingInfo, hidden_state_dict: dict) def load_state_dict(self, training_info: TrainingInfo, hidden_state_dict: dict): training_info['samples'] = hidden_state_dict['sample_tracker/samples'] - diff --git a/vel/data/augmentation/scale_min_size.py b/vel/data/augmentation/scale_min_size.py index 88554a09..0fae3bd6 100644 --- a/vel/data/augmentation/scale_min_size.py +++ b/vel/data/augmentation/scale_min_size.py @@ -2,8 +2,6 @@ Code based on: https://github.com/fastai/fastai/blob/master/fastai/transforms.py """ -import 
PIL.Image as Image - import vel.api as api import vel.data.operation.image_op as op diff --git a/vel/internal/provider.py b/vel/internal/provider.py index 79921125..6a6950d0 100644 --- a/vel/internal/provider.py +++ b/vel/internal/provider.py @@ -8,7 +8,8 @@ class Provider: """ Dependency injection resolver for the configuration file """ - def __init__(self, environment: dict, instances: typing.Optional[dict] = None, parameters: typing.Optional[dict] = None): + def __init__(self, environment: dict, instances: typing.Optional[dict] = None, + parameters: typing.Optional[dict] = None): self.environment = environment self.parameters = parameters if parameters is not None else {} diff --git a/vel/model/gan/simple_gan.py b/vel/model/gan/simple_gan.py index 0823888b..a8f56044 100644 --- a/vel/model/gan/simple_gan.py +++ b/vel/model/gan/simple_gan.py @@ -2,11 +2,12 @@ Simple GAN code is based on https://github.com/eriklindernoren/PyTorch-GAN/blob/master/implementations/gan/gan.py """ import collections + import numpy as np import torch import torch.nn as nn -from vel.api import OptimizedModel, ModuleFactory, VelOptimizer, OptimizerFactory +from vel.api import OptimizedModel, ModuleFactory, OptimizerFactory from vel.api.optimizer import VelMultiOptimizer from vel.metric import AveragingNamedMetric diff --git a/vel/model/latent/fc_iwae.py b/vel/model/latent/fc_iwae.py index 43e47d5e..4ba19597 100644 --- a/vel/model/latent/fc_iwae.py +++ b/vel/model/latent/fc_iwae.py @@ -2,7 +2,6 @@ import torch.distributions as dist import torch.nn as nn import torch.nn.functional as F -import torch.nn.init as init from vel.api import ModuleFactory from vel.module.layers import Flatten, Reshape @@ -76,6 +75,7 @@ def decoder_sample(self, decoded: torch.Tensor) -> torch.Tensor: """ Sample from a decoder distribution - we ignore that since it's so weak in this case """ return decoded +# import torch.nn.init as init # @staticmethod # def _weight_initializer(tensor): # init.xavier_uniform_(tensor.weight, gain=init.calculate_gain('tanh')) diff --git a/vel/model/latent/fc_vae.py b/vel/model/latent/fc_vae.py index 4ae9323a..701c5717 100644 --- a/vel/model/latent/fc_vae.py +++ b/vel/model/latent/fc_vae.py @@ -2,7 +2,6 @@ import torch.distributions as dist import torch.nn as nn import torch.nn.functional as F -import torch.nn.init as init from vel.api import ModuleFactory from vel.module.layers import Flatten, Reshape @@ -76,6 +75,7 @@ def decoder_sample(self, decoded: torch.Tensor) -> torch.Tensor: """ Sample from a decoder distribution - we ignore that since it's so weak in this case """ return decoded +# import torch.nn.init as init # @staticmethod # def _weight_initializer(tensor): # init.xavier_uniform_(tensor.weight, gain=init.calculate_gain('tanh')) diff --git a/vel/model/latent/vq_vae.py b/vel/model/latent/vq_vae.py index f28dbf50..608260f9 100644 --- a/vel/model/latent/vq_vae.py +++ b/vel/model/latent/vq_vae.py @@ -272,8 +272,6 @@ def calculate_gradient(self, data: dict) -> dict: return { 'loss': loss.item(), - - 'grad_norm': grad_norm, 'reconstruction': loss_recons.item(), 'loss_vq': loss_vq.item(), 'loss_commit': loss_commit.item() diff --git a/vel/model/rnn/multilayer_rnn_sequence_model.py b/vel/model/rnn/multilayer_rnn_sequence_model.py deleted file mode 100644 index 70f98c75..00000000 --- a/vel/model/rnn/multilayer_rnn_sequence_model.py +++ /dev/null @@ -1,122 +0,0 @@ -import typing - -import torch -import torch.nn.functional as F -import torch.nn as nn - -from vel.api import LossFunctionModel, ModuleFactory 
-from vel.module.rnn_layer import RnnLayer - - -class MultilayerRnnSequenceModel(LossFunctionModel): - """ Multilayer RNN network for sequence modeling (n:n) """ - - def __init__(self, input_block: LinearBackboneModel, rnn_type: str, hidden_layers: typing.List[int], - output_dim: int, dropout: float = 0.0): - super().__init__() - - self.output_dim = output_dim - self.hidden_layers = hidden_layers - - self.input_block = input_block - - current_dim = self.input_block.output_dim - - self.recurrent_layers = [] - self.dropout_layers = [] - - for idx, current_layer in enumerate(hidden_layers, 1): - rnn = RnnLayer( - input_size=current_dim, - hidden_size=current_layer, - rnn_type=rnn_type, - ) - - self.add_module('{}{:02}'.format(rnn_type, idx), rnn) - self.recurrent_layers.append(rnn) - - if dropout > 0.0: - dropout_layer = nn.Dropout(p=dropout) - - self.add_module('rnn_dropout{:02}'.format(idx), dropout_layer) - self.dropout_layers.append(dropout_layer) - - current_dim = current_layer - - self.output_layer = nn.Linear(current_dim, output_dim) - self.output_activation = nn.LogSoftmax(dim=2) - - def reset_weights(self): - self.input_block.reset_weights() - - def forward(self, sequence): - """ Forward propagate batch of sequences through the network, without accounting for the state """ - data = self.input_block(sequence) - - for idx in range(len(self.recurrent_layers)): - data, _ = self.recurrent_layers[idx](data) - - if self.dropout_layers: - data = self.dropout_layers[idx](data) - - data = self.output_layer(data) - - return self.output_activation(data) - - def forward_state(self, sequence, state=None): - """ Forward propagate a sequence through the network accounting for the state """ - if state is None: - state = self.zero_state(sequence.size(0)) - - data = self.input_block(sequence) - - state_outputs = [] - - # for layer_length, layer in zip(self.hidden_layers, self.recurrent_layers): - for idx in range(len(self.recurrent_layers)): - layer_length = self.recurrent_layers[idx].state_dim - - # Partition hidden state, for each layer we have layer_length of h state and layer_length of c state - current_state = state[:, :, :layer_length] - state = state[:, :, layer_length:] - - # Propagate through the GRU state - data, new_h = self.recurrent_layers[idx](data, current_state) - - if self.dropout_layers: - data = self.dropout_layers[idx](data) - - state_outputs.append(new_h) - - output_data = self.output_activation(self.output_layer(data)) - - concatenated_hidden_output = torch.cat(state_outputs, dim=2) - - return output_data, concatenated_hidden_output - - @property - def state_dim(self) -> int: - """ Dimension of model state """ - return sum(x.state_dim for x in self.recurrent_layers) - - def zero_state(self, batch_size): - """ Initial state of the network """ - return torch.zeros(1, batch_size, self.state_dim) - - def loss_value(self, x_data, y_true, y_pred): - """ Calculate a value of loss function """ - y_pred = y_pred.view(-1, y_pred.size(2)) - y_true = y_true.view(-1).to(torch.long) - return F.nll_loss(y_pred, y_true) - - -def create(input_block: ModuleFactory, rnn_type: str, hidden_layers: typing.List[int], - output_dim: int, dropout=0.0): - """ Vel factory function """ - def instantiate(**_): - return MultilayerRnnSequenceModel( - input_block.instantiate(), rnn_type=rnn_type, hidden_layers=hidden_layers, output_dim=output_dim, - dropout=dropout - ) - - return ModuleFactory.generic(instantiate) diff --git a/vel/module/input/flatten.py b/vel/module/input/flatten.py index 9462f689..7a1e5246 
100644 --- a/vel/module/input/flatten.py +++ b/vel/module/input/flatten.py @@ -12,4 +12,3 @@ def __init__(self): def forward(self, input_data): return self.model(input_data) - diff --git a/vel/module/input/normalize_observations.py b/vel/module/input/normalize_observations.py index a1965a0e..a7dca4be 100644 --- a/vel/module/input/normalize_observations.py +++ b/vel/module/input/normalize_observations.py @@ -44,4 +44,3 @@ def forward(self, input_vector): self.running_var.copy_(new_var) return (input_vector - self.running_mean.unsqueeze(0)) / torch.sqrt(self.running_var.unsqueeze(0)) - diff --git a/vel/module/input/one_hot_encoding.py b/vel/module/input/one_hot_encoding.py index 6cc83b74..0bbc5f52 100644 --- a/vel/module/input/one_hot_encoding.py +++ b/vel/module/input/one_hot_encoding.py @@ -14,4 +14,3 @@ def __init__(self, alphabet_size: int): def forward(self, input_data): return self.layer(input_data) - diff --git a/vel/net/layer/mlp.py b/vel/net/layer/mlp.py index d2f57b49..551bf7f5 100644 --- a/vel/net/layer/mlp.py +++ b/vel/net/layer/mlp.py @@ -5,14 +5,14 @@ Under MIT license. """ import typing -import numpy as np +import numpy as np import torch.nn as nn import torch.nn.init as init import vel.util.network as net_util -from vel.api import SizeHints, SizeHint +from vel.api import SizeHints from vel.net.layer_base import LayerFactory, Layer diff --git a/vel/net/layer_base.py b/vel/net/layer_base.py index 1738e694..cdc90487 100644 --- a/vel/net/layer_base.py +++ b/vel/net/layer_base.py @@ -22,4 +22,3 @@ def name_base(self) -> str: def instantiate(self, name: str, direct_input: SizeHints, context: dict, extra_args: dict) -> Layer: """ Create a given layer object """ raise NotImplementedError - diff --git a/vel/rl/command/rl_train_command.py b/vel/rl/command/rl_train_command.py index a879a0f0..b63807fb 100644 --- a/vel/rl/command/rl_train_command.py +++ b/vel/rl/command/rl_train_command.py @@ -2,7 +2,6 @@ from vel.api import ModelConfig, EpochInfo, TrainingInfo, BatchInfo, OptimizerFactory, Storage, Callback, VelOptimizer from vel.callback.time_tracker import TimeTracker -from vel.metric.samples_per_sec import SamplesPerSec from vel.rl.api import ReinforcerFactory, Reinforcer import vel.openai.baselines.logger as openai_logger diff --git a/vel/rl/layer/nature_cnn_rnn.py b/vel/rl/layer/nature_cnn_rnn.py deleted file mode 100644 index fd4864a2..00000000 --- a/vel/rl/layer/nature_cnn_rnn.py +++ /dev/null @@ -1,61 +0,0 @@ -from vel.api import LinearBackboneModel, ModuleFactory -from vel.rl.backbone.nature_cnn import NatureCnn -from vel.module.rnn_cell import RnnCell - -from vel.api import SizeHint, SizeHints -from vel.net.layer_base import Layer, LayerFactory - - -class NatureCnnRnnBackbone(LinearBackboneModel): - """ - Long-Short-Term Memory rnn cell together with DeepMind-style 'Nature' cnn preprocessing - """ - - def __init__(self, input_width: int, input_height: int, input_channels: int, rnn_type: str = 'lstm', - cnn_output_dim: int = 512, hidden_units: int = 128): - super().__init__() - - self.hidden_units = hidden_units - - self.nature_cnn = NatureCnn(input_width, input_height, input_channels, cnn_output_dim) - self.rnn_cell = RnnCell(input_size=self.nature_cnn.output_dim, hidden_size=self.hidden_units, rnn_type=rnn_type) - - def reset_weights(self): - """ Call proper initializers for the weights """ - self.nature_cnn.reset_weights() - self.rnn_cell.reset_weights() - - @property - def output_dim(self) -> int: - return self.rnn_cell.output_dim - - @property - def state_dim(self) -> int: 
- """ Initial state of the network """ - return self.rnn_cell.state_dim - - @property - def is_stateful(self) -> bool: - """ If the model has a state that needs to be fed between individual observations """ - return True - - def zero_state(self, batch_size): - """ Potential state for the model """ - return self.rnn_cell.zero_state(batch_size) - - def forward(self, input_image, state): - cnn_output = self.nature_cnn(input_image) - hidden_state, new_state = self.rnn_cell(cnn_output, state) - - return hidden_state, new_state - - -def create(input_width, input_height, input_channels=1, rnn_type='lstm', cnn_output_dim=512, hidden_units=128): - """ Vel factory function """ - def instantiate(**_): - return NatureCnnRnnBackbone( - input_width=input_width, input_height=input_height, input_channels=input_channels, - rnn_type=rnn_type, cnn_output_dim=cnn_output_dim, hidden_units=hidden_units - ) - - return ModuleFactory.generic(instantiate) diff --git a/vel/rl/module/actor_critic_policy.py b/vel/rl/module/actor_critic_policy.py index bbfc45de..cb252447 100644 --- a/vel/rl/module/actor_critic_policy.py +++ b/vel/rl/module/actor_critic_policy.py @@ -1,7 +1,6 @@ import itertools as it import gym -import torch from vel.api import VModule, BackboneModule diff --git a/vel/rl/module/noise/eps_greedy.py b/vel/rl/module/noise/eps_greedy.py index 328c140e..e48ace91 100644 --- a/vel/rl/module/noise/eps_greedy.py +++ b/vel/rl/module/noise/eps_greedy.py @@ -5,7 +5,6 @@ from vel.api import Schedule, VModule from vel.internal.generic_factory import GenericFactory -from vel.function.constant import ConstantSchedule class EpsGreedy(VModule): diff --git a/vel/rl/module/stochastic_policy.py b/vel/rl/module/stochastic_policy.py index d11ac0a6..ec73101d 100644 --- a/vel/rl/module/stochastic_policy.py +++ b/vel/rl/module/stochastic_policy.py @@ -39,4 +39,3 @@ def forward(self, observation): """ Calculate model outputs """ action_hidden, value_hidden = self.net(observation) return self.action_head(action_hidden), self.value_head(value_hidden) - diff --git a/vel/rl/module/stochastic_rnn_policy.py b/vel/rl/module/stochastic_rnn_policy.py index 94410c63..aa04f927 100644 --- a/vel/rl/module/stochastic_rnn_policy.py +++ b/vel/rl/module/stochastic_rnn_policy.py @@ -1,10 +1,8 @@ import gym from vel.api import VModule, BackboneModule - from vel.rl.module.head.stochastic_action_head import make_stockastic_action_head from vel.rl.module.head.value_head import ValueHead -from vel.util.tensor_util import to_device class StochasticRnnPolicy(VModule): @@ -73,4 +71,3 @@ def reset_state(self, state, dones): return out_state else: return state - diff --git a/vel/rl/policy/acer.py b/vel/rl/policy/acer.py index 59003e45..68792faf 100644 --- a/vel/rl/policy/acer.py +++ b/vel/rl/policy/acer.py @@ -291,7 +291,7 @@ def instantiate(self, **extra_args): ) -def create(net, trust_region: bool , entropy_coefficient: float, q_coefficient: float, discount_factor: float, +def create(net, trust_region: bool, entropy_coefficient: float, q_coefficient: float, discount_factor: float, rho_cap: float = 10.0, retrace_rho_cap: float = 1.0, average_model_alpha: float = 0.99, trust_region_delta: float = 1.0): """ Vel factory function """ diff --git a/vel/rl/policy/ppo.py b/vel/rl/policy/ppo.py index 313407eb..915a1bdf 100644 --- a/vel/rl/policy/ppo.py +++ b/vel/rl/policy/ppo.py @@ -197,4 +197,3 @@ def create(net: ModuleFactory, entropy_coefficient, value_coefficient, cliprange normalize_advantage=normalize_advantage, gae_lambda=gae_lambda ) - diff --git 
a/vel/rl/policy/ppo_rnn.py b/vel/rl/policy/ppo_rnn.py index 786d4825..c09e08cc 100644 --- a/vel/rl/policy/ppo_rnn.py +++ b/vel/rl/policy/ppo_rnn.py @@ -237,4 +237,3 @@ def create(net: ModuleFactory, normalize_advantage=normalize_advantage, gae_lambda=gae_lambda ) - diff --git a/vel/rl/policy/trpo.py b/vel/rl/policy/trpo.py index 8c6a554f..58c44ca6 100644 --- a/vel/rl/policy/trpo.py +++ b/vel/rl/policy/trpo.py @@ -256,8 +256,8 @@ def optimize(self, batch_info: BatchInfo, rollout: Rollout) -> dict: 'explained_variance': explained_variance(returns, rollout.batch_tensor('values')) } - def line_search(self, normalized_observations, rollout, original_policy_loss, original_policy_params, original_parameter_vec, - full_step, expected_improvement_full): + def line_search(self, normalized_observations, rollout, original_policy_loss, original_policy_params, + original_parameter_vec, full_step, expected_improvement_full): """ Find the right stepsize to make sure policy improves """ current_parameter_vec = original_parameter_vec.clone() @@ -405,7 +405,8 @@ def instantiate(self, **extra_args): def create(policy_net: ModuleFactory, value_net: ModuleFactory, max_kl, cg_iters, line_search_iters, cg_damping, entropy_coefficient, vf_iters, - discount_factor, gae_lambda, improvement_acceptance_ratio, input_net: typing.Optional[ModuleFactory]=None): + discount_factor, gae_lambda, improvement_acceptance_ratio, + input_net: typing.Optional[ModuleFactory] = None): """ Vel factory function """ return TRPOFactory( @@ -422,4 +423,3 @@ def create(policy_net: ModuleFactory, value_net: ModuleFactory, gae_lambda=gae_lambda, improvement_acceptance_ratio=improvement_acceptance_ratio, ) - diff --git a/vel/util/dataloader.py b/vel/util/dataloader.py index b6b03fe5..0bdbada2 100644 --- a/vel/util/dataloader.py +++ b/vel/util/dataloader.py @@ -16,6 +16,3 @@ def map_values(self, item): return { name: getattr(item, argument) for name, argument in self.field_mapping.items() } - - - diff --git a/vel/util/module_util.py b/vel/util/module_util.py index ae415425..e2dbef9b 100644 --- a/vel/util/module_util.py +++ b/vel/util/module_util.py @@ -95,5 +95,3 @@ def optimizer_parameter_helper(parameters, parameter_dict): out_dict[parameter] = value[0] return out_dict - - diff --git a/vel/util/situational.py b/vel/util/situational.py index 4a4cb402..4ea7140c 100644 --- a/vel/util/situational.py +++ b/vel/util/situational.py @@ -41,4 +41,3 @@ def gym_space_to_size_hint(space: gym.Space) -> SizeHints: def size_hint_from_shape(shape: typing.Tuple[int]) -> SizeHints: """ Convert tensor shape (without batch dimension) into a size hint """ return SizeHints(SizeHint(*([None] + list(shape)))) - From 7fb9375ca85cef761be90058dd3640014137e114 Mon Sep 17 00:00:00 2001 From: Million Integrals Date: Thu, 10 Oct 2019 17:08:12 -0700 Subject: [PATCH 127/162] Update to PyTorch 1.3 --- README.md | 2 +- requirements.in | 2 +- requirements.txt | 8 ++++---- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/README.md b/README.md index 57237231..22075ae9 100644 --- a/README.md +++ b/README.md @@ -54,7 +54,7 @@ pip install -e . ``` from the repository root directory. -This project requires Python at least 3.6 and PyTorch 1.2. +This project requires Python at least 3.6 and PyTorch 1.3. If you want to run YAML config examples, you'll also need a **project configuration file** `.velproject.yaml`. An example is included in this repository. 
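The README hunk above and the requirements changes below move the project to the PyTorch 1.3 line: `torch~=1.3` is a compatible-release specifier, i.e. >= 1.3 and < 2.0. A minimal sketch of checking an existing environment against that pin (assuming only that `torch` is importable; this is an illustrative sketch, not part of the patch itself):

```python
# Illustrative sketch (not part of this patch): verify the installed PyTorch
# satisfies the ~=1.3 compatible-release pin used in requirements.in.
import torch

major, minor = (int(part) for part in torch.__version__.split(".")[:2])
assert (major, minor) >= (1, 3) and major < 2, (
    "vel at this revision expects PyTorch ~=1.3, found " + torch.__version__
)
```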
diff --git a/requirements.in b/requirements.in
index 70eebbb8..416b5ede 100644
--- a/requirements.in
+++ b/requirements.in
@@ -12,6 +12,6 @@ pyyaml
 scikit-learn
 torchtext
 torchvision
-torch~=1.2
+torch~=1.3
 tqdm
 visdom
diff --git a/requirements.txt b/requirements.txt
index 28dbf680..5776b12b 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -13,8 +13,8 @@ chardet==3.0.4 # via requests
 cloudpickle==1.2.2
 cycler==0.10.0 # via matplotlib
 dnspython==1.16.0
-future==0.17.1 # via pyglet
-gym[atari,box2d,classic_control]==0.14.0
+future==0.18.0 # via pyglet
+gym[atari,box2d,classic_control]==0.15.3
 idna==2.8 # via requests
 importlib-metadata==0.23 # via pluggy, pytest
 joblib==0.14.0 # via scikit-learn
@@ -42,10 +42,10 @@ requests==2.22.0 # via torchtext, visdom
 scikit-learn==0.21.3
 scipy==1.3.1 # via gym, scikit-learn, visdom
 six==1.12.0 # via atari-py, cycler, gym, packaging, python-dateutil, torchtext, torchvision, visdom, websocket-client
-torch==1.2.0
+torch==1.3.0
 torchfile==0.1.0 # via visdom
 torchtext==0.4.0
-torchvision==0.4.0
+torchvision==0.4.1
 tornado==6.0.3 # via visdom
 tqdm==4.36.1
 urllib3==1.25.6 # via requests

From 9f36e733ded91ec911ba0e3af8d2d1b876633f90 Mon Sep 17 00:00:00 2001
From: Million Integrals
Date: Mon, 14 Oct 2019 11:04:41 -0700
Subject: [PATCH 128/162] Another refactoring of remaining examples.

---
 .../cats_vs_dogs_resnet34.yaml | 5 +-
 .../classification/imdb_sentiment_gru.yaml | 58 ++++--
 examples-configs/rl/mujoco/mujoco_a2c.yaml | 2 +-
 examples-configs/rl/mujoco/mujoco_ddpg.yaml | 14 +-
 examples-configs/rl/mujoco/mujoco_ppo.yaml | 2 +-
 examples-configs/rl/mujoco/mujoco_trpo.yaml | 3 +-
 vel/api/optimizer.py | 88 +++++++--
 vel/api/vmodule.py | 4 +
 vel/data/bucket_loader.py | 5 +
 vel/data/source/nlp/imdb.py | 18 +-
 vel/model/imagenet/resnet34.py | 27 ++-
 vel/model/nlp/language_model.py | 5 +-
 vel/model/nlp/sequence_classification.py | 91 ++++++++++
 .../multilayer_rnn_sequence_classification.py | 167 ------------------
 ...observations.py => normalize_expanding.py} | 4 +-
 vel/module/rnn_layer.py | 159 ++++++++---------
 vel/net/layer/arch/parallel.py | 42 +++--
 vel/net/layer/dropout.py | 21 +--
 vel/net/layer/input/image_to_tensor.py | 18 +-
 vel/net/layer/input/normalize.py | 49 -----
 vel/net/layer/input/normalize_expanding.py | 50 ++++++
 vel/net/layer/mlp.py | 35 ++--
 vel/net/layer/nlp/alphabet_embedding.py | 21 ++-
 vel/net/layer/nlp/alphabet_one_hot_encode.py | 22 ++-
 vel/net/layer/nlp/pretrained_embedding.py | 61 +++++++
 vel/net/layer/nlp/select_final_features.py | 65 +++++++
 vel/net/layer/rnn.py | 41 +++--
 vel/net/layer/util/concat.py | 17 +-
 vel/net/layer/util/repeat.py | 16 +-
 vel/net/layer_base.py | 97 +++++++++-
 vel/net/modular.py | 43 +++--
 vel/net/sequence.py | 76 ++++++++
 vel/optimizer/adadelta.py | 26 +--
 vel/optimizer/adam.py | 28 +--
 vel/optimizer/radam.py | 28 +--
 vel/optimizer/ranger.py | 69 +-------
 vel/optimizer/rmsprop.py | 28 +--
 vel/optimizer/rmsprop_tf.py | 28 +--
 vel/optimizer/sgd.py | 33 +---
 vel/rl/layer/double_nature_cnn.py | 15 +-
 vel/rl/layer/double_noisy_nature_cnn.py | 16 +-
 vel/{model/rnn => rl/layer/input}/__init__.py | 0
 vel/rl/layer/nature_cnn.py | 15 +-
 vel/rl/layer/nature_cnn_small.py | 15 +-
 vel/rl/layer/rnn_cell.py | 16 +-
 vel/rl/module/actor_critic_policy.py | 10 ++
 vel/rl/policy/ddpg.py | 3 +-
 vel/rl/vecenv/dummy.py | 4 +-
 vel/train/phase/cycle.py | 7 +
 vel/train/phase/freeze.py | 8 +-
 vel/util/module_util.py | 5 +
 51 files changed, 984 insertions(+), 696 deletions(-)
 create mode 100644 vel/model/nlp/sequence_classification.py
 delete mode 100644 vel/model/rnn/multilayer_rnn_sequence_classification.py
 rename vel/module/input/{normalize_observations.py => normalize_expanding.py} (93%)
 delete mode 100644 vel/net/layer/input/normalize.py
 create mode 100644 vel/net/layer/input/normalize_expanding.py
 create mode 100644 vel/net/layer/nlp/pretrained_embedding.py
 create mode 100644 vel/net/layer/nlp/select_final_features.py
 create mode 100644 vel/net/sequence.py
 rename vel/{model/rnn => rl/layer/input}/__init__.py (100%)

diff --git a/examples-configs/classification/imagenet_transfer/cats_vs_dogs_resnet34.yaml b/examples-configs/classification/imagenet_transfer/cats_vs_dogs_resnet34.yaml
index a53623fb..da2f8b8f
100644 --- a/examples-configs/rl/mujoco/mujoco_a2c.yaml +++ b/examples-configs/rl/mujoco/mujoco_a2c.yaml @@ -22,7 +22,7 @@ model: net: name: vel.net.modular layers: - - name: vel.net.layer.input.normalize + - name: vel.net.layer.input.normalize_expanding - name: vel.net.layer.mlp hidden_layers: [64, 64] activation: 'tanh' diff --git a/examples-configs/rl/mujoco/mujoco_ddpg.yaml b/examples-configs/rl/mujoco/mujoco_ddpg.yaml index d82f0baf..f75ef7e3 100644 --- a/examples-configs/rl/mujoco/mujoco_ddpg.yaml +++ b/examples-configs/rl/mujoco/mujoco_ddpg.yaml @@ -1,5 +1,6 @@ name: 'mujoco_ddpg' + env: name: vel.rl.env.mujoco game: !param game = 'Reacher-v2' @@ -20,10 +21,11 @@ model: input_net: name: vel.net.modular layers: - - name: vel.net.layer.input.normalize + - name: vel.net.layer.input.normalize_expanding actor_net: name: vel.net.modular + group: 'actor' layers: - name: vel.net.layer.mlp hidden_layers: [64, 64] @@ -31,6 +33,7 @@ model: critic_net: name: vel.net.modular + group: 'critic' layers: - name: vel.net.layer.util.concat # Concatenate observation and action - name: vel.net.layer.mlp @@ -63,9 +66,14 @@ optimizer: name: vel.optimizer.adam # OpenAI has two different optimizers optimizing each network separately. # As far as I know it should be equivalent to optimizing two separate networks together with a sum of loss functions - lr: [1.0e-4, 1.0e-3] - weight_decay: [0.0, 0.0] + lr: 1.0e-3 + weight_decay: 0.0 epsilon: 1.0e-4 + parameter_groups: + actor: + lr: 1.0e-4 + critic: + lr: 1.0e-3 commands: diff --git a/examples-configs/rl/mujoco/mujoco_ppo.yaml b/examples-configs/rl/mujoco/mujoco_ppo.yaml index 63de365a..975eabf6 100644 --- a/examples-configs/rl/mujoco/mujoco_ppo.yaml +++ b/examples-configs/rl/mujoco/mujoco_ppo.yaml @@ -25,7 +25,7 @@ model: net: name: vel.net.modular layers: - - name: vel.net.layer.input.normalize + - name: vel.net.layer.input.normalize_expanding - name: vel.net.layer.util.repeat times: 2 # Need to repeat output twice, to consume by the 'parallel' layers - name: vel.net.layer.arch.parallel diff --git a/examples-configs/rl/mujoco/mujoco_trpo.yaml b/examples-configs/rl/mujoco/mujoco_trpo.yaml index 743877f1..f88fc5ba 100644 --- a/examples-configs/rl/mujoco/mujoco_trpo.yaml +++ b/examples-configs/rl/mujoco/mujoco_trpo.yaml @@ -1,5 +1,6 @@ name: 'mujoco_trpo' + env: name: vel.rl.env.mujoco game: !param game = 'Reacher-v2' @@ -27,7 +28,7 @@ model: input_net: name: vel.net.modular layers: - - name: vel.net.layer.input.normalize + - name: vel.net.layer.input.normalize_expanding policy_net: name: vel.net.modular diff --git a/vel/api/optimizer.py b/vel/api/optimizer.py index e9f97f83..e5d8a50c 100644 --- a/vel/api/optimizer.py +++ b/vel/api/optimizer.py @@ -49,22 +49,39 @@ def create_scheduler(self, scheduler_factory: SchedulerFactory, last_epoch: int class VelOptimizerProxy(VelOptimizer): """ Proxy PyTorch optimizer into a Vel optimizer """ - def __init__(self, optimizer: Optimizer, max_grad_norm: typing.Optional[float] = None): + def __init__(self, optimizer: Optimizer, group_names: [str], max_grad_norm: typing.Optional[float] = None): self.optimizer = optimizer + self.group_names = group_names self.max_grad_norm = max_grad_norm + if 'default' in self.group_names: + self.main_idx = self.group_names.index('default') + else: + self.main_idx = len(self.group_names) - 1 + + assert len(self.optimizer.param_groups) == len(self.group_names), \ + "There must be equal number of parameter groups and group names" + + self.initial_lrs = [x['lr'] for x in 
self.optimizer.param_groups] + def get_lr(self) -> float: """ Return current learning rate of the optimizer """ - return self.optimizer.param_groups[-1]['lr'] + return self.optimizer.param_groups[self.main_idx]['lr'] def set_lr(self, lr: float): """ Set current learning rate of the optimizer """ if isinstance(lr, list): for group_lr, param_group in zip(lr, self.optimizer.param_groups): param_group['lr'] = group_lr + elif isinstance(lr, dict): + for idx, name in enumerate(self.group_names): + self.optimizer.param_groups[idx]['lr'] = lr[name] else: - for param_group in self.optimizer.param_groups: - param_group['lr'] = lr + canonical_lr = self.initial_lrs[0] + + for idx, param_group in enumerate(self.optimizer.param_groups): + opt_lr = self.initial_lrs[idx] / canonical_lr * lr + param_group['lr'] = opt_lr def state_dict(self) -> dict: return self.optimizer.state_dict() @@ -112,7 +129,10 @@ def __init__(self, optimizers: typing.Dict[str, VelOptimizer], canonical_name: t self.optimizers = optimizers # Canonical, chosen optimizer - self.canonical_name = list(optimizers.keys())[0] + if canonical_name is None: + self.canonical_name = list(optimizers.keys())[0] + else: + self.canonical_name = canonical_name self.initial_lrs = { name: optimizer.get_lr() @@ -126,11 +146,18 @@ def get_lr(self) -> float: return self.optimizers[self.canonical_name].get_lr() def set_lr(self, lr: float): - canonical_lr = self.initial_lrs[self.canonical_name] + if isinstance(lr, list): + # TODO: implement + raise NotImplementedError + elif isinstance(lr, dict): + # TODO: implement + raise NotImplementedError + else: + canonical_lr = self.initial_lrs[self.canonical_name] - for name, optimizer in self.optimizers.items(): - opt_lr = self.initial_lrs[name] / canonical_lr * lr - optimizer.set_lr(opt_lr) + for name, optimizer in self.optimizers.items(): + opt_lr = self.initial_lrs[name] / canonical_lr * lr + optimizer.set_lr(opt_lr) def state_dict(self) -> dict: output = {} @@ -173,11 +200,48 @@ def metrics(self) -> list: class OptimizerFactory: """ Base class for optimizer factories """ + def __init__(self): + self.parameter_groups = None - def instantiate(self, parameters) -> VelOptimizer: - raise NotImplementedError + def with_parameter_groups(self, parameter_groups=None): + """ Set `parameter_groups` for this factory """ + self.parameter_groups = parameter_groups + return self - def instantiate_parameter_groups(self, parameters) -> VelOptimizer: + def preprocess(self, parameters): + """ Preprocess given parameters input into proper optimizer parameter groups, with their names """ + parameters = list(parameters) + + # Make sure parameters have right format + if parameters: + if not isinstance(parameters[0], collections.Sequence) or not isinstance(parameters[0][0], str): + parameters = [("default", parameters)] + + groups = collections.defaultdict(list) + + for name, group in parameters: + group = [x for x in group if x.requires_grad] + if group: # Must have at least 1 element + groups[name].extend(group) + + group_names = [] + sorted_groups = [] + + for name in sorted(groups.keys()): + parameter_group = { + 'params': groups[name] + } + + if self.parameter_groups and name in self.parameter_groups: + parameter_group.update(self.parameter_groups[name]) + + sorted_groups.append(parameter_group) + group_names.append(name) + + return sorted_groups, group_names + + def instantiate(self, parameters) -> VelOptimizer: + """ Instantiate VelOptimizer for iterable of parameters or iterable of (parameter, group) """ raise 
NotImplementedError def instantiate_multi(self, parameter_dict: dict) -> VelMultiOptimizer: diff --git a/vel/api/vmodule.py b/vel/api/vmodule.py index 1374258e..4c6d9c50 100644 --- a/vel/api/vmodule.py +++ b/vel/api/vmodule.py @@ -19,6 +19,10 @@ def zero_state(self, batch_size): """ Potential state for the model """ return None + def grouped_parameters(self): + """ Return iterable of parameters (group, parameters) """ + return [("default", self.parameters())] + class BackboneModule(VModule): """ Network, whose output feeds into other models. Needs to provide size hints. """ diff --git a/vel/data/bucket_loader.py b/vel/data/bucket_loader.py index 46f70a4b..740d83c1 100644 --- a/vel/data/bucket_loader.py +++ b/vel/data/bucket_loader.py @@ -58,6 +58,11 @@ def size(self): """ Get a dict of sizes of each loader """ return self._loader_sizes + @property + def alphabet_size(self): + """ Size of the text alphabet """ + return self.source.metadata.get('alphabet_size', 0) + def create(model_config: ModelConfig, source: LanguageSource, batch_size: int): """ Vel factory function """ diff --git a/vel/data/source/nlp/imdb.py b/vel/data/source/nlp/imdb.py index 71168756..badf59c7 100644 --- a/vel/data/source/nlp/imdb.py +++ b/vel/data/source/nlp/imdb.py @@ -45,7 +45,7 @@ def __init__(self, path, text_field, label_field, **kwargs): data.Dataset.__init__(self, examples, fields, **kwargs) -def create(model_config, data_dir='imdb', vectors=None): +def create(model_config, vocab_size: int, data_dir='imdb', vectors=None): """ Create an IMDB dataset """ path = model_config.data_dir(data_dir) @@ -58,7 +58,7 @@ def create(model_config, data_dir='imdb', vectors=None): label_field=label_field ) - text_field.build_vocab(train_source, max_size=25_000, vectors=vectors) + text_field.build_vocab(train_source, max_size=vocab_size, vectors=vectors) label_field.build_vocab(train_source) return LanguageSource( @@ -68,16 +68,8 @@ def create(model_config, data_dir='imdb', vectors=None): mapping={ 'x': 'text', 'y': 'label' + }, + metadata={ + 'alphabet_size': vocab_size+2 } ) - - # train_iterator, test_iterator = data.BucketIterator.splits( - # (train_source, test_source), - # batch_size=batch_size, - # device=model_config.torch_device(), - # shuffle=True - # ) - - # return SupervisedTextData( - # train_source, test_source, train_iterator, test_iterator, text_field, label_field - # ) diff --git a/vel/model/imagenet/resnet34.py b/vel/model/imagenet/resnet34.py index 3537a226..6139a44d 100644 --- a/vel/model/imagenet/resnet34.py +++ b/vel/model/imagenet/resnet34.py @@ -63,14 +63,16 @@ def __init__(self, fc_layers=None, dropout=None, pretrained=True): self.model = final_model - def freeze(self, number=None): + def freeze(self, groups=None): """ Freeze given number of layers in the model """ - if number is None: - number = self.head_layers + layer_groups = dict(self.layer_groups()) - for idx, child in enumerate(self.model.children()): - if idx < number: - mu.freeze_layer(child) + if groups is None: + groups = layer_groups.keys() + + for group in groups: + for module in layer_groups[group]: + mu.freeze_layer(module) def unfreeze(self): """ Unfreeze model layers """ @@ -82,11 +84,18 @@ def layer_groups(self): g1 = list(self.model[:self.group_cut_layers[0]]) g2 = list(self.model[self.group_cut_layers[0]:self.group_cut_layers[1]]) g3 = list(self.model[self.group_cut_layers[1]:]) - return [g1, g2, g3] + + return [ + ('top', g1), + ('mid', g2), + ('bottom', g3) + ] + + def parameter_groups(self): + return [(name, 
mu.module_list_to_param_list(m)) for name, m in self.layer_groups()] def create_optimizer(self, optimizer_factory: OptimizerFactory) -> VelOptimizer: - parameters = mu.to_parameter_groups(self.layer_groups()) - return optimizer_factory.instantiate_parameter_groups(parameters) + return optimizer_factory.instantiate(self.parameter_groups()) def forward(self, x): """ Calculate model value """ diff --git a/vel/model/nlp/language_model.py b/vel/model/nlp/language_model.py index e27835b9..def34db5 100644 --- a/vel/model/nlp/language_model.py +++ b/vel/model/nlp/language_model.py @@ -11,7 +11,6 @@ class LanguageModel(LossFunctionModel): def __init__(self, alphabet_size: int, net: BackboneModule): super().__init__() - self.net = net self.alphabet_size = alphabet_size self.output_dim = self.alphabet_size + 1 @@ -43,11 +42,11 @@ def forward(self, input_data: torch.Tensor, state=None) -> torch.Tensor: """ if self.net.is_stateful: output, new_state = self.net(input_data, state=state) + return F.log_softmax(self.output_layer(output), dim=-1), new_state else: output = self.net(input_data) - new_state = state + return F.log_softmax(self.output_layer(output), dim=-1) - return F.log_softmax(self.output_layer(output), dim=-1), new_state def loss_value(self, x_data, y_true, y_pred) -> torch.tensor: """ Calculate a value of loss function """ diff --git a/vel/model/nlp/sequence_classification.py b/vel/model/nlp/sequence_classification.py new file mode 100644 index 00000000..1aa6bd3e --- /dev/null +++ b/vel/model/nlp/sequence_classification.py @@ -0,0 +1,91 @@ +import itertools as it + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from vel.api import ( + LossFunctionModel, ModuleFactory, VModule, BackboneModule, SizeHints, SizeHint, OptimizerFactory, + VelOptimizer +) +from vel.metric.accuracy import Accuracy +from vel.metric.loss_metric import Loss + + +class SequenceClassification(LossFunctionModel): + """ NLP (text) sequence classification """ + + def __init__(self, net: BackboneModule, output_size: int): + super().__init__() + + self.net = net + self.output_layer = nn.Linear( + in_features=self.net.size_hints().assert_single().last(), + out_features=output_size + ) + + @property + def is_stateful(self) -> bool: + """ If the model has a state that needs to be fed between individual observations """ + return self.net.is_stateful + + def zero_state(self, batch_size): + """ Potential state for the model """ + return self.net.zero_state(batch_size) + + def forward(self, input_data: torch.Tensor, state=None) -> torch.Tensor: + r"""Defines the computation performed at every call. + + Should be overridden by all subclasses. + + .. note:: + Although the recipe for forward pass needs to be defined within + this function, one should call the :class:`Module` instance afterwards + instead of this since the former takes care of running the + registered hooks while the latter silently ignores them. 
+ """ + if self.net.is_stateful: + output, new_state = self.net(input_data, state=state) + output = F.log_softmax(self.output_layer(output), dim=-1) + return output, new_state + else: + output = self.net(input_data) + output = F.log_softmax(self.output_layer(output), dim=-1) + return output + + def loss_value(self, x_data, y_true, y_pred) -> torch.tensor: + """ Calculate a value of loss function """ + return F.nll_loss(y_pred, y_true) + + def create_optimizer(self, optimizer_factory: OptimizerFactory) -> VelOptimizer: + grouped = self.net.grouped_parameters() + parameters = it.chain(grouped, [("output", self.output_layer.parameters())]) + return optimizer_factory.instantiate(parameters) + + def metrics(self) -> list: + """ Set of metrics for this model """ + return [Loss(), Accuracy()] + + +class SequenceClassificationFactory(ModuleFactory): + def __init__(self, net_factory: ModuleFactory, alphabet_size: int, output_dim: int): + self.net_factory = net_factory + self.output_dim = output_dim + self.alphabet_size = alphabet_size + + def instantiate(self, **extra_args) -> VModule: + size_hint = SizeHints(SizeHint(None, None)) + net = self.net_factory.instantiate(alphabet_size=self.alphabet_size, size_hint=size_hint) + + return SequenceClassification( + net=net, output_size=self.output_dim + ) + + +def create(loader, net: ModuleFactory, output_dim: int): + """ Vel factory function """ + return SequenceClassificationFactory( + net_factory=net, + alphabet_size=loader.alphabet_size, + output_dim=output_dim + ) diff --git a/vel/model/rnn/multilayer_rnn_sequence_classification.py b/vel/model/rnn/multilayer_rnn_sequence_classification.py deleted file mode 100644 index 82953b5b..00000000 --- a/vel/model/rnn/multilayer_rnn_sequence_classification.py +++ /dev/null @@ -1,167 +0,0 @@ -import typing - -import torch -import torch.nn.functional as F -import torch.nn as nn - -import vel.util.module_util as mu - -from vel.api import LossFunctionModel, ModuleFactory, LinearBackboneModel, OptimizerFactory, VelOptimizer -from vel.metric.accuracy import Accuracy -from vel.metric.loss_metric import Loss -from vel.module.rnn_layer import RnnLayer - - -class MultilayerRnnSequenceClassification(LossFunctionModel): - """ Multilayer RNN network for sequence modeling (n:1) """ - - def __init__(self, input_block: LinearBackboneModel, rnn_type: str, output_dim: int, - rnn_layers: typing.List[int], rnn_dropout: float = 0.0, bidirectional: bool = False, - linear_layers: typing.List[int] = None, linear_dropout: float = 0.0): - super().__init__() - - self.output_dim = output_dim - - self.rnn_layers_sizes = rnn_layers - self.rnn_dropout = rnn_dropout - self.linear_layers_sizes = linear_layers - self.linear_dropout = linear_dropout - - self.bidirectional = bidirectional - self.input_block = input_block - - current_dim = self.input_block.output_dim - - self.rnn_layers = [] - self.rnn_dropout_layers = [] - - bidirectional_multiplier = 1 - - for idx, current_layer in enumerate(rnn_layers, 1): - rnn = RnnLayer( - input_size=current_dim * bidirectional_multiplier, - hidden_size=current_layer, - rnn_type=rnn_type, - bidirectional=bidirectional, - ) - - self.add_module('{}{:02}'.format(rnn_type, idx), rnn) - self.rnn_layers.append(rnn) - - if self.rnn_dropout > 0.0: - dropout_layer = nn.Dropout(p=self.rnn_dropout) - - self.add_module('rnn_dropout{:02}'.format(idx), dropout_layer) - self.rnn_dropout_layers.append(dropout_layer) - - current_dim = current_layer - - if self.bidirectional: - bidirectional_multiplier = 2 - else: - 
bidirectional_multiplier = 1 - - self.linear_layers = [] - self.linear_dropout_layers = [] - - for idx, current_layer in enumerate(linear_layers, 1): - linear_layer = nn.Linear(current_dim * bidirectional_multiplier, current_layer) - - self.add_module('linear{:02}'.format(idx), linear_layer) - self.linear_layers.append(linear_layer) - - if self.linear_dropout > 0.0: - dropout_layer = nn.Dropout(p=self.linear_dropout) - - self.add_module('linear_dropout{:02}'.format(idx), dropout_layer) - self.linear_dropout_layers.append(dropout_layer) - - bidirectional_multiplier = 1 - current_dim = current_layer - - if self.bidirectional: - self.output_layer = nn.Linear(bidirectional_multiplier * current_dim, output_dim) - else: - self.output_layer = nn.Linear(current_dim, output_dim) - - self.output_activation = nn.LogSoftmax(dim=1) - - def reset_weights(self): - self.input_block.reset_weights() - - for layer in self.linear_layers: - nn.init.kaiming_normal_(layer.weight, nonlinearity='relu') - nn.init.zeros_(layer.bias) - - nn.init.kaiming_normal_(self.output_layer.weight, nonlinearity='relu') - nn.init.zeros_(self.output_layer.bias) - - def forward(self, sequence): - """ Forward propagate batch of sequences through the network, without accounting for the state """ - data = self.input_block(sequence) - - for idx in range(len(self.rnn_layers)): - data, _ = self.rnn_layers[idx](data) - - if self.rnn_dropout_layers: - data = self.rnn_dropout_layers[idx](data) - - # We are interested only in the last element of the sequence - if self.bidirectional: - last_hidden_size = self.rnn_layers_sizes[-1] - data = torch.cat([data[:, -1, :last_hidden_size], data[:, 0, last_hidden_size:]], dim=1) - else: - data = data[:, -1] - - for idx in range(len(self.linear_layers_sizes)): - data = F.relu(self.linear_layers[idx](data)) - - if self.linear_dropout_layers: - data = self.linear_dropout_layers[idx](data) - - data = self.output_layer(data) - - return self.output_activation(data) - - def layer_groups(self): - return [ - self.input_block, - self.rnn_layers, - self.linear_layers, - self.output_layer - ] - - def create_optimizer(self, optimizer_factory: OptimizerFactory) -> VelOptimizer: - """ Create optimizer for the purpose of optimizing this model """ - parameters = mu.to_parameter_groups(self.layer_groups()) - return optimizer_factory.instantiate_parameter_groups(parameters) - - @property - def state_dim(self) -> int: - """ Dimension of model state """ - return sum(x.state_dim for x in self.gru_layers) - - def loss_value(self, x_data, y_true, y_pred): - """ Calculate a value of loss function """ - return F.nll_loss(y_pred, y_true) - - def metrics(self) -> list: - """ Set of metrics for this model """ - return [Loss(), Accuracy()] - - -def create(input_block: ModuleFactory, rnn_type: str, output_dim: int, - rnn_layers: typing.List[int], rnn_dropout: float = 0.0, bidirectional: bool = False, - linear_layers: typing.List[int] = None, linear_dropout: float = 0.0): - """ Vel factory function """ - if linear_layers is None: - linear_layers = [] - - def instantiate(**_): - return MultilayerRnnSequenceClassification( - input_block=input_block.instantiate(), rnn_type=rnn_type, output_dim=output_dim, - rnn_layers=rnn_layers, rnn_dropout=rnn_dropout, bidirectional=bidirectional, - linear_layers=linear_layers, linear_dropout=linear_dropout - ) - - return ModuleFactory.generic(instantiate) diff --git a/vel/module/input/normalize_observations.py b/vel/module/input/normalize_expanding.py similarity index 93% rename from 
vel/module/input/normalize_observations.py rename to vel/module/input/normalize_expanding.py index a7dca4be..b649594f 100644 --- a/vel/module/input/normalize_observations.py +++ b/vel/module/input/normalize_expanding.py @@ -3,8 +3,8 @@ from vel.api import VModule -class NormalizeObservations(VModule): - """ Normalize a vector of observations """ +class NormalizeExpanding(VModule): + """ Normalize a vector of observations - across the batch dim """ def __init__(self, input_shape, epsilon=1e-6): super().__init__() diff --git a/vel/module/rnn_layer.py b/vel/module/rnn_layer.py index 0c5b2c85..1e7a8fa4 100644 --- a/vel/module/rnn_layer.py +++ b/vel/module/rnn_layer.py @@ -1,79 +1,80 @@ -import torch -import torch.nn as nn -import torch.nn.init as init - - -from vel.api import LinearBackboneModel - - -class RnnLayer(LinearBackboneModel): - """ Generalization of RNN layer (Simple RNN, LSTM or GRU) """ - - def __init__(self, input_size, hidden_size, rnn_type, bias=True, bidirectional=False, nonlinearity='tanh'): - super().__init__() - - assert rnn_type in {'rnn', 'lstm', 'gru'}, "RNN type {} is not supported".format(rnn_type) - - self.input_size = input_size - self.hidden_size = hidden_size - self.rnn_type = rnn_type - self.bidirectional = bidirectional - - if self.rnn_type == 'rnn': - self.rnn_cell = nn.RNN( - input_size=input_size, hidden_size=hidden_size, bias=bias, nonlinearity=nonlinearity, - bidirectional=bidirectional, batch_first=True - ) - elif self.rnn_type == 'lstm': - self.rnn_cell = nn.LSTM( - input_size=input_size, hidden_size=hidden_size, bias=bias, - bidirectional=bidirectional, batch_first=True - ) - elif self.rnn_type == 'gru': - self.rnn_cell = nn.GRU( - input_size=input_size, hidden_size=hidden_size, bias=bias, - bidirectional=bidirectional, batch_first=True - ) - - def reset_weights(self): - init.xavier_normal_(self.rnn_cell.weight_hh) - init.xavier_normal_(self.rnn_cell.weight_ih) - init.zeros_(self.rnn_cell.bias_ih) - init.zeros_(self.rnn_cell.bias_hh) - - @property - def output_dim(self) -> int: - """ Final dimension of model output """ - if self.bidirectional: - return 2.0 * self.hidden_size - else: - return self.hidden_size - - @property - def state_dim(self) -> int: - """ Dimension of model state """ - if self.rnn_type == 'lstm': - return 2 * self.hidden_size - else: - return self.hidden_size - - def zero_state(self, batch_size): - """ State for the model """ - return torch.zeros(batch_size, self.state_dim) - - def forward(self, input_data, state=None): - if state is None: - if self.bidirectional: - state = self.zero_state(input_data.size(0)).unsqueeze(0).repeat(2, 1, 1).to(input_data.device) - else: - state = self.zero_state(input_data.size(0)).unsqueeze(0).to(input_data.device) - - if self.rnn_type == 'lstm': - hidden_state, cell_state = torch.split(state, self.hidden_size, 2) - hidden_state = hidden_state.contiguous() - cell_state = cell_state.contiguous() - output, (hidden_state, cell_state) = self.rnn_cell(input_data, (hidden_state, cell_state)) - new_state = torch.cat([hidden_state, cell_state], dim=2) - return output, new_state - else: - return self.rnn_cell(input_data, state) +# Temporarily commented out as it's an invalid code at the moment, may be deleted later +# import torch +# import torch.nn as nn +# import torch.nn.init as init +# +# +# from vel.api import LinearBackboneModel +# +# +# class RnnLayer(LinearBackboneModel): +# """ Generalization of RNN layer (Simple RNN, LSTM or GRU) """ +# +# def __init__(self, input_size, hidden_size, rnn_type, bias=True, 
bidirectional=False, nonlinearity='tanh'): +# super().__init__() +# +# assert rnn_type in {'rnn', 'lstm', 'gru'}, "RNN type {} is not supported".format(rnn_type) +# +# self.input_size = input_size +# self.hidden_size = hidden_size +# self.rnn_type = rnn_type +# self.bidirectional = bidirectional +# +# if self.rnn_type == 'rnn': +# self.rnn_cell = nn.RNN( +# input_size=input_size, hidden_size=hidden_size, bias=bias, nonlinearity=nonlinearity, +# bidirectional=bidirectional, batch_first=True +# ) +# elif self.rnn_type == 'lstm': +# self.rnn_cell = nn.LSTM( +# input_size=input_size, hidden_size=hidden_size, bias=bias, +# bidirectional=bidirectional, batch_first=True +# ) +# elif self.rnn_type == 'gru': +# self.rnn_cell = nn.GRU( +# input_size=input_size, hidden_size=hidden_size, bias=bias, +# bidirectional=bidirectional, batch_first=True +# ) +# +# def reset_weights(self): +# init.xavier_normal_(self.rnn_cell.weight_hh) +# init.xavier_normal_(self.rnn_cell.weight_ih) +# init.zeros_(self.rnn_cell.bias_ih) +# init.zeros_(self.rnn_cell.bias_hh) +# +# @property +# def output_dim(self) -> int: +# """ Final dimension of model output """ +# if self.bidirectional: +# return 2.0 * self.hidden_size +# else: +# return self.hidden_size +# +# @property +# def state_dim(self) -> int: +# """ Dimension of model state """ +# if self.rnn_type == 'lstm': +# return 2 * self.hidden_size +# else: +# return self.hidden_size +# +# def zero_state(self, batch_size): +# """ State for the model """ +# return torch.zeros(batch_size, self.state_dim) +# +# def forward(self, input_data, state=None): +# if state is None: +# if self.bidirectional: +# state = self.zero_state(input_data.size(0)).unsqueeze(0).repeat(2, 1, 1).to(input_data.device) +# else: +# state = self.zero_state(input_data.size(0)).unsqueeze(0).to(input_data.device) +# +# if self.rnn_type == 'lstm': +# hidden_state, cell_state = torch.split(state, self.hidden_size, 2) +# hidden_state = hidden_state.contiguous() +# cell_state = cell_state.contiguous() +# output, (hidden_state, cell_state) = self.rnn_cell(input_data, (hidden_state, cell_state)) +# new_state = torch.cat([hidden_state, cell_state], dim=2) +# return output, new_state +# else: +# return self.rnn_cell(input_data, state) diff --git a/vel/net/layer/arch/parallel.py b/vel/net/layer/arch/parallel.py index c25cc0ec..cebe1e5e 100644 --- a/vel/net/layer/arch/parallel.py +++ b/vel/net/layer/arch/parallel.py @@ -1,14 +1,15 @@ import torch.nn as nn +import typing from vel.api import SizeHints -from vel.net.layer_base import LayerFactory, Layer +from vel.net.layer_base import LayerFactory, Layer, LayerInfo, LayerFactoryContext class ParallelLayer(Layer): """ Network that consists of parallel "towers" """ - def __init__(self, name: str, layers: [Layer]): - super().__init__(name) + def __init__(self, info: LayerInfo, layers: [Layer]): + super().__init__(info) self.layers = nn.ModuleList(layers) self._size_hints = SizeHints(tuple(layer.size_hints().unwrap() for layer in self.layers)) @@ -22,11 +23,16 @@ def forward(self, direct, state: dict = None, context: dict = None): results = [layer(x, state, context) for layer, x in zip(self.layers, direct)] return tuple(results) + def grouped_parameters(self) -> typing.Iterable[(str, object)]: + """ Return iterable of pairs (group, parameters) """ + raise NotImplementedError + class ParallelLayerFactory(LayerFactory): """ Factory for Parallel layer """ def __init__(self, layers: [LayerFactory]): + super().__init__() self.layers = layers @property @@ -34,22 +40,38 @@ def 
name_base(self) -> str: """ Base of layer name """ return "parallel" - def instantiate(self, name: str, direct_input: SizeHints, context: dict, extra_args: dict) -> Layer: + def instantiate(self, direct_input: SizeHints, context: LayerFactoryContext, extra_args: dict) -> Layer: + """ Create a given layer object """ hints = direct_input.assert_tuple(len(self.layers)) layers = [] + info = self.make_info(context) + for idx, (size_hint, layer_factory) in enumerate(zip(hints, self.layers)): counter = idx + 1 - local_name = "{}_{:04d}".format(layer_factory.name_base, counter) - global_name = f"{name}/{local_name}" - layer = layer_factory.instantiate(name=global_name, direct_input=SizeHints(size_hint), context=context) + child_context = LayerFactoryContext( + idx=counter, + parent_group=info.group, + parent_name=info.name, + data=context.data + ) + + layer = layer_factory.instantiate( + direct_input=SizeHints(size_hint), + context=child_context, + extra_args=extra_args + ) + layers.append(layer) - return ParallelLayer(name, layers) + return ParallelLayer( + info=info, + layers=layers + ) -def create(layers: [LayerFactory]): +def create(layers: [LayerFactory], label=None, group=None): """ Vel factory function """ - return ParallelLayerFactory(layers=layers) + return ParallelLayerFactory(layers=layers).with_given_name(label).with_given_group(group) diff --git a/vel/net/layer/dropout.py b/vel/net/layer/dropout.py index 3d6df1f9..ec68f9aa 100644 --- a/vel/net/layer/dropout.py +++ b/vel/net/layer/dropout.py @@ -1,6 +1,6 @@ import torch.nn.functional as F from vel.api import SizeHints -from vel.net.layer_base import Layer, LayerFactory +from vel.net.layer_base import Layer, LayerFactory, LayerFactoryContext, LayerInfo class DropoutLayer(Layer): @@ -11,18 +11,18 @@ class DropoutLayer(Layer): See :class:`~torch.nn.Dropout` for details. 
""" - def __init__(self, name: str, input_size: SizeHints, p: float): - super().__init__(name) + def __init__(self, info: LayerInfo, input_shape: SizeHints, p: float): + super().__init__(info) self.p = p - self.input_size = input_size + self.input_shape = input_shape def forward(self, direct, state: dict = None, context: dict = None): return F.dropout(direct, p=self.p, training=self.training) def size_hints(self) -> SizeHints: """ Size hints for this network """ - return self.input_size + return self.input_shape def extra_repr(self) -> str: """Set the extra representation of the module""" @@ -33,6 +33,7 @@ class DropoutLayerFactory(LayerFactory): """ Factory class for the Dropout layer """ def __init__(self, p: float): + super().__init__() self.p = p @property @@ -40,15 +41,15 @@ def name_base(self) -> str: """ Base of layer name """ return "dropout" - def instantiate(self, name: str, direct_input: SizeHints, context: dict, extra_args: dict) -> Layer: + def instantiate(self, direct_input: SizeHints, context: LayerFactoryContext, extra_args: dict) -> Layer: """ Create a given layer object """ return DropoutLayer( - name=name, - input_size=direct_input, + info=self.make_info(context), + input_shape=direct_input, p=self.p ) -def create(p: float): +def create(p: float, label=None, group=None): """ Vel factory function """ - return DropoutLayerFactory(p) + return DropoutLayerFactory(p).with_given_name(label).with_given_group(group) diff --git a/vel/net/layer/input/image_to_tensor.py b/vel/net/layer/input/image_to_tensor.py index 1f5adb9c..ed073f7f 100644 --- a/vel/net/layer/input/image_to_tensor.py +++ b/vel/net/layer/input/image_to_tensor.py @@ -1,6 +1,6 @@ from vel.api import SizeHints, SizeHint from vel.module.input.image_to_tensor import image_to_tensor -from vel.net.layer_base import LayerFactory, Layer +from vel.net.layer_base import LayerFactory, Layer, LayerFactoryContext, LayerInfo class ImageToTensorLayer(Layer): @@ -9,8 +9,8 @@ class ImageToTensorLayer(Layer): Flip channels to a [C, W, H] order and potentially convert 8-bit color values to floats """ - def __init__(self, name: str, shape: tuple = None): - super().__init__(name) + def __init__(self, info: LayerInfo, shape: tuple = None): + super().__init__(info) if shape is not None: assert len(shape) == 3, "Images must have three dimensions" @@ -27,6 +27,7 @@ def size_hints(self) -> SizeHints: class ImageToTensorLayerFactory(LayerFactory): def __init__(self, shape: tuple = None): + super().__init__() self.shape = shape @property @@ -34,16 +35,19 @@ def name_base(self) -> str: """ Base of layer name """ return "image_to_tensor" - def instantiate(self, name: str, direct_input: SizeHints, context: dict, extra_args: dict) -> Layer: + def instantiate(self, direct_input: SizeHints, context: LayerFactoryContext, extra_args: dict) -> Layer: """ Create a given layer object """ if self.shape is None: shape = direct_input.assert_single().shape() else: shape = self.shape - return ImageToTensorLayer(name=name, shape=shape) + return ImageToTensorLayer( + info=self.make_info(context), + shape=shape + ) -def create(shape: tuple = None): +def create(shape: tuple = None, label=None, group=None): """ Vel factory function """ - return ImageToTensorLayerFactory(shape=shape) + return ImageToTensorLayerFactory(shape=shape).with_given_name(label).with_given_group(group) diff --git a/vel/net/layer/input/normalize.py b/vel/net/layer/input/normalize.py deleted file mode 100644 index 91766a38..00000000 --- a/vel/net/layer/input/normalize.py +++ /dev/null @@ 
-1,49 +0,0 @@ -import collections.abc as abc - -from vel.api import SizeHints, SizeHint -from vel.module.input.normalize_observations import NormalizeObservations -from vel.net.layer_base import LayerFactory, Layer - - -class NormalizeLayer(Layer): - """ Layer that normalizes the inputs """ - - def __init__(self, name: str, shape): - super().__init__(name) - if not isinstance(shape, abc.Sequence): - self.shape = (shape,) - else: - self.shape = shape - - self.normalize = NormalizeObservations(input_shape=shape) - - def forward(self, direct, state: dict = None, context: dict = None): - return self.normalize(direct) - - def size_hints(self) -> SizeHints: - return SizeHints(SizeHint(*([None] + list(self.shape)))) - - -class NormalizeLayerFactory(LayerFactory): - def __init__(self, shape=None): - self.shape = shape - - @property - def name_base(self) -> str: - """ Base of layer name """ - return "image_to_tensor" - - def instantiate(self, name: str, direct_input: SizeHints, context: dict, extra_args: dict) -> Layer: - """ Create a given layer object """ - # Potential improvement here is to use either direct input or size parameter - if self.shape is None: - shape = direct_input.assert_single().shape() - else: - shape = self.shape - - return NormalizeLayer(name=name, shape=shape) - - -def create(shape=None): - """ Vel factory function """ - return NormalizeLayerFactory(shape=shape) diff --git a/vel/net/layer/input/normalize_expanding.py b/vel/net/layer/input/normalize_expanding.py new file mode 100644 index 00000000..35dc2abe --- /dev/null +++ b/vel/net/layer/input/normalize_expanding.py @@ -0,0 +1,50 @@ +from vel.api import SizeHints, SizeHint +from vel.module.input.normalize_expanding import NormalizeExpanding +from vel.net.layer_base import LayerFactory, Layer, LayerFactoryContext, LayerInfo + + +class NormalizeLayer(Layer): + """ Layer that normalizes the inputs """ + + def __init__(self, info: LayerInfo, input_shape: SizeHints): + super().__init__(info) + + self.input_shape = input_shape + + self.normalize = NormalizeExpanding( + input_shape=self.input_shape.assert_single()[1:] # Remove batch axis + ) + + def forward(self, direct, state: dict = None, context: dict = None): + return self.normalize(direct) + + def size_hints(self) -> SizeHints: + return self.input_shape + + +class NormalizeLayerFactory(LayerFactory): + def __init__(self, shape=None): + super().__init__() + self.shape = shape + + @property + def name_base(self) -> str: + """ Base of layer name """ + return "image_to_tensor" + + def instantiate(self, direct_input: SizeHints, context: LayerFactoryContext, extra_args: dict) -> Layer: + """ Create a given layer object """ + if self.shape is None: + input_shape = direct_input + else: + input_shape = SizeHints(SizeHint(*([None] + list(self.shape)))) + + return NormalizeLayer( + info=self.make_info(context), + input_shape=input_shape + ) + + +def create(shape=None): + """ Vel factory function """ + return NormalizeLayerFactory(shape=shape) diff --git a/vel/net/layer/mlp.py b/vel/net/layer/mlp.py index 551bf7f5..0e7d27b3 100644 --- a/vel/net/layer/mlp.py +++ b/vel/net/layer/mlp.py @@ -13,17 +13,17 @@ import vel.util.network as net_util from vel.api import SizeHints -from vel.net.layer_base import LayerFactory, Layer +from vel.net.layer_base import LayerFactory, Layer, LayerInfo, LayerFactoryContext class MLP(Layer): """ Simple Multi-Layer-Perceptron network """ - def __init__(self, name: str, input_size: SizeHints, hidden_layers: typing.List[int], activation: str = 'tanh', - 
normalization: typing.Optional[str] = None): - super().__init__(name) + def __init__(self, info: LayerInfo, input_shape: SizeHints, hidden_layers: typing.List[int], + activation: str = 'tanh', normalization: typing.Optional[str] = None): + super().__init__(info) - self.input_size = input_size - self.input_length = input_size.assert_single().last() + self.input_shape = input_shape + self.input_length = input_shape.assert_single().last() self.hidden_layers = hidden_layers self.activation = activation self.normalization = normalization @@ -31,18 +31,18 @@ def __init__(self, name: str, input_size: SizeHints, hidden_layers: typing.List[ layer_objects = [] layer_sizes = zip([self.input_length] + hidden_layers, hidden_layers) - for input_size, output_size in layer_sizes: - layer_objects.append(nn.Linear(input_size, output_size)) + for i_size, o_size in layer_sizes: + layer_objects.append(nn.Linear(i_size, o_size)) if self.normalization: - layer_objects.append(net_util.normalization(normalization)(output_size)) + layer_objects.append(net_util.normalization(normalization)(o_size)) layer_objects.append(net_util.activation(activation)()) self.model = nn.Sequential(*layer_objects) self.hidden_units = hidden_layers[-1] if hidden_layers else self.input_length - self.output_size = input_size.assert_single().drop_last().append(self.hidden_units) + self.output_shape = SizeHints(input_shape.assert_single().drop_last().append(self.hidden_units)) def reset_weights(self): """ Call proper initializers for the weights """ @@ -57,12 +57,13 @@ def forward(self, direct, state: dict = None, context: dict = None): def size_hints(self) -> SizeHints: """ Size hints for this network """ - return SizeHints(self.output_size) + return self.output_shape class MLPFactory(LayerFactory): def __init__(self, hidden_layers: typing.List[int], activation: str = 'tanh', normalization: typing.Optional[str] = None): + super().__init__() self.hidden_layers = hidden_layers self.activation = activation self.normalization = normalization @@ -72,17 +73,19 @@ def name_base(self) -> str: """ Base of layer name """ return "mlp" - def instantiate(self, name: str, direct_input: SizeHints, context: dict, extra_args: dict) -> Layer: + def instantiate(self, direct_input: SizeHints, context: LayerFactoryContext, extra_args: dict) -> Layer: """ Create a given layer object """ return MLP( - name=name, - input_size=direct_input, + info=self.make_info(context), + input_shape=direct_input, hidden_layers=self.hidden_layers, activation=self.activation, normalization=self.normalization ) -def create(hidden_layers, activation='tanh', normalization=None): +def create(hidden_layers: [int], activation='tanh', normalization=None, label=None, group=None): """ Vel factory function """ - return MLPFactory(hidden_layers=hidden_layers, activation=activation, normalization=normalization) + return MLPFactory( + hidden_layers=hidden_layers, activation=activation, normalization=normalization + ).with_given_name(label).with_given_group(group) diff --git a/vel/net/layer/nlp/alphabet_embedding.py b/vel/net/layer/nlp/alphabet_embedding.py index 2fec003a..2e61b86d 100644 --- a/vel/net/layer/nlp/alphabet_embedding.py +++ b/vel/net/layer/nlp/alphabet_embedding.py @@ -1,34 +1,36 @@ import torch.nn as nn from vel.api import SizeHints -from vel.net.layer_base import Layer, LayerFactory +from vel.net.layer_base import Layer, LayerFactory, LayerFactoryContext, LayerInfo class AlphabetEmbeddingLayer(Layer): """ Encode incoming tensor encoded using certain alphabet into one-hot 
encoding """ - def __init__(self, name: str, alphabet_size: int, dim: int, input_shape: SizeHints): - super().__init__(name) + def __init__(self, info: LayerInfo, alphabet_size: int, dim: int, input_shape: SizeHints): + super().__init__(info) self.alphabet_size = alphabet_size self.dim = dim - self.output_size = SizeHints(input_shape.assert_single().append(self.dim)) + self.output_shape = SizeHints(input_shape.assert_single().append(self.dim)) self.layer = nn.Embedding(self.alphabet_size + 1, self.dim) def forward(self, direct, state: dict = None, context: dict = None): + """ Forward propagation of a single layer """ return self.layer(direct) def size_hints(self) -> SizeHints: """ Size hints for this network """ - return self.output_size + return self.output_shape class AlphabetEmbeddingLayerFactory(LayerFactory): """ Factory class for the AlphabetOneHotEncode layer """ def __init__(self, dim: int): + super().__init__() self.dim = dim @property @@ -36,17 +38,18 @@ def name_base(self) -> str: """ Base of layer name """ return "alphabet_embedding" - def instantiate(self, name: str, direct_input: SizeHints, context: dict, extra_args: dict) -> Layer: + def instantiate(self, direct_input: SizeHints, context: LayerFactoryContext, extra_args: dict) -> Layer: + """ Create a given layer object """ alphabet_size = extra_args['alphabet_size'] return AlphabetEmbeddingLayer( - name=name, + info=self.make_info(context), alphabet_size=alphabet_size, dim=self.dim, input_shape=direct_input ) -def create(dim: int): +def create(dim: int, label=None, group=None): """ Vel factory function """ - return AlphabetEmbeddingLayerFactory(dim) + return AlphabetEmbeddingLayerFactory(dim).with_given_name(label).with_given_group(group) diff --git a/vel/net/layer/nlp/alphabet_one_hot_encode.py b/vel/net/layer/nlp/alphabet_one_hot_encode.py index b26745c5..1198369b 100644 --- a/vel/net/layer/nlp/alphabet_one_hot_encode.py +++ b/vel/net/layer/nlp/alphabet_one_hot_encode.py @@ -1,5 +1,5 @@ from vel.api import SizeHints -from vel.net.layer_base import Layer, LayerFactory +from vel.net.layer_base import Layer, LayerFactory, LayerInfo, LayerFactoryContext from vel.util.tensor_util import one_hot_encoding @@ -8,40 +8,38 @@ class AlphabetOneHotEncodeLayer(Layer): """ Encode incoming tensor encoded using certain alphabet into one-hot encoding """ - def __init__(self, name: str, alphabet_size: int, input_shape: SizeHints): - super().__init__(name) + def __init__(self, info: LayerInfo, alphabet_size: int, input_shape: SizeHints): + super().__init__(info) self.alphabet_size = alphabet_size - self.output_size = SizeHints(input_shape.assert_single().append(self.alphabet_size + 1)) + self.output_shape = SizeHints(input_shape.assert_single().append(self.alphabet_size + 1)) def forward(self, direct, state: dict = None, context: dict = None): return one_hot_encoding(direct, num_labels=self.alphabet_size + 1) def size_hints(self) -> SizeHints: """ Size hints for this network """ - return self.output_size + return self.output_shape class AlphabetOneHotEncodeLayerFactory(LayerFactory): """ Factory class for the AlphabetoneHotEncode layer """ - def __init__(self): - pass - @property def name_base(self) -> str: """ Base of layer name """ return "alphabet_one_hot_encode" - def instantiate(self, name: str, direct_input: SizeHints, context: dict, extra_args: dict) -> Layer: + def instantiate(self, direct_input: SizeHints, context: LayerFactoryContext, extra_args: dict) -> Layer: + """ Create a given layer object """ alphabet_size = 
extra_args['alphabet_size'] return AlphabetOneHotEncodeLayer( - name=name, + info=self.make_info(context), alphabet_size=alphabet_size, input_shape=direct_input ) -def create(): +def create(label=None, group=None): """ Vel factory function """ - return AlphabetOneHotEncodeLayerFactory() + return AlphabetOneHotEncodeLayerFactory().with_given_name(label).with_given_group(group) diff --git a/vel/net/layer/nlp/pretrained_embedding.py b/vel/net/layer/nlp/pretrained_embedding.py new file mode 100644 index 00000000..45350838 --- /dev/null +++ b/vel/net/layer/nlp/pretrained_embedding.py @@ -0,0 +1,61 @@ +import numpy as np + +import torch.nn as nn + +from vel.api import SizeHints, LanguageSource +from vel.net.layer_base import Layer, LayerFactory, LayerFactoryContext, LayerInfo + + +class PretrainedEmbeddingLayer(Layer): + """ Load a pretrained word embedding """ + def __init__(self, info: LayerInfo, vectors: np.ndarray, input_shape: SizeHints, freeze: bool = False): + super().__init__(info) + + self.output_shape = SizeHints(input_shape.assert_single().append(vectors.shape[1])) + + self.layer = nn.Embedding(vectors.shape[0], vectors.shape[1]) + self.layer.weight.data.copy_(vectors) + + self.freeze = freeze + + if self.freeze: + self.layer.weight.requires_grad_(False) + + def forward(self, direct, state: dict = None, context: dict = None): + return self.layer(direct) + + def size_hints(self) -> SizeHints: + """ Size hints for this network """ + return self.output_shape + + +class PretrainedEmbeddingLayerFactory(LayerFactory): + """ Load a pretrained word embedding """ + def __init__(self, source: LanguageSource, vectors: str, freeze: bool): + super().__init__() + self.vectors = vectors + self.source = source + self.freeze = freeze + + @property + def name_base(self) -> str: + """ Base of layer name """ + return "pretrained_embedding" + + def instantiate(self, direct_input: SizeHints, context: LayerFactoryContext, extra_args: dict) -> Layer: + vocab = self.source.fields[self.source.mapping['x']].vocab + vocab.load_vectors(self.vectors) + + return PretrainedEmbeddingLayer( + info=self.make_info(context), + vectors=vocab.vectors, + freeze=self.freeze, + input_shape=direct_input, + ) + + +def create(source: LanguageSource, vectors: str, freeze: bool = False, label=None, group=None): + """ Vel factory function """ + return PretrainedEmbeddingLayerFactory( + source, vectors, freeze=freeze + ).with_given_name(label).with_given_group(group) diff --git a/vel/net/layer/nlp/select_final_features.py b/vel/net/layer/nlp/select_final_features.py new file mode 100644 index 00000000..4b55b303 --- /dev/null +++ b/vel/net/layer/nlp/select_final_features.py @@ -0,0 +1,65 @@ +import torch + +from vel.api import SizeHints, SizeHint +from vel.net.layer_base import Layer, LayerFactory, LayerInfo, LayerFactoryContext + + +class SelectFinalFeaturesLayer(Layer): + """ + For many sequence processing tasks we only care about the output from the final element + """ + def __init__(self, info: LayerInfo, bidirectional: bool, input_shape: SizeHints): + super().__init__(info) + + self.bidirectional = bidirectional + + b, s, x = input_shape.assert_single(3) + self.output_shape = SizeHints(SizeHint(b, x)) + + def forward(self, direct, state: dict = None, context: dict = None): + if self.bidirectional: + final_shape = direct.shape[-1] + assert final_shape % 2 == 0 + half_final_shape = final_shape // 2 + + # dimensions are: batch, seq, features + # first one is from forward pass + # second one is backward pass + part1 = direct[:, -1, 
:half_final_shape] + part2 = direct[:, 0, half_final_shape:] + + return torch.cat([part1, part2], dim=1) + else: + return direct[:, -1, :] + + def size_hints(self) -> SizeHints: + """ Size hints for this network """ + return self.output_shape + + +class SelectFinalFeaturesLayerFactory(LayerFactory): + """ Factory for the SelectFinalFeatures layer """ + + def __init__(self, bidirectional: bool = False): + super().__init__() + self.bidirectional = bidirectional + + @property + def name_base(self) -> str: + """ Base of layer name """ + return "select_final_features" + + def instantiate(self, direct_input: SizeHints, context: LayerFactoryContext, extra_args: dict) -> Layer: + """ Create a given layer object """ + return SelectFinalFeaturesLayer( + info=self.make_info(context), + bidirectional=self.bidirectional, + input_shape=direct_input + ) + + +def create(bidirectional=False, label=None, group=None): + """ Vel factory function """ + return SelectFinalFeaturesLayerFactory( + bidirectional=bidirectional + ).with_given_name(label).with_given_group(group) diff --git a/vel/net/layer/rnn.py b/vel/net/layer/rnn.py index 7cbf9e13..bceb9f3d 100644 --- a/vel/net/layer/rnn.py +++ b/vel/net/layer/rnn.py @@ -3,17 +3,17 @@ import torch.nn.init as init from vel.api import SizeHints -from vel.net.layer_base import Layer, LayerFactory +from vel.net.layer_base import Layer, LayerFactory, LayerFactoryContext, LayerInfo class RnnLayer(Layer): """ Single Recurrent Layer """ - def __init__(self, name: str, input_size: SizeHints, hidden_size: int, rnn_type: str, + def __init__(self, info: LayerInfo, input_shape: SizeHints, hidden_size: int, rnn_type: str, bias: bool = True, bidirectional: bool = False, nonlinearity: str = 'tanh'): - super().__init__(name) + super().__init__(info) - self.input_size = input_size - self.input_length = input_size.assert_single().last() + self.input_shape = input_shape + self.input_length = input_shape.assert_single().last() self.hidden_size = hidden_size self.rnn_type = rnn_type @@ -37,7 +37,10 @@ def __init__(self, name: str, input_size: SizeHints, hidden_size: int, rnn_type: bidirectional=bidirectional, batch_first=True ) - self.output_size = input_size.assert_single().drop_last().append(self.hidden_size) + if self.bidirectional: + self.output_shape = SizeHints(input_shape.assert_single().drop_last().append(2 * self.hidden_size)) + else: + self.output_shape = SizeHints(input_shape.assert_single().drop_last().append(self.hidden_size)) def reset_weights(self): """ Call proper initializers for the weights """ @@ -61,27 +64,30 @@ def state_dim(self) -> int: def zero_state(self, batch_size): """ Potential state for the model """ - return {self.name: torch.zeros(batch_size, self.state_dim)} + if self.bidirectional: + return {self.global_name: torch.zeros(2, batch_size, self.state_dim)} + else: + return {self.global_name: torch.zeros(1, batch_size, self.state_dim)} def forward(self, input_data, state: dict, context: dict = None): """ Forward propagation of a single layer """ if self.rnn_type == 'lstm': - state_tensor = state[self.name].unsqueeze(0) + state_tensor = state[self.name] hidden_state, cell_state = torch.split(state_tensor, self.hidden_size, dim=2) output, (hidden_state, cell_state) = self.rnn_cell( input_data, (hidden_state.contiguous(), cell_state.contiguous()) ) new_state = torch.cat([hidden_state, cell_state], dim=2) - return output, {self.name: new_state[0]} + return output, {self.name: new_state} else: - state_tensor = state[self.name].unsqueeze(0) + state_tensor = 
state[self.name] output, new_state = self.rnn_cell(input_data, state_tensor) - return output, {self.name: new_state[0]} + return output, {self.name: new_state} def size_hints(self) -> SizeHints: """ Size hints for this network """ - return SizeHints(self.output_size) + return self.output_shape class RnnLayerFactory(LayerFactory): @@ -89,6 +95,7 @@ class RnnLayerFactory(LayerFactory): def __init__(self, hidden_size: int, rnn_type: str, bias: bool = True, bidirectional: bool = False, nonlinearity: str = 'tanh'): + super().__init__() self.hidden_size = hidden_size self.rnn_type = rnn_type @@ -101,11 +108,11 @@ def name_base(self) -> str: """ Base of layer name """ return "rnn" - def instantiate(self, name: str, direct_input: SizeHints, context: dict, extra_args: dict) -> Layer: + def instantiate(self, direct_input: SizeHints, context: LayerFactoryContext, extra_args: dict) -> Layer: """ Create instance of 'RnnLayer' """ return RnnLayer( - name=name, - input_size=direct_input, + info=self.make_info(context), + input_shape=direct_input, hidden_size=self.hidden_size, rnn_type=self.rnn_type, bias=self.bias, @@ -115,7 +122,7 @@ def instantiate(self, name: str, direct_input: SizeHints, context: dict, extra_a def create(hidden_size: int, rnn_type: str, bias: bool = True, bidirectional: bool = False, - nonlinearity: str = 'tanh'): + nonlinearity: str = 'tanh', label=None, group=None): """ Vel factory function """ return RnnLayerFactory( hidden_size=hidden_size, @@ -123,4 +130,4 @@ def create(hidden_size: int, rnn_type: str, bias: bool = True, bidirectional: bo bias=bias, bidirectional=bidirectional, nonlinearity=nonlinearity - ) + ).with_given_name(label).with_given_group(group) diff --git a/vel/net/layer/util/concat.py b/vel/net/layer/util/concat.py index 7d2d7b57..511b8c66 100644 --- a/vel/net/layer/util/concat.py +++ b/vel/net/layer/util/concat.py @@ -1,14 +1,14 @@ import torch from vel.api import SizeHints, SizeHint -from vel.net.layer_base import LayerFactory, Layer +from vel.net.layer_base import LayerFactory, Layer, LayerFactoryContext, LayerInfo class Concat(Layer): """ Repeat single tensor multiple times """ - def __init__(self, name: str, size_hints: SizeHints, axis: int = -1): - super().__init__(name) + def __init__(self, info: LayerInfo, size_hints: SizeHints, axis: int = -1): + super().__init__(info) self.axis = axis self._size_hints = size_hints @@ -21,7 +21,9 @@ def size_hints(self) -> SizeHints: class ConcatFactory(LayerFactory): + """ Factory for Concat Layer """ def __init__(self, axis: int = -1): + super().__init__() self.axis = axis @property @@ -29,7 +31,8 @@ def name_base(self) -> str: """ Base of layer name """ return "concat" - def instantiate(self, name: str, direct_input: SizeHints, context: dict, extra_args: dict) -> Layer: + def instantiate(self, direct_input: SizeHints, context: LayerFactoryContext, extra_args: dict) -> Layer: + """ Create a given layer object """ inputs = direct_input.assert_tuple() result = [] @@ -48,12 +51,12 @@ def instantiate(self, name: str, direct_input: SizeHints, context: dict, extra_a result.append(inputs[0][i]) return Concat( - name=name, + info=self.make_info(context), axis=self.axis, size_hints=SizeHints(SizeHint(*result)) ) -def create(axis: int = -1): +def create(axis: int = -1, label=None, group=None): """ Vel factory function """ - return ConcatFactory(axis=axis) + return ConcatFactory(axis=axis).with_given_name(label).with_given_group(group) diff --git a/vel/net/layer/util/repeat.py b/vel/net/layer/util/repeat.py index 9fda8050..8041ebde 
100644 --- a/vel/net/layer/util/repeat.py +++ b/vel/net/layer/util/repeat.py @@ -1,12 +1,13 @@ from vel.api import SizeHints, SizeHint -from vel.net.layer_base import LayerFactory, Layer +from vel.net.layer_base import LayerFactory, Layer, LayerInfo, LayerFactoryContext class RepeatTensor(Layer): """ Repeat single tensor multiple times """ - def __init__(self, name: str, times: int, size_hint: SizeHint): - super().__init__(name) + def __init__(self, info: LayerInfo, times: int, size_hint: SizeHint): + super().__init__(info) + self.times = times self.size_hint = size_hint @@ -19,6 +20,7 @@ def size_hints(self) -> SizeHints: class RepeatTensorFactory(LayerFactory): def __init__(self, times: int): + super().__init__() self.times = times @property @@ -26,14 +28,14 @@ def name_base(self) -> str: """ Base of layer name """ return "repeat_tensor" - def instantiate(self, name: str, direct_input: SizeHints, context: dict, extra_args: dict) -> Layer: + def instantiate(self, direct_input: SizeHints, context: LayerFactoryContext, extra_args: dict) -> Layer: return RepeatTensor( - name=name, + info=self.make_info(context), times=self.times, size_hint=direct_input.assert_single() ) -def create(times: int): +def create(times: int, label=None, group=None): """ Vel factory function """ - return RepeatTensorFactory(times=times) + return RepeatTensorFactory(times=times).with_given_name(label).with_given_group(group) diff --git a/vel/net/layer_base.py b/vel/net/layer_base.py index cdc90487..591041c6 100644 --- a/vel/net/layer_base.py +++ b/vel/net/layer_base.py @@ -1,24 +1,113 @@ +import attr +import typing + from vel.api import BackboneModule, SizeHints +@attr.s(auto_attribs=True) +class LayerInfo: + """ Information about the layer """ + name: str + global_name: str + group: str + + class Layer(BackboneModule): - def __init__(self, name: str): + """ Layer class that fits into modular network framework """ + def __init__(self, info: LayerInfo): super().__init__() - self.name = name - def forward(self, direct, state: dict = None, context: dict = None): + self.info = info + + @property + def name(self) -> str: + """ Name of this layer """ + return self.info.name + + @property + def global_name(self) -> str: + """ Name of this layer - globally unique version """ + return self.info.global_name + + @property + def group(self) -> str: + """ Group of this layer """ + return self.info.group + + def forward(self, direct, state: dict, context: dict): """ Forward propagation of a single layer """ raise NotImplementedError + def grouped_parameters(self): + """ Return iterable of pairs (group, parameters) """ + return [(self.group, self.parameters())] + + +@attr.s(auto_attribs=True) +class LayerFactoryContext: + """ Context information about the layer being currently created """ + + idx: int + """ Index of this layer within parent """ + + parent_group: str + """ Group of the parent layer """ + + parent_name: typing.Optional[str] = None + """ Name of the parent - None if it's a top level layer """ + + data: dict = {} + """ Generic information potentially passed by layer in a hierarchy """ + class LayerFactory: """ Factory for layers """ + def __init__(self): + self.given_name = None + self.given_group = None + + def with_given_name(self, given_name) -> 'LayerFactory': + """ Set given name """ + self.given_name = given_name + return self + + def with_given_group(self, given_group) -> 'LayerFactory': + """ Set given group """ + self.given_group = given_group + return self + + def suggested_name(self, idx: int): + """ 
Reasonable layer name suggestion """ + return "{}_{:04d}".format(self.name_base, idx) + + def make_info(self, context: LayerFactoryContext) -> LayerInfo: + """ Make info for child layer """ + if self.given_name is not None: + name = self.given_name + else: + name = self.suggested_name(context.idx) + + if self.given_group is not None: + group = self.given_group + else: + group = context.parent_group + + if context.parent_name is None: + global_name = name + else: + global_name = f"{context.parent_name}/{name}" + + return LayerInfo( + name=name, + group=group, + global_name=global_name + ) @property def name_base(self) -> str: """ Base of layer name """ raise NotImplementedError - def instantiate(self, name: str, direct_input: SizeHints, context: dict, extra_args: dict) -> Layer: + def instantiate(self, direct_input: SizeHints, context: LayerFactoryContext, extra_args: dict) -> Layer: """ Create a given layer object """ raise NotImplementedError diff --git a/vel/net/modular.py b/vel/net/modular.py index c17fd81a..ca9efecc 100644 --- a/vel/net/modular.py +++ b/vel/net/modular.py @@ -1,10 +1,11 @@ +import itertools as it import collections import torch.nn as nn from vel.api import BackboneModule, ModuleFactory, SizeHints from vel.util.tensor_util import to_device -from .layer_base import LayerFactory +from .layer_base import LayerFactory, LayerFactoryContext class ModularSequential(nn.Module): @@ -30,19 +31,25 @@ def forward(self, direct, state: dict = None, context: dict = None): return direct -def instantiate_layers(layers: [LayerFactory], size_hint: SizeHints, extra_args: dict) -> nn.Module: +def instantiate_layers(layers: [LayerFactory], group: str, size_hint: SizeHints, extra_args: dict) -> nn.Module: """ Instantiate list of layer factories into PyTorch Module """ module_dict = collections.OrderedDict() - context = {} + context_data = {} for idx, layer_factory in enumerate(layers): counter = idx + 1 - name = "{}_{:04d}".format(layer_factory.name_base, counter) - layer = layer_factory.instantiate(name=name, direct_input=size_hint, context=context, extra_args=extra_args) + context = LayerFactoryContext( + idx=counter, + parent_group=group, + parent_name=None, + data=context_data + ) + + layer = layer_factory.instantiate(direct_input=size_hint, context=context, extra_args=extra_args) size_hint = layer.size_hints() - module_dict[name] = layer + module_dict[layer.name] = layer return ModularSequential(module_dict) @@ -79,7 +86,11 @@ def reset_state(self, state, dones): def forward(self, input_data, state=None): context = {} - return self.layers(input_data, context=context) + return self.layers(input_data, state=None, context=context) + + def grouped_parameters(self): + """ Return iterable of pairs (group, parameters) """ + return it.chain.from_iterable(l.grouped_parameters() for l in self.layers) class StatefulModularNetwork(BackboneModule): @@ -125,6 +136,7 @@ def forward(self, input_data, state=None): output_state = {} if state is None: + # input_data.device here may break. 
Should be fixed at some point state = to_device(self.zero_state(input_data.size(0)), input_data.device) for layer in self.layers: @@ -136,18 +148,27 @@ def forward(self, input_data, state=None): return data, output_state + def grouped_parameters(self): + """ Return iterable of pairs (group, parameters) """ + return it.chain.from_iterable(l.grouped_parameters() for l in self.layers) + class ModularNetworkFactory(ModuleFactory): """ Factory class for the modular network """ - def __init__(self, layers: [LayerFactory]): + def __init__(self, layers: [LayerFactory], group=None): self.layers = layers + if group is None: + self.group = "default" + else: + self.group = group + def instantiate(self, size_hint=None, **extra_args) -> BackboneModule: """ Create either stateful or not modular network instance """ if size_hint is None: size_hint = SizeHints() - layers = instantiate_layers(self.layers, size_hint=size_hint, extra_args=extra_args) + layers = instantiate_layers(self.layers, self.group, size_hint=size_hint, extra_args=extra_args) is_stateful = any(l.is_stateful for l in layers) if is_stateful: @@ -156,6 +177,6 @@ def instantiate(self, size_hint=None, **extra_args) -> BackboneModule: return ModularNetwork(layers) -def create(layers: [LayerFactory]): +def create(layers: [LayerFactory], group=None): """ Vel factory function """ - return ModularNetworkFactory(layers) + return ModularNetworkFactory(layers, group) diff --git a/vel/net/sequence.py b/vel/net/sequence.py new file mode 100644 index 00000000..f759051c --- /dev/null +++ b/vel/net/sequence.py @@ -0,0 +1,76 @@ +import collections +import typing + +from vel.api import BackboneModule, SizeHints +from vel.exception import VelException +from vel.util.tensor_util import to_device + + +class GenericModularSequential(BackboneModule): + """ Modification of nn.Sequential for the purpose of modular networks """ + + def __init__(self, layers: typing.Union[collections.OrderedDict, collections.Sequence]): + super().__init__() + self._layers = [] + + if isinstance(layers, collections.OrderedDict): + for key, module in layers.items(): + self.add_module(key, module) + self._layers.append(module) + elif isinstance(layers, collections.Sequence): + for idx, module in enumerate(layers): + key = str(idx) + self.add_module(key, module) + self._layers.append(module) + else: + raise VelException("Incorrectly specified layers, must be a sequence or an ordered dict") + + self._is_stateful = any(l.is_stateful() for l in self._layers) + + def size_hints(self) -> SizeHints: + return self._layers[-1].size_hints() + + @property + def is_stateful(self) -> bool: + """ If the model has a state that needs to be fed between individual observations """ + return self._is_stateful + + def zero_state(self, batch_size): + """ Potential state for the model """ + zero_state = {} + + for l in self.layers: + layer_zero_state = l.zero_state(batch_size) + if layer_zero_state is not None: + zero_state.update(layer_zero_state) + + return zero_state + + def __len__(self): + return len(self._layers) + + def __getitem__(self, item): + return self._layers[item] + + def forward(self, direct, state: dict = None, context: dict = None): + if not self.is_stateful: + for layer in self._layers: + direct = layer(direct, state=state, context=context) + return direct + else: + output_state = {} + + if state is None: + # direct.device here may break. 
Should be fixed at some point + state = to_device(self.zero_state(direct.size(0)), direct.device) + + data = direct + + for layer in self.layers: + if layer.is_stateful: + data, new_state = layer(data, state=state, context=context) + output_state.update(new_state) + else: + data = layer(data, state=state, context=context) + + return data, output_state diff --git a/vel/optimizer/adadelta.py b/vel/optimizer/adadelta.py index e5e01f0e..108cd72e 100644 --- a/vel/optimizer/adadelta.py +++ b/vel/optimizer/adadelta.py @@ -2,8 +2,6 @@ from torch.optim.adadelta import Adadelta -import vel.util.module_util as mu - from vel.api import OptimizerFactory, VelOptimizerProxy, VelOptimizer @@ -12,6 +10,7 @@ class AdadeltaFactory(OptimizerFactory): def __init__(self, lr: float = 1.0, rho: float = 0.9, eps: float = 1e-6, weight_decay: float = 0.0, max_grad_norm: typing.Optional[float] = None): + super().__init__() self.lr = lr self.rho = rho self.eps = eps @@ -19,27 +18,16 @@ def __init__(self, lr: float = 1.0, rho: float = 0.9, eps: float = 1e-6, weight_ self.max_grad_norm = max_grad_norm def instantiate(self, parameters) -> VelOptimizer: + optimizer_params, group_names = self.preprocess(parameters) + return VelOptimizerProxy(Adadelta( - parameters, + optimizer_params, lr=self.lr, rho=self.rho, eps=self.eps, weight_decay=self.weight_decay - ), self.max_grad_norm) - - def instantiate_parameter_groups(self, out_parameters) -> VelOptimizer: - settings_dict = { - 'lr': self.lr, - 'rho': self.rho, - 'eps': self.eps, - 'weight_decay': self.weight_decay - } - - out_parameters = out_parameters.copy() - out_settings_dict = mu.optimizer_parameter_helper(out_parameters, settings_dict) - - return VelOptimizerProxy(Adadelta(out_parameters, **out_settings_dict), self.max_grad_norm) + ), group_names, self.max_grad_norm) def create(lr: float = 1.0, rho: float = 0.9, eps: float = 1e-6, weight_decay: float = 0.0, - max_grad_norm: typing.Optional[float] = None): + max_grad_norm: typing.Optional[float] = None, parameter_groups=None): """ Vel factory function """ return AdadeltaFactory( lr=lr, @@ -47,4 +35,4 @@ def create(lr: float = 1.0, rho: float = 0.9, eps: float = 1e-6, weight_decay: f eps=eps, weight_decay=weight_decay, max_grad_norm=max_grad_norm - ) + ).with_parameter_groups(parameter_groups) diff --git a/vel/optimizer/adam.py b/vel/optimizer/adam.py index 46ad3f06..8e9fb575 100644 --- a/vel/optimizer/adam.py +++ b/vel/optimizer/adam.py @@ -2,8 +2,6 @@ from torch.optim.adam import Adam -import vel.util.module_util as mu - from vel.api import OptimizerFactory, VelOptimizer, VelOptimizerProxy @@ -12,6 +10,7 @@ class AdamFactory(OptimizerFactory): def __init__(self, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, amsgrad=False, max_grad_norm: typing.Optional[float] = None): + super().__init__() self.lr = lr self.betas = betas self.eps = eps @@ -20,25 +19,16 @@ def __init__(self, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, amsgra self.max_grad_norm = max_grad_norm def instantiate(self, parameters) -> VelOptimizer: + optimizer_params, group_names = self.preprocess(parameters) + return VelOptimizerProxy(Adam( - parameters, + optimizer_params, lr=self.lr, betas=self.betas, eps=self.eps, weight_decay=self.weight_decay, amsgrad=self.amsgrad - ), self.max_grad_norm) - - def instantiate_parameter_groups(self, out_parameters) -> VelOptimizer: - settings_dict = { - 'lr': self.lr, - 'eps': self.eps, - 'weight_decay': self.weight_decay, - 'amsgrad': self.amsgrad - } - - out_parameters = out_parameters.copy() - 
out_settings_dict = mu.optimizer_parameter_helper(out_parameters, settings_dict) - - return VelOptimizerProxy(Adam(out_parameters, betas=self.betas, **out_settings_dict), self.max_grad_norm) + ), group_names, self.max_grad_norm) -def create(lr, betas=(0.9, 0.999), weight_decay=0, epsilon=1e-8, max_grad_norm=None): +def create(lr, betas=(0.9, 0.999), weight_decay=0, epsilon=1e-8, max_grad_norm=None, parameter_groups=None): """ Vel factory function """ - return AdamFactory(lr=lr, betas=betas, weight_decay=weight_decay, eps=epsilon, max_grad_norm=max_grad_norm) + return AdamFactory( + lr=lr, betas=betas, weight_decay=weight_decay, eps=epsilon, max_grad_norm=max_grad_norm, + ).with_parameter_groups(parameter_groups) diff --git a/vel/optimizer/radam.py b/vel/optimizer/radam.py index 7abc4959..b6c6e825 100644 --- a/vel/optimizer/radam.py +++ b/vel/optimizer/radam.py @@ -1,15 +1,12 @@ """ RAdam implementation from: https://github.com/LiyuanLucasLiu/RAdam/blob/master/cifar_imagenet/utils/radam.py """ -import collections import math import torch import typing from torch.optim.optimizer import Optimizer -import vel.util.module_util as mu - from vel.api import OptimizerFactory, VelOptimizer, VelOptimizerProxy @@ -93,6 +90,7 @@ class RAdamFactory(OptimizerFactory): def __init__(self, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, max_grad_norm: typing.Optional[float] = None): + super().__init__() self.lr = lr self.betas = betas self.eps = eps @@ -100,24 +98,16 @@ def __init__(self, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, self.max_grad_norm = max_grad_norm def instantiate(self, parameters) -> VelOptimizer: + optimizer_params, group_names = self.preprocess(parameters) + return VelOptimizerProxy(RAdam( - parameters, + optimizer_params, lr=self.lr, betas=self.betas, eps=self.eps, weight_decay=self.weight_decay - ), self.max_grad_norm) - - def instantiate_parameter_groups(self, out_parameters) -> VelOptimizer: - settings_dict = { - 'lr': self.lr, - 'eps': self.eps, - 'weight_decay': self.weight_decay - } - - out_parameters = out_parameters.copy() - out_settings_dict = mu.optimizer_parameter_helper(out_parameters, settings_dict) - - return VelOptimizerProxy(RAdam(out_parameters, betas=self.betas, **out_settings_dict), self.max_grad_norm) + ), group_names, self.max_grad_norm) -def create(lr, betas=(0.9, 0.999), weight_decay=0, epsilon=1e-8, max_grad_norm=None): +def create(lr, betas=(0.9, 0.999), weight_decay=0, epsilon=1e-8, max_grad_norm=None, parameter_groups=None): """ Vel factory function """ - return RAdamFactory(lr=lr, betas=betas, weight_decay=weight_decay, eps=epsilon, max_grad_norm=max_grad_norm) + return RAdamFactory( + lr=lr, betas=betas, weight_decay=weight_decay, eps=epsilon, max_grad_norm=max_grad_norm + ).with_parameter_groups(parameter_groups) diff --git a/vel/optimizer/ranger.py b/vel/optimizer/ranger.py index 9f688ead..1c3143b6 100644 --- a/vel/optimizer/ranger.py +++ b/vel/optimizer/ranger.py @@ -7,8 +7,6 @@ from torch.optim.optimizer import Optimizer -import vel.util.module_util as mu - from vel.api import OptimizerFactory, VelOptimizer, VelOptimizerProxy @@ -154,53 +152,12 @@ def step(self, closure=None): return loss -# class RangerFactory(OptimizerFactory): -# """ RAdam optimizer factory """ -# -# def __init__(self, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, layer_groups=False): -# self.lr = lr -# self.betas = betas -# self.eps = eps -# self.weight_decay = weight_decay -# self.layer_groups = layer_groups -# -# def instantiate(self, model: Model) -> 
Ranger: -# if self.layer_groups: -# parameters = mu.to_parameter_groups(model.get_layer_groups()) -# -# if isinstance(self.lr, collections.Sequence): -# for idx, lr in enumerate(self.lr): -# parameters[idx]['lr'] = lr -# -# default_lr = self.lr[0] -# else: -# default_lr = float(self.lr) -# -# if isinstance(self.weight_decay, collections.Sequence): -# for idx, weight_decay in enumerate(self.weight_decay): -# parameters[idx]['weight_decay'] = weight_decay -# -# default_weight_decay = self.weight_decay[0] -# else: -# default_weight_decay = self.weight_decay -# -# return Ranger( -# parameters, -# lr=default_lr, betas=self.betas, eps=self.eps, weight_decay=default_weight_decay, -# ) -# else: -# parameters = filter(lambda p: p.requires_grad, model.parameters()) -# -# return Ranger( -# parameters, -# lr=self.lr, betas=self.betas, eps=self.eps, weight_decay=self.weight_decay, -# ) - class RangerFactory(OptimizerFactory): """ Adam optimizer factory """ def __init__(self, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, max_grad_norm: typing.Optional[float] = None): + super().__init__() self.lr = lr self.betas = betas self.eps = eps @@ -208,24 +165,16 @@ def __init__(self, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, self.max_grad_norm = max_grad_norm def instantiate(self, parameters) -> VelOptimizer: + optimizer_params, group_names = self.preprocess(parameters) + return VelOptimizerProxy(Ranger( - parameters, + optimizer_params, lr=self.lr, betas=self.betas, eps=self.eps, weight_decay=self.weight_decay - ), self.max_grad_norm) - - def instantiate_parameter_groups(self, out_parameters) -> VelOptimizer: - settings_dict = { - 'lr': self.lr, - 'eps': self.eps, - 'weight_decay': self.weight_decay - } - - out_parameters = out_parameters.copy() - out_settings_dict = mu.optimizer_parameter_helper(out_parameters, settings_dict) - - return VelOptimizerProxy(Ranger(out_parameters, betas=self.betas, **out_settings_dict), self.max_grad_norm) + ), group_names, self.max_grad_norm) -def create(lr, betas=(0.9, 0.999), weight_decay=0, epsilon=1e-8, max_grad_norm=None): +def create(lr, betas=(0.9, 0.999), weight_decay=0, epsilon=1e-8, max_grad_norm=None, parameter_groups=None): """ Vel factory function """ - return RangerFactory(lr=lr, betas=betas, weight_decay=weight_decay, eps=epsilon, max_grad_norm=max_grad_norm) + return RangerFactory( + lr=lr, betas=betas, weight_decay=weight_decay, eps=epsilon, max_grad_norm=max_grad_norm + ).with_parameter_groups(parameter_groups) diff --git a/vel/optimizer/rmsprop.py b/vel/optimizer/rmsprop.py index eacf02ac..c967431a 100644 --- a/vel/optimizer/rmsprop.py +++ b/vel/optimizer/rmsprop.py @@ -2,8 +2,6 @@ from torch.optim.rmsprop import RMSprop -import vel.util.module_util as mu - from vel.api import OptimizerFactory, VelOptimizerProxy, VelOptimizer @@ -12,6 +10,7 @@ class RMSpropFactory(OptimizerFactory): def __init__(self, lr=1e-2, alpha=0.99, eps=1e-8, weight_decay=0, momentum=0, centered=False, max_grad_norm: typing.Optional[float] = None): + super().__init__() self.lr = lr self.alpha = alpha self.eps = eps @@ -21,31 +20,18 @@ def __init__(self, lr=1e-2, alpha=0.99, eps=1e-8, weight_decay=0, momentum=0, ce self.max_grad_norm = max_grad_norm def instantiate(self, parameters) -> VelOptimizer: + optimizer_params, group_names = self.preprocess(parameters) + return VelOptimizerProxy(RMSprop( - parameters, + optimizer_params, lr=self.lr, alpha=self.alpha, eps=self.eps, weight_decay=self.weight_decay, momentum=self.momentum, centered=self.centered - ), 
self.max_grad_norm) - - def instantiate_parameter_groups(self, out_parameters) -> VelOptimizer: - settings_dict = { - 'lr': self.lr, - 'alpha': self.alpha, - 'eps': self.eps, - 'weight_decay': self.weight_decay, - 'momentum': self.momentum, - 'centered': self.centered - } - - out_parameters = out_parameters.copy() - out_settings_dict = mu.optimizer_parameter_helper(out_parameters, settings_dict) - - return VelOptimizerProxy(RMSprop(out_parameters, **out_settings_dict), self.max_grad_norm) + ), group_names, self.max_grad_norm) -def create(lr, alpha, momentum=0, weight_decay=0, epsilon=1e-8, max_grad_norm=None): +def create(lr, alpha, momentum=0, weight_decay=0, epsilon=1e-8, max_grad_norm=None, parameter_groups=None): """ Vel factory function """ return RMSpropFactory( lr=lr, alpha=alpha, momentum=momentum, weight_decay=weight_decay, eps=float(epsilon), max_grad_norm=max_grad_norm - ) + ).with_parameter_groups(parameter_groups) diff --git a/vel/optimizer/rmsprop_tf.py b/vel/optimizer/rmsprop_tf.py index 934d5090..7fea5b97 100644 --- a/vel/optimizer/rmsprop_tf.py +++ b/vel/optimizer/rmsprop_tf.py @@ -3,8 +3,6 @@ from torch.optim.optimizer import Optimizer -import vel.util.module_util as mu - from vel.api import OptimizerFactory, VelOptimizer, VelOptimizerProxy @@ -120,6 +118,7 @@ class RMSpropTFFactory(OptimizerFactory): def __init__(self, lr=1e-2, alpha=0.99, eps=1e-8, weight_decay=0, momentum=0, centered=False, max_grad_norm: typing.Optional[float] = None): + super().__init__() self.lr = lr self.alpha = alpha self.eps = eps @@ -129,31 +128,18 @@ def __init__(self, lr=1e-2, alpha=0.99, eps=1e-8, weight_decay=0, momentum=0, ce self.max_grad_norm = max_grad_norm def instantiate(self, parameters) -> VelOptimizer: + optimizer_params, group_names = self.preprocess(parameters) + return VelOptimizerProxy(RMSpropTF( - parameters, + optimizer_params, lr=self.lr, alpha=self.alpha, eps=self.eps, weight_decay=self.weight_decay, momentum=self.momentum, centered=self.centered - ), self.max_grad_norm) - - def instantiate_parameter_groups(self, out_parameters) -> VelOptimizer: - settings_dict = { - 'lr': self.lr, - 'alpha': self.alpha, - 'eps': self.eps, - 'weight_decay': self.weight_decay, - 'momentum': self.momentum, - 'centered': self.centered - } - - out_parameters = out_parameters.copy() - out_settings_dict = mu.optimizer_parameter_helper(out_parameters, settings_dict) - - return VelOptimizerProxy(RMSpropTF(out_parameters, **out_settings_dict), self.max_grad_norm) + ), group_names, self.max_grad_norm) -def create(lr, alpha, momentum=0, weight_decay=0, epsilon=1e-8, max_grad_norm=None): +def create(lr, alpha, momentum=0, weight_decay=0, epsilon=1e-8, max_grad_norm=None, parameter_groups=None): """ Vel factory function """ return RMSpropTFFactory( lr=lr, alpha=alpha, momentum=momentum, weight_decay=weight_decay, eps=float(epsilon), max_grad_norm=max_grad_norm - ) + ).with_parameter_groups(parameter_groups) diff --git a/vel/optimizer/sgd.py b/vel/optimizer/sgd.py index 383053d5..4479b41f 100644 --- a/vel/optimizer/sgd.py +++ b/vel/optimizer/sgd.py @@ -2,8 +2,6 @@ from torch.optim.sgd import SGD -import vel.util.module_util as mu - from vel.api import OptimizerFactory, VelOptimizer, VelOptimizerProxy @@ -12,6 +10,7 @@ class SgdFactory(OptimizerFactory): def __init__(self, lr, momentum=0, dampening=0, weight_decay=0, nesterov=False, max_grad_norm: typing.Optional[float] = None): + super().__init__() self.lr = lr self.momentum = momentum self.dampening = dampening @@ -20,33 +19,19 @@ def __init__(self, lr, 
momentum=0, dampening=0, weight_decay=0, nesterov=False, self.max_grad_norm = max_grad_norm def instantiate(self, parameters) -> VelOptimizer: - return VelOptimizerProxy( - SGD( - parameters, - lr=self.lr, momentum=self.momentum, dampening=self.dampening, weight_decay=self.weight_decay, - nesterov=self.nesterov - ), self.max_grad_norm - ) - - def instantiate_parameter_groups(self, parameters) -> VelOptimizer: - settings_dict = { - 'lr': self.lr, - 'momentum': self.momentum, - 'dampening': self.dampening, - 'weight_decay': self.weight_decay, - 'nesterov': self.nesterov - } - - parameters = parameters.copy() - out_settings_dict = mu.optimizer_parameter_helper(parameters, settings_dict) + optimizer_params, group_names = self.preprocess(parameters) - return VelOptimizerProxy(SGD(parameters, **out_settings_dict), self.max_grad_norm) + return VelOptimizerProxy(SGD( + optimizer_params, + lr=self.lr, momentum=self.momentum, dampening=self.dampening, weight_decay=self.weight_decay, + nesterov=self.nesterov + ), group_names, self.max_grad_norm) def create(lr, momentum=0, dampening=0, weight_decay=0, nesterov=False, - max_grad_norm: typing.Optional[float] = None): + max_grad_norm: typing.Optional[float] = None, parameter_groups=None): """ Vel factory function """ return SgdFactory( lr=lr, momentum=momentum, dampening=dampening, weight_decay=weight_decay, nesterov=nesterov, max_grad_norm=max_grad_norm - ) + ).with_parameter_groups(parameter_groups) diff --git a/vel/rl/layer/double_nature_cnn.py b/vel/rl/layer/double_nature_cnn.py index 3f78db01..f053c3b2 100644 --- a/vel/rl/layer/double_nature_cnn.py +++ b/vel/rl/layer/double_nature_cnn.py @@ -13,7 +13,7 @@ import vel.util.network as net_util from vel.api import SizeHints, SizeHint -from vel.net.layer_base import Layer, LayerFactory +from vel.net.layer_base import Layer, LayerFactory, LayerFactoryContext, LayerInfo class DoubleNatureCnn(Layer): @@ -21,8 +21,8 @@ class DoubleNatureCnn(Layer): Neural network as defined in the paper 'Human-level control through deep reinforcement learning' but with two separate heads. 
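
The max_grad_norm argument threaded through all of these optimizer factories is handed to VelOptimizerProxy, which presumably applies it as a global gradient-norm clip before each step. As a rough, standalone illustration of that behaviour in plain PyTorch (a sketch only, independent of the proxy internals, which are not part of this diff):

import torch
import torch.nn as nn

model = nn.Linear(10, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9)
max_grad_norm = 0.5  # plays the same role as the factories' max_grad_norm

x, y = torch.randn(8, 10), torch.randn(8, 2)
loss = nn.functional.mse_loss(model(x), y)

optimizer.zero_grad()
loss.backward()
# Clip the total gradient norm across all parameters, then take the step.
torch.nn.utils.clip_grad_norm_(model.parameters(), max_grad_norm)
optimizer.step()
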
""" - def __init__(self, name: str, input_width, input_height, input_channels, output_dim=512): - super().__init__(name) + def __init__(self, info: LayerInfo, input_width, input_height, input_channels, output_dim=512): + super().__init__(info) self.output_dim = output_dim @@ -103,6 +103,7 @@ class DoubleNatureCnnFactory(LayerFactory): """ Nature Cnn Network Factory """ def __init__(self, output_dim: int = 512): + super().__init__() self.output_dim = output_dim @property @@ -110,11 +111,11 @@ def name_base(self) -> str: """ Base of layer name """ return "double_nature_cnn" - def instantiate(self, name: str, direct_input: SizeHints, context: dict, extra_args: dict) -> Layer: + def instantiate(self, direct_input: SizeHints, context: LayerFactoryContext, extra_args: dict) -> Layer: (b, c, w, h) = direct_input.assert_single(4) return DoubleNatureCnn( - name=name, + info=self.make_info(context), input_width=w, input_height=h, input_channels=c, @@ -122,6 +123,6 @@ def instantiate(self, name: str, direct_input: SizeHints, context: dict, extra_a ) -def create(output_dim: int = 512): +def create(output_dim: int = 512, label=None, group=None): """ Vel factory function """ - return DoubleNatureCnnFactory(output_dim=output_dim) + return DoubleNatureCnnFactory(output_dim=output_dim).with_given_name(label).with_given_group(group) diff --git a/vel/rl/layer/double_noisy_nature_cnn.py b/vel/rl/layer/double_noisy_nature_cnn.py index f0740be8..536eccce 100644 --- a/vel/rl/layer/double_noisy_nature_cnn.py +++ b/vel/rl/layer/double_noisy_nature_cnn.py @@ -14,7 +14,7 @@ from vel.api import SizeHints, SizeHint -from vel.net.layer_base import Layer, LayerFactory +from vel.net.layer_base import Layer, LayerFactory, LayerFactoryContext from vel.rl.module.noisy_linear import NoisyLinear @@ -23,9 +23,9 @@ class DoubleNoisyNatureCnn(Layer): Neural network as defined in the paper 'Human-level control through deep reinforcement learning' but with two separate heads and "noisy" linear layer. 
""" - def __init__(self, name: str, input_width, input_height, input_channels, output_dim=512, initial_std_dev=0.4, + def __init__(self, info: LayerInfo, input_width, input_height, input_channels, output_dim=512, initial_std_dev=0.4, factorized_noise=True): - super().__init__(name) + super().__init__(info) self.output_dim = output_dim @@ -119,6 +119,7 @@ class DoubleNoisyNatureCnnFactory(LayerFactory): """ Nature Cnn Network Factory """ def __init__(self, initial_std_dev: float = 0.4, factorized_noise: bool = True, output_dim: int = 512): + super().__init__() self.initial_std_dev = initial_std_dev self.factorized_noise = factorized_noise self.output_dim = output_dim @@ -128,11 +129,11 @@ def name_base(self) -> str: """ Base of layer name """ return "double_noisy_nature_cnn" - def instantiate(self, name: str, direct_input: SizeHints, context: dict, extra_args: dict) -> Layer: + def instantiate(self, direct_input: SizeHints, context: LayerFactoryContext, extra_args: dict) -> Layer: (b, c, w, h) = direct_input.assert_single(4) return DoubleNoisyNatureCnn( - name=name, + info=self.make_info(context), input_width=w, input_height=h, input_channels=c, @@ -142,10 +143,11 @@ def instantiate(self, name: str, direct_input: SizeHints, context: dict, extra_a ) -def create(initial_std_dev: float = 0.4, factorized_noise: bool = True, output_dim: int = 512): +def create(initial_std_dev: float = 0.4, factorized_noise: bool = True, output_dim: int = 512, + label=None, group=None): """ Vel factory function """ return DoubleNoisyNatureCnnFactory( output_dim=output_dim, initial_std_dev=initial_std_dev, factorized_noise=factorized_noise - ) + ).with_given_name(label).with_given_group(group) diff --git a/vel/model/rnn/__init__.py b/vel/rl/layer/input/__init__.py similarity index 100% rename from vel/model/rnn/__init__.py rename to vel/rl/layer/input/__init__.py diff --git a/vel/rl/layer/nature_cnn.py b/vel/rl/layer/nature_cnn.py index b9845a16..a30b60ce 100644 --- a/vel/rl/layer/nature_cnn.py +++ b/vel/rl/layer/nature_cnn.py @@ -13,14 +13,14 @@ import vel.util.network as net_util from vel.api import SizeHint, SizeHints -from vel.net.layer_base import Layer, LayerFactory +from vel.net.layer_base import Layer, LayerFactory, LayerFactoryContext, LayerInfo class NatureCnn(Layer): """ Neural network as defined in the paper 'Human-level control through deep reinforcement learning' """ - def __init__(self, name: str, input_width, input_height, input_channels, output_dim=512): - super().__init__(name) + def __init__(self, info: LayerInfo, input_width, input_height, input_channels, output_dim=512): + super().__init__(info) self.output_dim = output_dim @@ -87,6 +87,7 @@ class NatureCnnFactory(LayerFactory): """ Nature Cnn Network Factory """ def __init__(self, output_dim: int = 512): + super().__init__() self.output_dim = output_dim @property @@ -94,11 +95,11 @@ def name_base(self) -> str: """ Base of layer name """ return "nature_cnn" - def instantiate(self, name: str, direct_input: SizeHints, context: dict, extra_args: dict) -> Layer: + def instantiate(self, direct_input: SizeHints, context: LayerFactoryContext, extra_args: dict) -> Layer: (b, c, w, h) = direct_input.assert_single(4) return NatureCnn( - name=name, + info=self.make_info(context), input_width=w, input_height=h, input_channels=c, @@ -106,6 +107,6 @@ def instantiate(self, name: str, direct_input: SizeHints, context: dict, extra_a ) -def create(output_dim=512): +def create(output_dim=512, label=None, group=None): """ Vel factory function """ - return 
NatureCnnFactory(output_dim=output_dim) + return NatureCnnFactory(output_dim=output_dim).with_given_name(label).with_given_group(group) diff --git a/vel/rl/layer/nature_cnn_small.py b/vel/rl/layer/nature_cnn_small.py index e8bc8928..ec8d9497 100644 --- a/vel/rl/layer/nature_cnn_small.py +++ b/vel/rl/layer/nature_cnn_small.py @@ -13,7 +13,7 @@ import vel.util.network as net_util from vel.api import SizeHint, SizeHints -from vel.net.modular import Layer, LayerFactory +from vel.net.layer_base import LayerFactoryContext, Layer, LayerFactory, LayerInfo class NatureCnnSmall(Layer): @@ -21,8 +21,8 @@ class NatureCnnSmall(Layer): Neural network as defined in the paper 'Human-level control through deep reinforcement learning' Smaller version. """ - def __init__(self, name: str, input_width, input_height, input_channels, output_dim=128): - super().__init__(name) + def __init__(self, info: LayerInfo, input_width, input_height, input_channels, output_dim=128): + super().__init__(info) self.output_dim = output_dim @@ -81,6 +81,7 @@ class NatureCnnSmallFactory(LayerFactory): """ Nature Cnn Network Factory """ def __init__(self, output_dim: int = 128): + super().__init__() self.output_dim = output_dim @property @@ -88,11 +89,11 @@ def name_base(self) -> str: """ Base of layer name """ return "nature_cnn_small" - def instantiate(self, name: str, direct_input: SizeHints, context: dict, extra_args: dict) -> Layer: + def instantiate(self, direct_input: SizeHints, context: LayerFactoryContext, extra_args: dict) -> Layer: (b, c, w, h) = direct_input.assert_single(4) return NatureCnnSmall( - name=name, + info=self.make_info(context), input_width=w, input_height=h, input_channels=c, @@ -100,6 +101,6 @@ def instantiate(self, name: str, direct_input: SizeHints, context: dict, extra_a ) -def create(output_dim: int = 128): +def create(output_dim: int = 128, label=None, group=None): """ Vel factory function """ - return NatureCnnSmallFactory(output_dim=output_dim) + return NatureCnnSmallFactory(output_dim=output_dim).with_given_name(label).with_given_group(group) diff --git a/vel/rl/layer/rnn_cell.py b/vel/rl/layer/rnn_cell.py index a509072d..2eadf942 100644 --- a/vel/rl/layer/rnn_cell.py +++ b/vel/rl/layer/rnn_cell.py @@ -4,15 +4,15 @@ from vel.api import SizeHint, SizeHints -from vel.net.layer_base import Layer, LayerFactory +from vel.net.layer_base import Layer, LayerFactory, LayerFactoryContext, LayerInfo class RnnCell(Layer): """ Generalization of RNN cell (Simple RNN, LSTM or GRU) """ - def __init__(self, name: str, input_size: int, hidden_size: int, rnn_type: str, bias: bool = True, + def __init__(self, info: LayerInfo, input_size: int, hidden_size: int, rnn_type: str, bias: bool = True, nonlinearity: str = 'tanh'): - super().__init__(name) + super().__init__(info) assert rnn_type in {'rnn', 'lstm', 'gru'}, "Rnn type {} is not supported".format(rnn_type) @@ -73,6 +73,7 @@ class RnnCellFactory(LayerFactory): """ Factory for the RnnCell layer """ def __init__(self, hidden_size: int, rnn_type: str, bias: bool = True, nonlinearity: str = 'tanh'): + super().__init__() self.hidden_size = hidden_size self.rnn_type = rnn_type self.bias = bias @@ -82,11 +83,12 @@ def __init__(self, hidden_size: int, rnn_type: str, bias: bool = True, nonlinear def name_base(self) -> str: return "rnn_cell" - def instantiate(self, name: str, direct_input: SizeHints, context: dict, extra_args: dict) -> Layer: + def instantiate(self, direct_input: SizeHints, context: LayerFactoryContext, extra_args: dict) -> Layer: + """ Create a given 
layer object """ input_size = direct_input.assert_single().last() return RnnCell( - name=name, + info=self.make_info(context), input_size=input_size, hidden_size=self.hidden_size, rnn_type=self.rnn_type, @@ -95,11 +97,11 @@ def instantiate(self, name: str, direct_input: SizeHints, context: dict, extra_a ) -def create(hidden_size: int, rnn_type: str, bias: bool = True, nonlinearity: str = 'tanh'): +def create(hidden_size: int, rnn_type: str, bias: bool = True, nonlinearity: str = 'tanh', label=None, group=None): """ Vel factory function """ return RnnCellFactory( hidden_size=hidden_size, rnn_type=rnn_type, bias=bias, nonlinearity=nonlinearity - ) + ).with_given_name(label).with_given_group(group) diff --git a/vel/rl/module/actor_critic_policy.py b/vel/rl/module/actor_critic_policy.py index cb252447..bc4a5eae 100644 --- a/vel/rl/module/actor_critic_policy.py +++ b/vel/rl/module/actor_critic_policy.py @@ -36,6 +36,16 @@ def layer_groups(self): [self.input_net, self.value_backbone, self.critic_head], ] + def grouped_parameters(self): + """ Return iterable of pairs (group, parameters) """ + return it.chain( + self.input_net.grouped_parameters(), + self.policy_backbone.grouped_parameters(), + self.value_backbone.grouped_parameters(), + [("actor", self.action_head.parameters())], + [("critic", self.critic_head.parameters())], + ) + def reset_weights(self): """ Initialize properly model weights """ self.input_net.reset_weights() diff --git a/vel/rl/policy/ddpg.py b/vel/rl/policy/ddpg.py index 3f40d317..3d286d16 100644 --- a/vel/rl/policy/ddpg.py +++ b/vel/rl/policy/ddpg.py @@ -47,8 +47,7 @@ def reset_episodic_state(self, dones: torch.Tensor): def create_optimizer(self, optimizer_factory: OptimizerFactory) -> VelOptimizer: """ Create optimizer for the purpose of optimizing this model """ - parameter_groups = mu.to_parameter_groups(self.net.layer_groups()) - return optimizer_factory.instantiate_parameter_groups(parameter_groups) + return optimizer_factory.instantiate(self.net.grouped_parameters()) def forward(self, observation, state=None): """ Calculate model outputs """ diff --git a/vel/rl/vecenv/dummy.py b/vel/rl/vecenv/dummy.py index 29b405e0..4df8cd2b 100644 --- a/vel/rl/vecenv/dummy.py +++ b/vel/rl/vecenv/dummy.py @@ -29,7 +29,7 @@ def instantiate(self, parallel_envs, seed=0, preset='default') -> VecEnv: def instantiate_single(self, seed=0, preset='default'): """ Create a new Env instance - single """ - env = self.env.instantiate(seed=seed, serial_id=0, preset=preset) + env = self.env.instantiate() if self.frame_history is not None: env = FrameStack(env, self.frame_history) @@ -38,7 +38,7 @@ def instantiate_single(self, seed=0, preset='default'): def _creation_function(self, idx, seed, preset): """ Helper function to create a proper closure around supplied values """ - return lambda: self.env.instantiate(seed=seed, serial_id=idx, preset=preset) + return lambda: self.env.instantiate() def create(env, frame_history=None, normalize_returns=False): diff --git a/vel/train/phase/cycle.py b/vel/train/phase/cycle.py index 63fae205..cb04c409 100644 --- a/vel/train/phase/cycle.py +++ b/vel/train/phase/cycle.py @@ -74,6 +74,13 @@ def on_batch_begin(self, batch_info: BatchInfo, dataset: typing.Optional[str] = interp.interpolate_single(max_lr, min_lr, interpolation_number, how=self.interpolate) for max_lr, min_lr in zip(self.max_lr, self.min_lr) ] + elif isinstance(self.max_lr, dict): + lr = { + name: interp.interpolate_single( + self.max_lr[name], self.min_lr[name], interpolation_number, 
how=self.interpolate + ) + for name in self.max_lr + } else: lr = interp.interpolate_single(self.max_lr, self.min_lr, interpolation_number, how=self.interpolate) diff --git a/vel/train/phase/freeze.py b/vel/train/phase/freeze.py index c230c762..635b7ebd 100644 --- a/vel/train/phase/freeze.py +++ b/vel/train/phase/freeze.py @@ -3,12 +3,14 @@ class FreezePhase(train.EmptyTrainPhase): """ Freeze the model """ + def __init__(self, groups=None): + self.groups = groups def set_up_phase(self, training_info, model, loader): """ Freeze the model """ - model.freeze() + model.freeze(groups=self.groups) -def create(): +def create(groups=None): """ Vel factory function """ - return FreezePhase() + return FreezePhase(groups) diff --git a/vel/util/module_util.py b/vel/util/module_util.py index e2dbef9b..4d1ca456 100644 --- a/vel/util/module_util.py +++ b/vel/util/module_util.py @@ -83,6 +83,11 @@ def to_parameter_groups(layer_groups): return [{'params': chain_params(x)} for x in layer_groups] +def module_list_to_param_list(module_list): + """ Conver a list of pytorch modules into a list of parameters """ + return it.chain.from_iterable(m.parameters() for m in module_list) + + def optimizer_parameter_helper(parameters, parameter_dict): """ Helper function for creating layer group optimizer instances """ out_dict = parameter_dict.copy() From f4e61077bd2d95b840c079961ecdf3bbed98dc58 Mon Sep 17 00:00:00 2001 From: Million Integrals Date: Mon, 14 Oct 2019 11:12:09 -0700 Subject: [PATCH 129/162] Improving formatting. --- vel/model/nlp/language_model.py | 1 - vel/rl/layer/double_noisy_nature_cnn.py | 3 +-- vel/rl/policy/ddpg.py | 2 -- 3 files changed, 1 insertion(+), 5 deletions(-) diff --git a/vel/model/nlp/language_model.py b/vel/model/nlp/language_model.py index def34db5..e45347a0 100644 --- a/vel/model/nlp/language_model.py +++ b/vel/model/nlp/language_model.py @@ -47,7 +47,6 @@ def forward(self, input_data: torch.Tensor, state=None) -> torch.Tensor: output = self.net(input_data) return F.log_softmax(self.output_layer(output), dim=-1) - def loss_value(self, x_data, y_true, y_pred) -> torch.tensor: """ Calculate a value of loss function """ y_pred = y_pred.view(-1, y_pred.size(2)) diff --git a/vel/rl/layer/double_noisy_nature_cnn.py b/vel/rl/layer/double_noisy_nature_cnn.py index 536eccce..da867a76 100644 --- a/vel/rl/layer/double_noisy_nature_cnn.py +++ b/vel/rl/layer/double_noisy_nature_cnn.py @@ -13,8 +13,7 @@ import vel.util.network as net_util from vel.api import SizeHints, SizeHint - -from vel.net.layer_base import Layer, LayerFactory, LayerFactoryContext +from vel.net.layer_base import Layer, LayerFactory, LayerFactoryContext, LayerInfo from vel.rl.module.noisy_linear import NoisyLinear diff --git a/vel/rl/policy/ddpg.py b/vel/rl/policy/ddpg.py index 3d286d16..3e47955f 100644 --- a/vel/rl/policy/ddpg.py +++ b/vel/rl/policy/ddpg.py @@ -6,8 +6,6 @@ import torch.nn as nn import torch.nn.functional as F -import vel.util.module_util as mu - from vel.api import BackboneModule, BatchInfo, ModuleFactory, OptimizerFactory, VelOptimizer, SizeHints from vel.metric.base import AveragingNamedMetric from vel.rl.api import RlPolicy, Rollout From a00d125b857c08bc2e8167ff1ec4cb790a8af4b2 Mon Sep 17 00:00:00 2001 From: Million Integrals Date: Mon, 14 Oct 2019 11:13:23 -0700 Subject: [PATCH 130/162] Formatting. 
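
The parameter-group machinery introduced around here (grouped_parameters on policies, preprocess and with_parameter_groups on the optimizer factories, the dict-valued max_lr/min_lr accepted by the cycle phase, and the groups argument to the freeze phase) ultimately maps onto PyTorch's native optimizer parameter groups. A standalone sketch of that idea, with the group names below chosen purely for illustration:

import torch
import torch.nn as nn

actor = nn.Linear(16, 4)
critic = nn.Linear(16, 1)

# One optimizer, one parameter group per named component.
optimizer = torch.optim.Adam([
    {'params': actor.parameters(), 'lr': 1e-4},   # "actor" group
    {'params': critic.parameters(), 'lr': 1e-3},  # "critic" group
])

# A dict of per-group learning rates (for example, the values interpolated
# by the cycle phase above) can then be written back group by group.
new_lr = {'actor': 5e-5, 'critic': 5e-4}
for group, name in zip(optimizer.param_groups, ['actor', 'critic']):
    group['lr'] = new_lr[name]

In other words, the group names carried alongside the optimizer appear to be labels that let schedules and freeze phases address individual param_groups.
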
--- vel/rl/policy/ddpg.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/vel/rl/policy/ddpg.py b/vel/rl/policy/ddpg.py index 3e47955f..84d64a60 100644 --- a/vel/rl/policy/ddpg.py +++ b/vel/rl/policy/ddpg.py @@ -181,8 +181,7 @@ def instantiate(self, **extra_args): def create(actor_net: ModuleFactory, critic_net: ModuleFactory, discount_factor: float, tau: float, noise_std_dev: float, - input_net: typing.Optional[ModuleFactory] = None - ): + input_net: typing.Optional[ModuleFactory] = None): """ Vel factory function """ return DDPGFactory( actor_net=actor_net, From 284260a115287a92b7b3270aba4bdf1248d75b9c Mon Sep 17 00:00:00 2001 From: Million Integrals Date: Mon, 14 Oct 2019 12:05:46 -0700 Subject: [PATCH 131/162] EWMA input normalization. --- examples-configs/rl/mujoco/mujoco_a2c.yaml | 2 +- examples-configs/rl/mujoco/mujoco_ddpg.yaml | 2 +- examples-configs/rl/mujoco/mujoco_ppo.yaml | 2 +- examples-configs/rl/mujoco/mujoco_trpo.yaml | 2 +- vel/module/input/normalize_ewma.py | 47 +++++++++++++++ vel/net/layer/input/normalize_ewma.py | 65 +++++++++++++++++++++ 6 files changed, 116 insertions(+), 4 deletions(-) create mode 100644 vel/module/input/normalize_ewma.py create mode 100644 vel/net/layer/input/normalize_ewma.py diff --git a/examples-configs/rl/mujoco/mujoco_a2c.yaml b/examples-configs/rl/mujoco/mujoco_a2c.yaml index 9f89431a..f85a277a 100644 --- a/examples-configs/rl/mujoco/mujoco_a2c.yaml +++ b/examples-configs/rl/mujoco/mujoco_a2c.yaml @@ -22,7 +22,7 @@ model: net: name: vel.net.modular layers: - - name: vel.net.layer.input.normalize_expanding + - name: vel.net.layer.input.normalize_ewma - name: vel.net.layer.mlp hidden_layers: [64, 64] activation: 'tanh' diff --git a/examples-configs/rl/mujoco/mujoco_ddpg.yaml b/examples-configs/rl/mujoco/mujoco_ddpg.yaml index f75ef7e3..a5c837e0 100644 --- a/examples-configs/rl/mujoco/mujoco_ddpg.yaml +++ b/examples-configs/rl/mujoco/mujoco_ddpg.yaml @@ -21,7 +21,7 @@ model: input_net: name: vel.net.modular layers: - - name: vel.net.layer.input.normalize_expanding + - name: vel.net.layer.input.normalize_ewma actor_net: name: vel.net.modular diff --git a/examples-configs/rl/mujoco/mujoco_ppo.yaml b/examples-configs/rl/mujoco/mujoco_ppo.yaml index 975eabf6..5fdbd7bd 100644 --- a/examples-configs/rl/mujoco/mujoco_ppo.yaml +++ b/examples-configs/rl/mujoco/mujoco_ppo.yaml @@ -25,7 +25,7 @@ model: net: name: vel.net.modular layers: - - name: vel.net.layer.input.normalize_expanding + - name: vel.net.layer.input.normalize_ewma - name: vel.net.layer.util.repeat times: 2 # Need to repeat output twice, to consume by the 'parallel' layers - name: vel.net.layer.arch.parallel diff --git a/examples-configs/rl/mujoco/mujoco_trpo.yaml b/examples-configs/rl/mujoco/mujoco_trpo.yaml index f88fc5ba..5c2a83ee 100644 --- a/examples-configs/rl/mujoco/mujoco_trpo.yaml +++ b/examples-configs/rl/mujoco/mujoco_trpo.yaml @@ -28,7 +28,7 @@ model: input_net: name: vel.net.modular layers: - - name: vel.net.layer.input.normalize_expanding + - name: vel.net.layer.input.normalize_ewma policy_net: name: vel.net.modular diff --git a/vel/module/input/normalize_ewma.py b/vel/module/input/normalize_ewma.py new file mode 100644 index 00000000..3219e358 --- /dev/null +++ b/vel/module/input/normalize_ewma.py @@ -0,0 +1,47 @@ +import torch + +from vel.api import VModule + + +class NormalizeEwma(VModule): + """ Normalize a vector of observations - across the batch dim """ + + def __init__(self, input_shape, beta=0.99, per_element_update=False, epsilon=1e-1): + 
super().__init__() + + self.input_shape = input_shape + self.epsilon = epsilon + self.beta = beta + self.per_element_update = per_element_update + + self.register_buffer('running_mean', torch.zeros(input_shape, dtype=torch.float)) + self.register_buffer('running_var', torch.ones(input_shape, dtype=torch.float)) + self.register_buffer('debiasing_term', torch.tensor(self.epsilon, dtype=torch.float)) + + def reset_weights(self): + self.running_mean.zero_() + self.running_var.fill_(1.0) + self.count.fill_(self.epsilon) + + def forward(self, input_vector): + # Make sure input is float32 + input_vector = input_vector.to(torch.float) + + if self.training: + batch_size = input_vector.size(0) + batch_mean = input_vector.mean(dim=0) + batch_var = input_vector.var(dim=0, unbiased=False) + + if self.per_element_update: + weight = self.beta ** batch_size + else: + weight = self.beta + + self.running_mean.mul_(weight).add_(batch_mean * (1.0 - weight)) + self.running_var.mul_(weight).add_(batch_var * (1.0 - weight)) + self.debiasing_term.mul_(weight).add_(1.0 * (1.0 - weight)) + + debiased_mean = self.running_mean / self.debiasing_term + debiased_var = self.running_var / self.debiasing_term + + return (input_vector - debiased_mean.unsqueeze(0)) / torch.sqrt(debiased_var.unsqueeze(0)) diff --git a/vel/net/layer/input/normalize_ewma.py b/vel/net/layer/input/normalize_ewma.py new file mode 100644 index 00000000..50b6dec5 --- /dev/null +++ b/vel/net/layer/input/normalize_ewma.py @@ -0,0 +1,65 @@ +from vel.api import SizeHints, SizeHint +from vel.module.input.normalize_ewma import NormalizeEwma +from vel.net.layer_base import LayerFactory, Layer, LayerFactoryContext, LayerInfo + + +class NormalizeEwmaLayer(Layer): + """ Layer that normalizes the inputs """ + + def __init__(self, info: LayerInfo, input_shape: SizeHints, beta: float = 0.99, epsilon: float = 1e-1, + per_element_update=False): + super().__init__(info) + + self.input_shape = input_shape + self.beta = beta + self.epsilon = epsilon + self.per_element_update = per_element_update + + self.normalize = NormalizeEwma( + beta=self.beta, + epsilon=self.epsilon, + per_element_update=self.per_element_update, + input_shape=self.input_shape.assert_single()[1:] # Remove batch axis + ) + + def forward(self, direct, state: dict = None, context: dict = None): + return self.normalize(direct) + + def size_hints(self) -> SizeHints: + return self.input_shape + + +class NormalizeEwmaLayerFactory(LayerFactory): + def __init__(self, beta: float = 0.99, epsilon: float = 1e-2, shape=None, per_element_update=False): + super().__init__() + self.shape = shape + self.beta = beta + self.epsilon = epsilon + self.per_element_update = per_element_update + + @property + def name_base(self) -> str: + """ Base of layer name """ + return "image_to_tensor" + + def instantiate(self, direct_input: SizeHints, context: LayerFactoryContext, extra_args: dict) -> Layer: + """ Create a given layer object """ + if self.shape is None: + input_shape = direct_input + else: + input_shape = SizeHints(SizeHint(*([None] + list(self.shape)))) + + return NormalizeEwmaLayer( + info=self.make_info(context), + beta=self.beta, + epsilon=self.epsilon, + per_element_update=self.per_element_update, + input_shape=input_shape + ) + + +def create(beta=0.99, epsilon=1e-1, shape=None, per_element_update=False, label=None, group=None): + """ Vel factory function """ + return NormalizeEwmaLayerFactory( + beta=beta, epsilon=epsilon, shape=shape, per_element_update=per_element_update + 
).with_given_name(label).with_given_group(group) From a2bb1314fa76a398d88674d7187ce73d13ecfa52 Mon Sep 17 00:00:00 2001 From: Million Integrals Date: Fri, 18 Oct 2019 18:18:09 -0700 Subject: [PATCH 132/162] Implemented WANDB streaming. --- vel/metric/base/base_metric.py | 17 +++++++++++++++-- vel/storage/streaming/visdom.py | 4 ++-- vel/storage/streaming/wandb.py | 32 ++++++++++++++++++++++++++++++++ 3 files changed, 49 insertions(+), 4 deletions(-) create mode 100644 vel/storage/streaming/wandb.py diff --git a/vel/metric/base/base_metric.py b/vel/metric/base/base_metric.py index 6a64d1b2..8f073247 100644 --- a/vel/metric/base/base_metric.py +++ b/vel/metric/base/base_metric.py @@ -1,9 +1,22 @@ -import collections +import attr +import typing from vel.api import TrainingInfo -MetricKey = collections.namedtuple('MetricKey', ['dataset', 'name', 'scope']) +@attr.s(auto_attribs=True, frozen=True) +class MetricKey: + """ Key for each metric """ + name: str + scope: str + dataset: typing.Optional[str] = None + + def format(self): + """ Format a metric key into a string """ + if self.dataset is None: + return f"{self.scope}/{self.name}" + else: + return f"{self.dataset}:{self.scope}/{self.name}" class BaseMetric: diff --git a/vel/storage/streaming/visdom.py b/vel/storage/streaming/visdom.py index c861afe0..b0c31277 100644 --- a/vel/storage/streaming/visdom.py +++ b/vel/storage/streaming/visdom.py @@ -33,8 +33,8 @@ def on_batch_end(self, batch_info, dataset=None): """ Stream LR to visdom """ if self.settings.stream_lr: iteration_idx = ( - float(batch_info.epoch_number) + - float(batch_info.batch_number) / batch_info.batches_per_epoch + float(batch_info.epoch_number) + + float(batch_info.batch_number) / batch_info.batches_per_epoch ) lr = batch_info.optimizer.param_groups[-1]['lr'] diff --git a/vel/storage/streaming/wandb.py b/vel/storage/streaming/wandb.py new file mode 100644 index 00000000..24bf0297 --- /dev/null +++ b/vel/storage/streaming/wandb.py @@ -0,0 +1,32 @@ +import wandb + + +from vel.api import ModelConfig, Callback, TrainingInfo + + +class WandbStreaming(Callback): + """ Stream live results from training to WandB """ + + def __init__(self, model_config: ModelConfig): + self.model_config = model_config + + def on_train_begin(self, training_info: TrainingInfo) -> None: + wandb.init( + job_type='train', + project='vel', + dir=self.model_config.output_dir('wandb'), + group=self.model_config.name, + name=self.model_config.run_name, + resume=training_info.start_epoch_idx > 0, + tags=[self.model_config.tag] if self.model_config.tag else [] + ) + + def on_epoch_end(self, epoch_info): + """ Send data to wandb """ + result = {k.format(): v for k, v in epoch_info.result.items()} + wandb.log(row=result, step=epoch_info.global_epoch_idx) + + +def create(model_config): + """ Vel factory function """ + return WandbStreaming(model_config) From b2b60d42aec35b5f5647af3e968addf95d9cdedc Mon Sep 17 00:00:00 2001 From: Million Integrals Date: Fri, 18 Oct 2019 18:18:53 -0700 Subject: [PATCH 133/162] Added wandb settings to git ignore. --- .gitignore | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.gitignore b/.gitignore index b0a800fd..e0b8259c 100644 --- a/.gitignore +++ b/.gitignore @@ -117,3 +117,6 @@ environment.yaml # Test cache /.pytest_cache + +# WANDB settings +/wandb From e3e211fc393856869359b3d6e84e901dd89f722f Mon Sep 17 00:00:00 2001 From: Million Integrals Date: Fri, 18 Oct 2019 18:58:57 -0700 Subject: [PATCH 134/162] Added faster VAE NLL command. 
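
A note on the NormalizeEwma module introduced in the EWMA-normalization patch above: it keeps exponentially weighted running statistics and divides them by a debiasing term, much like the bias correction in Adam (its reset_weights also refers to self.count, although the registered buffer is named debiasing_term, which looks like a leftover). Below is a tiny numeric sketch of why the debiasing term matters; it seeds the term with zero instead of the module's epsilon purely to keep the arithmetic obvious:

import torch

beta = 0.99
running_mean = torch.zeros(3)
debiasing_term = torch.tensor(0.0)

for _ in range(5):                       # only a few updates so far
    batch_mean = torch.ones(3)           # pretend every batch has mean 1.0
    running_mean = beta * running_mean + (1 - beta) * batch_mean
    debiasing_term = beta * debiasing_term + (1 - beta) * 1.0

print(running_mean)                      # ~0.049, nowhere near the true mean
print(running_mean / debiasing_term)     # exactly 1.0 after debiasing
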
--- .../autoencoder/mnist/mnist_cnn_ae.yaml | 2 +- .../cifar10/cifar10_cnn_01.yaml | 2 +- .../cifar10/cifar10_resnetv1_110.yaml | 2 +- .../cifar10/cifar10_resnetv1_32.yaml | 2 +- .../cifar10/cifar10_resnetv2_110.yaml | 2 +- .../cifar10_resnetv2_164_bottleneck.yaml | 2 +- .../cifar10/cifar10_resnetv2_32.yaml | 2 +- .../cifar10/cifar10_resnext_29_c1.yaml | 2 +- .../cifar10/cifar10_resnext_29_c8.yaml | 2 +- .../cats_vs_dogs_resnet34.yaml | 2 +- .../classification/mnist/mnist_cnn_01.yaml | 2 +- examples-configs/gan/mnist/mnist_gan.yaml | 2 +- .../latent/mnist/mnist_cnn_iwae.yaml | 2 +- .../latent/mnist/mnist_cnn_vae.yaml | 2 +- .../latent/mnist/mnist_cnn_vq_vae.yaml | 2 +- .../latent/mnist/mnist_fc_iwae.yaml | 2 +- .../latent/mnist/mnist_fc_vae.yaml | 9 +++- .../latent/omniglot/omniglot_cnn_vae.yaml | 2 +- .../latent/omniglot/omniglot_fc_vae.yaml | 2 +- .../classification/imdb_sentiment_gru.yaml | 2 +- .../nlp/generation/gen_shakespeare.yaml | 2 +- vel/command/augvis_command.py | 2 +- vel/command/latent/__init__.py | 0 vel/command/latent/vae_nll.py | 53 +++++++++++++++++++ vel/data/__init__.py | 1 - vel/data/loader/__init__.py | 3 ++ vel/data/{ => loader}/bucket_loader.py | 21 +++++--- vel/data/{ => loader}/dataset_loader.py | 11 ++-- .../{ => loader}/text_character_loader.py | 17 +++--- vel/model/latent/vae_base.py | 29 +++++----- vel/train/phase/generic.py | 2 +- vel/train/train_phase.py | 2 +- vel/train/trainer.py | 2 +- 33 files changed, 133 insertions(+), 59 deletions(-) create mode 100644 vel/command/latent/__init__.py create mode 100644 vel/command/latent/vae_nll.py create mode 100644 vel/data/loader/__init__.py rename vel/data/{ => loader}/bucket_loader.py (81%) rename vel/data/{ => loader}/dataset_loader.py (88%) rename vel/data/{ => loader}/text_character_loader.py (88%) diff --git a/examples-configs/autoencoder/mnist/mnist_cnn_ae.yaml b/examples-configs/autoencoder/mnist/mnist_cnn_ae.yaml index 2591cc04..897bafaf 100644 --- a/examples-configs/autoencoder/mnist/mnist_cnn_ae.yaml +++ b/examples-configs/autoencoder/mnist/mnist_cnn_ae.yaml @@ -15,7 +15,7 @@ source: loader: - name: vel.data.dataset_loader + name: vel.data.loader.dataset_loader batch_size: 128 num_workers: 4 diff --git a/examples-configs/classification/cifar10/cifar10_cnn_01.yaml b/examples-configs/classification/cifar10/cifar10_cnn_01.yaml index e6292546..d192c20b 100644 --- a/examples-configs/classification/cifar10/cifar10_cnn_01.yaml +++ b/examples-configs/classification/cifar10/cifar10_cnn_01.yaml @@ -14,7 +14,7 @@ source: loader: - name: vel.data.dataset_loader + name: vel.data.loader.dataset_loader batch_size: 128 num_workers: 4 diff --git a/examples-configs/classification/cifar10/cifar10_resnetv1_110.yaml b/examples-configs/classification/cifar10/cifar10_resnetv1_110.yaml index 3ce7feb8..c6f45b84 100644 --- a/examples-configs/classification/cifar10/cifar10_resnetv1_110.yaml +++ b/examples-configs/classification/cifar10/cifar10_resnetv1_110.yaml @@ -15,7 +15,7 @@ source: loader: - name: vel.data.dataset_loader + name: vel.data.loader.dataset_loader batch_size: 128 num_workers: 4 diff --git a/examples-configs/classification/cifar10/cifar10_resnetv1_32.yaml b/examples-configs/classification/cifar10/cifar10_resnetv1_32.yaml index 935b8277..2cd2fedb 100644 --- a/examples-configs/classification/cifar10/cifar10_resnetv1_32.yaml +++ b/examples-configs/classification/cifar10/cifar10_resnetv1_32.yaml @@ -15,7 +15,7 @@ source: loader: - name: vel.data.dataset_loader + name: vel.data.loader.dataset_loader batch_size: 128 
num_workers: 4 diff --git a/examples-configs/classification/cifar10/cifar10_resnetv2_110.yaml b/examples-configs/classification/cifar10/cifar10_resnetv2_110.yaml index f0bd7291..fb0213d6 100644 --- a/examples-configs/classification/cifar10/cifar10_resnetv2_110.yaml +++ b/examples-configs/classification/cifar10/cifar10_resnetv2_110.yaml @@ -14,7 +14,7 @@ source: loader: - name: vel.data.dataset_loader + name: vel.data.loader.dataset_loader batch_size: 128 num_workers: 4 diff --git a/examples-configs/classification/cifar10/cifar10_resnetv2_164_bottleneck.yaml b/examples-configs/classification/cifar10/cifar10_resnetv2_164_bottleneck.yaml index a7ff1491..a24785bf 100644 --- a/examples-configs/classification/cifar10/cifar10_resnetv2_164_bottleneck.yaml +++ b/examples-configs/classification/cifar10/cifar10_resnetv2_164_bottleneck.yaml @@ -16,7 +16,7 @@ source: loader: - name: vel.data.dataset_loader + name: vel.data.loader.dataset_loader batch_size: 128 num_workers: 4 diff --git a/examples-configs/classification/cifar10/cifar10_resnetv2_32.yaml b/examples-configs/classification/cifar10/cifar10_resnetv2_32.yaml index 60ebf5ad..2d67a653 100644 --- a/examples-configs/classification/cifar10/cifar10_resnetv2_32.yaml +++ b/examples-configs/classification/cifar10/cifar10_resnetv2_32.yaml @@ -14,7 +14,7 @@ source: loader: - name: vel.data.dataset_loader + name: vel.data.loader.dataset_loader batch_size: 128 num_workers: 4 diff --git a/examples-configs/classification/cifar10/cifar10_resnext_29_c1.yaml b/examples-configs/classification/cifar10/cifar10_resnext_29_c1.yaml index c007c5fe..601a9a27 100644 --- a/examples-configs/classification/cifar10/cifar10_resnext_29_c1.yaml +++ b/examples-configs/classification/cifar10/cifar10_resnext_29_c1.yaml @@ -19,7 +19,7 @@ source: loader: - name: vel.data.dataset_loader + name: vel.data.loader.dataset_loader batch_size: 128 num_workers: 4 diff --git a/examples-configs/classification/cifar10/cifar10_resnext_29_c8.yaml b/examples-configs/classification/cifar10/cifar10_resnext_29_c8.yaml index 6e90611d..632a5f45 100644 --- a/examples-configs/classification/cifar10/cifar10_resnext_29_c8.yaml +++ b/examples-configs/classification/cifar10/cifar10_resnext_29_c8.yaml @@ -19,7 +19,7 @@ source: loader: - name: vel.data.dataset_loader + name: vel.data.loader.dataset_loader batch_size: 128 num_workers: 4 diff --git a/examples-configs/classification/imagenet_transfer/cats_vs_dogs_resnet34.yaml b/examples-configs/classification/imagenet_transfer/cats_vs_dogs_resnet34.yaml index da2f8b8f..83784841 100644 --- a/examples-configs/classification/imagenet_transfer/cats_vs_dogs_resnet34.yaml +++ b/examples-configs/classification/imagenet_transfer/cats_vs_dogs_resnet34.yaml @@ -16,7 +16,7 @@ source: loader: - name: vel.data.dataset_loader + name: vel.data.loader.dataset_loader num_workers: 8 batch_size: 64 diff --git a/examples-configs/classification/mnist/mnist_cnn_01.yaml b/examples-configs/classification/mnist/mnist_cnn_01.yaml index d11b5742..f23a96c6 100644 --- a/examples-configs/classification/mnist/mnist_cnn_01.yaml +++ b/examples-configs/classification/mnist/mnist_cnn_01.yaml @@ -14,7 +14,7 @@ source: loader: - name: vel.data.dataset_loader + name: vel.data.loader.dataset_loader batch_size: 128 # num_workers: 4 diff --git a/examples-configs/gan/mnist/mnist_gan.yaml b/examples-configs/gan/mnist/mnist_gan.yaml index 4de4ecce..6a3ea519 100644 --- a/examples-configs/gan/mnist/mnist_gan.yaml +++ b/examples-configs/gan/mnist/mnist_gan.yaml @@ -14,7 +14,7 @@ source: loader: - name: 
vel.data.dataset_loader + name: vel.data.loader.dataset_loader batch_size: 128 # num_workers: 4 # pin_memory: true diff --git a/examples-configs/latent/mnist/mnist_cnn_iwae.yaml b/examples-configs/latent/mnist/mnist_cnn_iwae.yaml index 90cb5da7..56e86038 100644 --- a/examples-configs/latent/mnist/mnist_cnn_iwae.yaml +++ b/examples-configs/latent/mnist/mnist_cnn_iwae.yaml @@ -17,7 +17,7 @@ source: loader: - name: vel.data.dataset_loader + name: vel.data.loader.dataset_loader batch_size: 128 num_workers: 4 pin_memory: true diff --git a/examples-configs/latent/mnist/mnist_cnn_vae.yaml b/examples-configs/latent/mnist/mnist_cnn_vae.yaml index 118ad430..11debfb0 100644 --- a/examples-configs/latent/mnist/mnist_cnn_vae.yaml +++ b/examples-configs/latent/mnist/mnist_cnn_vae.yaml @@ -16,7 +16,7 @@ source: loader: - name: vel.data.dataset_loader + name: vel.data.loader.dataset_loader batch_size: 256 num_workers: 4 pin_memory: true diff --git a/examples-configs/latent/mnist/mnist_cnn_vq_vae.yaml b/examples-configs/latent/mnist/mnist_cnn_vq_vae.yaml index bab34608..e99c7703 100644 --- a/examples-configs/latent/mnist/mnist_cnn_vq_vae.yaml +++ b/examples-configs/latent/mnist/mnist_cnn_vq_vae.yaml @@ -18,7 +18,7 @@ source: loader: - name: vel.data.dataset_loader + name: vel.data.loader.dataset_loader batch_size: 128 # num_workers: 4 # pin_memory: true diff --git a/examples-configs/latent/mnist/mnist_fc_iwae.yaml b/examples-configs/latent/mnist/mnist_fc_iwae.yaml index 215906dd..46111a50 100644 --- a/examples-configs/latent/mnist/mnist_fc_iwae.yaml +++ b/examples-configs/latent/mnist/mnist_fc_iwae.yaml @@ -17,7 +17,7 @@ source: loader: - name: vel.data.dataset_loader + name: vel.data.loader.dataset_loader batch_size: 128 num_workers: 4 pin_memory: true diff --git a/examples-configs/latent/mnist/mnist_fc_vae.yaml b/examples-configs/latent/mnist/mnist_fc_vae.yaml index 1fa51447..d9a64b4b 100644 --- a/examples-configs/latent/mnist/mnist_fc_vae.yaml +++ b/examples-configs/latent/mnist/mnist_fc_vae.yaml @@ -15,7 +15,7 @@ source: name: vel.data.source.vision.mnist loader: - name: vel.data.dataset_loader + name: vel.data.loader.dataset_loader batch_size: 128 num_workers: 4 pin_memory: true @@ -42,4 +42,9 @@ scheduler: commands: train: name: vel.command.train_command - epochs: 3280 \ No newline at end of file + epochs: 3280 + + nll: + name: vel.command.latent.vae_nll + max_batch: 10_000 + samples: !param samples = 10 \ No newline at end of file diff --git a/examples-configs/latent/omniglot/omniglot_cnn_vae.yaml b/examples-configs/latent/omniglot/omniglot_cnn_vae.yaml index 2df6f80b..e952d0e0 100644 --- a/examples-configs/latent/omniglot/omniglot_cnn_vae.yaml +++ b/examples-configs/latent/omniglot/omniglot_cnn_vae.yaml @@ -17,7 +17,7 @@ source: loader: - name: vel.data.dataset_loader + name: vel.data.loader.dataset_loader batch_size: 128 num_workers: 4 pin_memory: true diff --git a/examples-configs/latent/omniglot/omniglot_fc_vae.yaml b/examples-configs/latent/omniglot/omniglot_fc_vae.yaml index 263a72eb..700ca014 100644 --- a/examples-configs/latent/omniglot/omniglot_fc_vae.yaml +++ b/examples-configs/latent/omniglot/omniglot_fc_vae.yaml @@ -17,7 +17,7 @@ source: loader: - name: vel.data.dataset_loader + name: vel.data.loader.dataset_loader batch_size: 128 num_workers: 4 pin_memory: true diff --git a/examples-configs/nlp/classification/imdb_sentiment_gru.yaml b/examples-configs/nlp/classification/imdb_sentiment_gru.yaml index cb3c9e2a..8fa86a95 100644 --- 
a/examples-configs/nlp/classification/imdb_sentiment_gru.yaml +++ b/examples-configs/nlp/classification/imdb_sentiment_gru.yaml @@ -7,7 +7,7 @@ source: loader: - name: vel.data.bucket_loader + name: vel.data.loader.bucket_loader batch_size: 32 diff --git a/examples-configs/nlp/generation/gen_shakespeare.yaml b/examples-configs/nlp/generation/gen_shakespeare.yaml index ac7bd121..f27a7161 100644 --- a/examples-configs/nlp/generation/gen_shakespeare.yaml +++ b/examples-configs/nlp/generation/gen_shakespeare.yaml @@ -9,7 +9,7 @@ source: loader: - name: vel.data.text_character_loader + name: vel.data.loader.text_character_loader sequence_length: 128 batch_size: 64 diff --git a/vel/command/augvis_command.py b/vel/command/augvis_command.py index 880830b9..734f5e44 100644 --- a/vel/command/augvis_command.py +++ b/vel/command/augvis_command.py @@ -1,7 +1,7 @@ import matplotlib.pyplot as plt import numpy as np -from vel.data import DatasetLoader +from vel.data.loader import DatasetLoader class AugmentationVisualizationCommand: diff --git a/vel/command/latent/__init__.py b/vel/command/latent/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/vel/command/latent/vae_nll.py b/vel/command/latent/vae_nll.py new file mode 100644 index 00000000..9aafa3c0 --- /dev/null +++ b/vel/command/latent/vae_nll.py @@ -0,0 +1,53 @@ +import numpy as np +import torch +import tqdm + +from vel.api import TrainingInfo + + +class VaeNllCommand: + """ Calculate NLL for the VAE using importance sampling """ + def __init__(self, model_config, model_factory, loader, storage, max_batch: int, samples: int): + self.model_config = model_config + self.model_factory = model_factory + self.loader = loader + self.storage = storage + + self.max_batch = max_batch + self.samples = samples + + @torch.no_grad() + def run(self): + device = self.model_config.torch_device() + model = self.model_factory.instantiate().to(device) + + start_epoch = self.storage.last_epoch_idx() + + training_info = TrainingInfo(start_epoch_idx=start_epoch) + + model_state, hidden_state = self.storage.load(training_info) + model.load_state_dict(model_state) + + model.eval() + + validation_dataset = self.loader.source.validation + + results = [] + + # Always take at least one + batch_size = max(self.max_batch // self.samples, 1) + + for i in tqdm.trange(validation_dataset.num_batches(batch_size)): + batch = validation_dataset.get_batch(i, batch_size)['x'].to(self.model_config.device) + nll = model.nll(batch, num_posterior_samples=self.samples) + + results.append(nll.cpu().numpy()) + + full_results = np.concatenate(results) + + print("NLL: {:.2f}".format(np.mean(full_results))) + + +def create(model_config, model, loader, storage, max_batch: int = 1024, samples: int = 100): + """ Vel factory function """ + return VaeNllCommand(model_config, model, loader, storage, max_batch=max_batch, samples=samples) diff --git a/vel/data/__init__.py b/vel/data/__init__.py index 122edbd6..3a6245c7 100644 --- a/vel/data/__init__.py +++ b/vel/data/__init__.py @@ -1,2 +1 @@ from .dataflow import DataFlow -from .dataset_loader import DatasetLoader diff --git a/vel/data/loader/__init__.py b/vel/data/loader/__init__.py new file mode 100644 index 00000000..57122da0 --- /dev/null +++ b/vel/data/loader/__init__.py @@ -0,0 +1,3 @@ +from .dataset_loader import DatasetLoader +from .bucket_loader import BucketLoader +from .text_character_loader import TextCharacterLoader diff --git a/vel/data/bucket_loader.py b/vel/data/loader/bucket_loader.py similarity index 81% rename from 
vel/data/bucket_loader.py rename to vel/data/loader/bucket_loader.py index 740d83c1..f587d4a6 100644 --- a/vel/data/bucket_loader.py +++ b/vel/data/loader/bucket_loader.py @@ -8,12 +8,12 @@ class BucketLoader: """ Loads sequence data from a source and batches together examples of similar length """ def __init__(self, model_config: ModelConfig, source: LanguageSource, batch_size: int): - self.source = source + self._source = source self.batch_size = batch_size - if self.source.test is None: + if self._source.test is None: self.train_loader, self.val_loader = data.BucketIterator.splits( - (self.source.train, self.source.validation), + (self._source.train, self._source.validation), batch_size=batch_size, device=model_config.torch_device(), shuffle=True @@ -21,17 +21,17 @@ def __init__(self, model_config: ModelConfig, source: LanguageSource, batch_size self.test_loader = None else: self.train_loader, self.val_loader, self.test_loader = data.BucketIterator.splits( - (self.source.train, self.source.validation, self.source.test), + (self._source.train, self._source.validation, self._source.test), batch_size=batch_size, device=model_config.torch_device(), shuffle=True ) - self.train_loader = IteratorDictWrapper(self.train_loader, self.source.mapping) - self.val_loader = IteratorDictWrapper(self.val_loader, self.source.mapping) + self.train_loader = IteratorDictWrapper(self.train_loader, self._source.mapping) + self.val_loader = IteratorDictWrapper(self.val_loader, self._source.mapping) if self.test_loader: - self.test_loader = IteratorDictWrapper(self.test_loader, self.source.mapping) + self.test_loader = IteratorDictWrapper(self.test_loader, self._source.mapping) self._loaders = { 'train': self.train_loader, @@ -48,6 +48,11 @@ def __init__(self, model_config: ModelConfig, source: LanguageSource, batch_size def __getitem__(self, item): return self._loaders[item] + @property + def source(self): + """ Return the source for this loader """ + return self._source + @property def loader(self): """ Get a dict of loaders """ @@ -61,7 +66,7 @@ def size(self): @property def alphabet_size(self): """ Size of the text alphabet """ - return self.source.metadata.get('alphabet_size', 0) + return self._source.metadata.get('alphabet_size', 0) def create(model_config: ModelConfig, source: LanguageSource, batch_size: int): diff --git a/vel/data/dataset_loader.py b/vel/data/loader/dataset_loader.py similarity index 88% rename from vel/data/dataset_loader.py rename to vel/data/loader/dataset_loader.py index 59be7841..aa37fd62 100644 --- a/vel/data/dataset_loader.py +++ b/vel/data/loader/dataset_loader.py @@ -3,7 +3,7 @@ from vel.api import Source -from .dataflow import DataFlow +from vel.data.dataflow import DataFlow class DatasetLoader: @@ -11,14 +11,14 @@ class DatasetLoader: def __init__(self, source: Source, batch_size: int, num_workers: int, transformations: typing.Optional[list] = None, pin_memory=False): - self.source = source + self._source = source self.batch_size = batch_size self.num_workers = num_workers self.transformations = transformations self.pin_memory = pin_memory if transformations is not None: - self.transformed_source = DataFlow.transform(self.source, transformations) + self.transformed_source = DataFlow.transform(self._source, transformations) else: self.transformed_source = source @@ -54,6 +54,11 @@ def __init__(self, source: Source, batch_size: int, num_workers: int, def __getitem__(self, item): return self._loaders[item] + @property + def source(self): + """ Return the source for this loader 
""" + return self.transformed_source + @property def loader(self): """ Get a dict of loaders """ diff --git a/vel/data/text_character_loader.py b/vel/data/loader/text_character_loader.py similarity index 88% rename from vel/data/text_character_loader.py rename to vel/data/loader/text_character_loader.py index 5eafc95b..d37e095f 100644 --- a/vel/data/text_character_loader.py +++ b/vel/data/loader/text_character_loader.py @@ -81,18 +81,18 @@ class TextCharacterLoader: """ Loader for the text character data source """ def __init__(self, source, sequence_length: int, batch_size: int): - self.source = source + self._source = source self.sequence_length = sequence_length self.batch_size = batch_size - self.alphabet = self.source.metadata['alphabet'] + self.alphabet = self._source.metadata['alphabet'] - self.train_loader = TextLoader(self.source.train, self.sequence_length, self.batch_size, len(self.alphabet)) - self.val_loader = TextLoader(self.source.validation, self.sequence_length, self.batch_size, len(self.alphabet)) + self.train_loader = TextLoader(self._source.train, self.sequence_length, self.batch_size, len(self.alphabet)) + self.val_loader = TextLoader(self._source.validation, self.sequence_length, self.batch_size, len(self.alphabet)) - if self.source.test is None: + if self._source.test is None: self.test_loader = None else: - self.test_loader = TextLoader(self.source.test, self.sequence_length, self.batch_size, len(self.alphabet)) + self.test_loader = TextLoader(self._source.test, self.sequence_length, self.batch_size, len(self.alphabet)) self._loaders = { 'train': self.train_loader, @@ -109,6 +109,11 @@ def __init__(self, source, sequence_length: int, batch_size: int): def __getitem__(self, item): return self._loaders[item] + @property + def source(self): + """ Return source for this loader """ + return self._source + @property def alphabet_size(self): """ Size of the text alphabet """ diff --git a/vel/model/latent/vae_base.py b/vel/model/latent/vae_base.py index 75562b42..4c3509ab 100644 --- a/vel/model/latent/vae_base.py +++ b/vel/model/latent/vae_base.py @@ -129,8 +129,6 @@ def nll(self, sample: torch.Tensor, num_posterior_samples: int = 1): """ assert num_posterior_samples >= 1, "Need at least one posterior sample" - buffer = [] - encoded = self.encoder_network(sample) z_dist = self.encoder_distribution(encoded) prior = self.prior_distribution() @@ -138,24 +136,25 @@ def nll(self, sample: torch.Tensor, num_posterior_samples: int = 1): if self.analytical_kl_div: kl_divergence = dist.kl_divergence(z_dist, prior) - for i in range(num_posterior_samples): - z = z_dist.rsample() - decoded = self.decoder_network(z) - x_dist = self.decoder_distribution(decoded) + bs = encoded.size(0) + z = z_dist.rsample((num_posterior_samples,)) - if not self.analytical_kl_div: - lpz = prior.log_prob(z) - lqzx = z_dist.log_prob(z) - kl_divergence = -lpz + lqzx + # Reshape, decode, reshape + z_reshaped = z.view([bs * num_posterior_samples] + list(z.shape[2:])) + decoded = self.decoder_network(z_reshaped) + decoded = decoded.view([num_posterior_samples, bs] + list(decoded.shape[1:])) - likelihood = x_dist.log_prob(sample) - elbo = likelihood - kl_divergence + x_dist = self.decoder_distribution(decoded) - buffer.append(elbo) + if not self.analytical_kl_div: + lpz = prior.log_prob(z) + lqzx = z_dist.log_prob(z) + kl_divergence = -lpz + lqzx - averaged = self.log_mean_exp(torch.stack(buffer, dim=-1), dim=-1) + likelihood = x_dist.log_prob(sample) + elbo = likelihood - kl_divergence - return -averaged + 
return -self.log_mean_exp(elbo, dim=0) #################################################################################################################### # Utility methods diff --git a/vel/train/phase/generic.py b/vel/train/phase/generic.py index 00c86660..4619a390 100644 --- a/vel/train/phase/generic.py +++ b/vel/train/phase/generic.py @@ -1,5 +1,5 @@ from vel.api import TrainingInfo, EpochInfo, OptimizedModel -from vel.data import DatasetLoader +from vel.data.loader import DatasetLoader from vel.train import TrainPhase diff --git a/vel/train/train_phase.py b/vel/train/train_phase.py index 13733d12..8f7ff84f 100644 --- a/vel/train/train_phase.py +++ b/vel/train/train_phase.py @@ -1,5 +1,5 @@ from vel.api import TrainingInfo, EpochInfo, Model, VelOptimizer -from vel.data import DatasetLoader +from vel.data.loader import DatasetLoader from .trainer import Trainer diff --git a/vel/train/trainer.py b/vel/train/trainer.py index 7d151b18..2a18266f 100644 --- a/vel/train/trainer.py +++ b/vel/train/trainer.py @@ -4,7 +4,7 @@ import tqdm from vel.api import OptimizedModel, TrainingInfo, EpochInfo, BatchInfo -from vel.data import DatasetLoader +from vel.data.loader import DatasetLoader from vel.util.tensor_util import to_device From 8923844cdf92e115e324c3f832b649495025d5f2 Mon Sep 17 00:00:00 2001 From: Million Integrals Date: Fri, 18 Oct 2019 19:01:55 -0700 Subject: [PATCH 135/162] Fixed issue in train command. --- vel/command/train_command.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/vel/command/train_command.py b/vel/command/train_command.py index 9e94450c..bbe45e8b 100644 --- a/vel/command/train_command.py +++ b/vel/command/train_command.py @@ -1,9 +1,9 @@ import typing import vel.api as api -import vel.data as data import vel.train as train +from vel.data.loader import DatasetLoader from vel.metric.samples_per_sec import SamplesPerSec from vel.callback.time_tracker import TimeTracker from vel.callback.sample_tracker import SampleTracker @@ -14,7 +14,7 @@ class SimpleTrainCommand: def __init__(self, epochs: int, model_config: api.ModelConfig, model_factory: api.ModuleFactory, optimizer_factory: api.OptimizerFactory, scheduler_factory: typing.Optional[api.SchedulerFactory], - loader: data.DatasetLoader, storage: api.Storage, + loader: DatasetLoader, storage: api.Storage, callbacks: typing.Optional[typing.List[api.Callback]]): self.epochs = epochs self.model_config = model_config From 3eb8d2ffd4916070ac7fae37d3075f3fc33f579d Mon Sep 17 00:00:00 2001 From: Million Integrals Date: Sat, 19 Oct 2019 12:29:42 -0700 Subject: [PATCH 136/162] Remove omniglot VAE examples. 
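For reference, the batched `nll` in the `vae_base.py` change above draws all posterior samples with one `rsample` call instead of a Python loop and then takes a log-mean-exp over the sample dimension. A self-contained sketch of the same importance-weighted bound, assuming a diagonal Gaussian posterior, a standard normal prior and a Bernoulli decoder over binarized inputs (all names below are illustrative, not the library API):

```python
import math

import torch
import torch.distributions as dist


def iwae_nll(x, q_mean, q_std, decoder, num_samples=50):
    """Importance-weighted NLL bound: -log(1/K * sum_k p(x, z_k) / q(z_k | x)).

    x:              [batch, features] binarized observations
    q_mean, q_std:  diagonal Gaussian posterior parameters, [batch, latent]
    decoder:        maps [K * batch, latent] -> Bernoulli logits [K * batch, features]
    """
    batch_size, latent_dim = q_mean.shape

    posterior = dist.Independent(dist.Normal(q_mean, q_std), 1)
    prior = dist.Independent(dist.Normal(torch.zeros_like(q_mean), torch.ones_like(q_std)), 1)

    # One batched reparameterized sample of shape [K, batch, latent] instead of a loop
    z = posterior.rsample((num_samples,))

    logits = decoder(z.reshape(num_samples * batch_size, latent_dim))
    logits = logits.reshape(num_samples, batch_size, -1)

    log_px_given_z = dist.Independent(dist.Bernoulli(logits=logits), 1).log_prob(x)
    log_weight = log_px_given_z + prior.log_prob(z) - posterior.log_prob(z)   # [K, batch]

    # Numerically stable log-mean-exp over the K posterior samples
    return -(torch.logsumexp(log_weight, dim=0) - math.log(num_samples))
```

With K=1 the bound reduces to the usual negative ELBO, which is why the VAE and IWAE models can share this code path.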
--- .../latent/omniglot/omniglot_cnn_vae.yaml | 49 ----------------- .../latent/omniglot/omniglot_fc_vae.yaml | 54 ------------------- 2 files changed, 103 deletions(-) delete mode 100644 examples-configs/latent/omniglot/omniglot_cnn_vae.yaml delete mode 100644 examples-configs/latent/omniglot/omniglot_fc_vae.yaml diff --git a/examples-configs/latent/omniglot/omniglot_cnn_vae.yaml b/examples-configs/latent/omniglot/omniglot_cnn_vae.yaml deleted file mode 100644 index e952d0e0..00000000 --- a/examples-configs/latent/omniglot/omniglot_cnn_vae.yaml +++ /dev/null @@ -1,49 +0,0 @@ -name: 'omniglot_cnn_vae' - - -model: - name: vel.model.latent.cnn_vae - img_rows: 28 - img_cols: 28 - img_channels: 1 - channels: [64, 128, 256] - representation_length: 50 - max_grad_norm: 1.0 - analytical_kl_div: true - - -source: - name: vel.data.source.vision.omniglot - - -loader: - name: vel.data.loader.dataset_loader - batch_size: 128 - num_workers: 4 - pin_memory: true - - transformations: - - name: vel.data.transformation.pil_resize - shape: [28, 28] - - name: vel.data.transformation.to_array - - name: vel.data.transformation.binarize_image - - name: vel.data.transformation.image_to_tensor - - name: vel.data.transformation.unsupervised - - -optimizer: - name: vel.optimizer.radam - lr: 1.0e-3 - eps: 1.0e-4 - - -commands: - augvis: - name: vel.command.augvis_command - samples: 5 - cases: 3 - - train: - name: vel.command.train_command - epochs: 3280 - diff --git a/examples-configs/latent/omniglot/omniglot_fc_vae.yaml b/examples-configs/latent/omniglot/omniglot_fc_vae.yaml deleted file mode 100644 index 700ca014..00000000 --- a/examples-configs/latent/omniglot/omniglot_fc_vae.yaml +++ /dev/null @@ -1,54 +0,0 @@ -name: 'omniglot_fc_vae' - - -model: - name: vel.model.latent.fc_vae - img_rows: 28 - img_cols: 28 - img_channels: 1 - layers: [200, 200] - representation_length: 50 - max_grad_norm: 1.0 - analytical_kl_div: true - - -source: - name: vel.data.source.vision.omniglot - - -loader: - name: vel.data.loader.dataset_loader - batch_size: 128 - num_workers: 4 - pin_memory: true - - transformations: - - name: vel.data.transformation.pil_resize - shape: [28, 28] - - name: vel.data.transformation.to_array - - name: vel.data.transformation.binarize_image - - name: vel.data.transformation.image_to_tensor - - name: vel.data.transformation.unsupervised - - -optimizer: - name: vel.optimizer.radam - lr: 1.0e-3 - eps: 1.0e-4 - - -scheduler: - name: vel.scheduler.multi_step - gamma: 0.71968 # 10 * (-1/7) - milestones: [ 1, 4, 13, 40, 121, 364, 1093, 3280] - - -commands: - augvis: - name: vel.command.augvis_command - samples: 5 - cases: 3 - - train: - name: vel.command.train_command - epochs: 3280 From da8a3b7d6440483e17e5a844950d557b06e18e15 Mon Sep 17 00:00:00 2001 From: Million Integrals Date: Sun, 20 Oct 2019 12:40:01 -0700 Subject: [PATCH 137/162] Initial benchmarks. --- README.md | 6 +++- docs/Benchmarks.md | 26 ++++++++++++++++ .../latent/mnist/mnist_cnn_iwae.yaml | 1 + .../latent/mnist/mnist_cnn_vae.yaml | 5 ++-- .../latent/mnist/mnist_cnn_vq_vae.yaml | 1 + .../latent/mnist/mnist_fc_iwae.yaml | 4 ++- .../latent/mnist/mnist_fc_vae.yaml | 4 ++- vel/model/latent/fc_vae.py | 30 ++++++++++--------- 8 files changed, 58 insertions(+), 19 deletions(-) create mode 100644 docs/Benchmarks.md diff --git a/README.md b/README.md index 22075ae9..29681425 100644 --- a/README.md +++ b/README.md @@ -39,6 +39,9 @@ If that's not the case few bits of custom glue code should do the job. 
This repository is still in an early stage of that journey but it will grow as I'll be putting work into it. +For up-to-date benchmarks, look here: +[Benchmarks](docs/Benchmarks.md) + # Blogposts @@ -59,7 +62,7 @@ If you want to run YAML config examples, you'll also need a **project configurat `.velproject.yaml`. An example is included in this repository. Default project configuration writes logs to the tensorboard directory `output/tensorboard` -under the main directory. Outputs to visdom and mongodb are also implemented. +under the main directory. Output modules to visdom, mongodb and wandb are also implemented. If you don't want any logging, there is included another example file `.velproject.dummy.yaml` that writes training progress to the standard output only. @@ -77,6 +80,7 @@ To use it, just rename it to `.velproject.yaml`. understand what exactly the model is doing for newcomers already comfortable with PyTorch. - All state-of-the-art models should be implemented in the framework with accuracy matching published results. + For up-to-date benchmarks, look here: [Benchmarks](docs/Benchmarks.md) - All common deep learning workflows should be fast to implement, while uncommon ones should be possible, at least as far as PyTorch allows. diff --git a/docs/Benchmarks.md b/docs/Benchmarks.md new file mode 100644 index 00000000..274224fa --- /dev/null +++ b/docs/Benchmarks.md @@ -0,0 +1,26 @@ +# Benchmarks + +In this file I'll gather up to date benchmarking results for examples included in this repository. + +Levels of hierarchy will be first task, then dataset (benchmark) and then table listing model results of +relevant metrics. + +Each metric I'll try to average over six runs and provide mean and standard deviation of results. + + +## Generative models + + +### Binarized MNIST + + +For VAE models, I'll include upper bound for Negative Log Likelihood (NLL) for given number of importance samples (IS). 
+ + +| Model | NLL (IS=1) | NLL (IS=100) | NLL (IS=5000) | +| ----- | ---------- | ------------ | ------------- | +| FC VAE | 90.98 ± 0.14 | 87.07 ± 0.18 | 86.93 ± 0.18 | +| CNN VAE | +| FC IWAE | +| CNN IWAE | + diff --git a/examples-configs/latent/mnist/mnist_cnn_iwae.yaml b/examples-configs/latent/mnist/mnist_cnn_iwae.yaml index 56e86038..fc3d39fd 100644 --- a/examples-configs/latent/mnist/mnist_cnn_iwae.yaml +++ b/examples-configs/latent/mnist/mnist_cnn_iwae.yaml @@ -33,6 +33,7 @@ optimizer: name: vel.optimizer.radam lr: 1.0e-3 eps: 1.0e-4 + max_grad_norm: 1.0 scheduler: diff --git a/examples-configs/latent/mnist/mnist_cnn_vae.yaml b/examples-configs/latent/mnist/mnist_cnn_vae.yaml index 11debfb0..56340d6b 100644 --- a/examples-configs/latent/mnist/mnist_cnn_vae.yaml +++ b/examples-configs/latent/mnist/mnist_cnn_vae.yaml @@ -18,8 +18,8 @@ source: loader: name: vel.data.loader.dataset_loader batch_size: 256 - num_workers: 4 - pin_memory: true +# num_workers: 4 +# pin_memory: true transformations: - name: vel.data.transformation.to_array @@ -45,6 +45,7 @@ optimizer: name: vel.optimizer.radam lr: 1.0e-3 eps: 1.0e-4 + max_grad_norm: 1.0 scheduler: diff --git a/examples-configs/latent/mnist/mnist_cnn_vq_vae.yaml b/examples-configs/latent/mnist/mnist_cnn_vq_vae.yaml index e99c7703..8e1dbed1 100644 --- a/examples-configs/latent/mnist/mnist_cnn_vq_vae.yaml +++ b/examples-configs/latent/mnist/mnist_cnn_vq_vae.yaml @@ -33,6 +33,7 @@ optimizer: name: vel.optimizer.radam lr: 1.0e-3 eps: 1.0e-4 + max_grad_norm: 1.0 scheduler: diff --git a/examples-configs/latent/mnist/mnist_fc_iwae.yaml b/examples-configs/latent/mnist/mnist_fc_iwae.yaml index 46111a50..48fa116f 100644 --- a/examples-configs/latent/mnist/mnist_fc_iwae.yaml +++ b/examples-configs/latent/mnist/mnist_fc_iwae.yaml @@ -32,7 +32,9 @@ loader: optimizer: name: vel.optimizer.radam lr: 1.0e-3 - eps: 1.0e-4 + eps: 1.0e- + max_grad_norm: 1.0 + scheduler: diff --git a/examples-configs/latent/mnist/mnist_fc_vae.yaml b/examples-configs/latent/mnist/mnist_fc_vae.yaml index d9a64b4b..e61c2028 100644 --- a/examples-configs/latent/mnist/mnist_fc_vae.yaml +++ b/examples-configs/latent/mnist/mnist_fc_vae.yaml @@ -14,6 +14,7 @@ model: source: name: vel.data.source.vision.mnist + loader: name: vel.data.loader.dataset_loader batch_size: 128 @@ -29,8 +30,9 @@ loader: optimizer: name: vel.optimizer.radam - lr: 1.0e-3 + lr: 1.0e-2 eps: 1.0e-4 + max_grad_norm: 1.0 scheduler: diff --git a/vel/model/latent/fc_vae.py b/vel/model/latent/fc_vae.py index 701c5717..8c5fbad4 100644 --- a/vel/model/latent/fc_vae.py +++ b/vel/model/latent/fc_vae.py @@ -1,7 +1,10 @@ +import itertools as it + import torch import torch.distributions as dist import torch.nn as nn import torch.nn.functional as F +import torch.nn.init as init from vel.api import ModuleFactory from vel.module.layers import Flatten, Reshape @@ -75,20 +78,19 @@ def decoder_sample(self, decoded: torch.Tensor) -> torch.Tensor: """ Sample from a decoder distribution - we ignore that since it's so weak in this case """ return decoded -# import torch.nn.init as init -# @staticmethod -# def _weight_initializer(tensor): -# init.xavier_uniform_(tensor.weight, gain=init.calculate_gain('tanh')) -# init.constant_(tensor.bias, 0.01) -# -# def reset_weights(self): -# for m in it.chain(self.encoder, self.decoder): -# if isinstance(m, nn.Conv2d): -# self._weight_initializer(m) -# elif isinstance(m, nn.ConvTranspose2d): -# self._weight_initializer(m) -# elif isinstance(m, nn.Linear): -# self._weight_initializer(m) + 
@staticmethod + def _weight_initializer(tensor): + init.xavier_uniform_(tensor.weight, gain=init.calculate_gain('tanh')) + init.constant_(tensor.bias, 0.01) + + def reset_weights(self): + for m in it.chain(self.encoder, self.decoder): + if isinstance(m, nn.Conv2d): + self._weight_initializer(m) + elif isinstance(m, nn.ConvTranspose2d): + self._weight_initializer(m) + elif isinstance(m, nn.Linear): + self._weight_initializer(m) def create(img_rows, img_cols, img_channels, layers=None, representation_length=32, From a6acbbad7367cfe815fe690e9d49b985ea581442 Mon Sep 17 00:00:00 2001 From: Million Integrals Date: Sun, 20 Oct 2019 12:50:49 -0700 Subject: [PATCH 138/162] Moved configs around. --- .../{gan => generative-adversarial}/mnist/mnist_gan.yaml | 0 .../{latent => generative-likelihood}/mnist/mnist_cnn_iwae.yaml | 0 .../{latent => generative-likelihood}/mnist/mnist_cnn_vae.yaml | 0 .../{latent => generative-likelihood}/mnist/mnist_cnn_vq_vae.yaml | 0 .../{latent => generative-likelihood}/mnist/mnist_fc_iwae.yaml | 0 .../{latent => generative-likelihood}/mnist/mnist_fc_vae.yaml | 0 .../cats_vs_dogs}/cats_vs_dogs_resnet34.yaml | 0 .../cifar10/cifar10_cnn_01.yaml | 0 .../cifar10/cifar10_resnetv1_110.yaml | 0 .../cifar10/cifar10_resnetv1_32.yaml | 0 .../cifar10/cifar10_resnetv2_110.yaml | 0 .../cifar10/cifar10_resnetv2_164_bottleneck.yaml | 0 .../cifar10/cifar10_resnetv2_32.yaml | 0 .../cifar10/cifar10_resnext_29_c1.yaml | 0 .../cifar10/cifar10_resnext_29_c8.yaml | 0 .../mnist/mnist_cnn_01.yaml | 0 .../shakespeare}/gen_shakespeare.yaml | 0 .../imdb_sentiment_gru.yaml | 0 18 files changed, 0 insertions(+), 0 deletions(-) rename examples-configs/{gan => generative-adversarial}/mnist/mnist_gan.yaml (100%) rename examples-configs/{latent => generative-likelihood}/mnist/mnist_cnn_iwae.yaml (100%) rename examples-configs/{latent => generative-likelihood}/mnist/mnist_cnn_vae.yaml (100%) rename examples-configs/{latent => generative-likelihood}/mnist/mnist_cnn_vq_vae.yaml (100%) rename examples-configs/{latent => generative-likelihood}/mnist/mnist_fc_iwae.yaml (100%) rename examples-configs/{latent => generative-likelihood}/mnist/mnist_fc_vae.yaml (100%) rename examples-configs/{classification/imagenet_transfer => image-classification/cats_vs_dogs}/cats_vs_dogs_resnet34.yaml (100%) rename examples-configs/{classification => image-classification}/cifar10/cifar10_cnn_01.yaml (100%) rename examples-configs/{classification => image-classification}/cifar10/cifar10_resnetv1_110.yaml (100%) rename examples-configs/{classification => image-classification}/cifar10/cifar10_resnetv1_32.yaml (100%) rename examples-configs/{classification => image-classification}/cifar10/cifar10_resnetv2_110.yaml (100%) rename examples-configs/{classification => image-classification}/cifar10/cifar10_resnetv2_164_bottleneck.yaml (100%) rename examples-configs/{classification => image-classification}/cifar10/cifar10_resnetv2_32.yaml (100%) rename examples-configs/{classification => image-classification}/cifar10/cifar10_resnext_29_c1.yaml (100%) rename examples-configs/{classification => image-classification}/cifar10/cifar10_resnext_29_c8.yaml (100%) rename examples-configs/{classification => image-classification}/mnist/mnist_cnn_01.yaml (100%) rename examples-configs/{nlp/generation => language-modeling/shakespeare}/gen_shakespeare.yaml (100%) rename examples-configs/{nlp/classification => text-classification}/imdb_sentiment_gru.yaml (100%) diff --git a/examples-configs/gan/mnist/mnist_gan.yaml 
b/examples-configs/generative-adversarial/mnist/mnist_gan.yaml similarity index 100% rename from examples-configs/gan/mnist/mnist_gan.yaml rename to examples-configs/generative-adversarial/mnist/mnist_gan.yaml diff --git a/examples-configs/latent/mnist/mnist_cnn_iwae.yaml b/examples-configs/generative-likelihood/mnist/mnist_cnn_iwae.yaml similarity index 100% rename from examples-configs/latent/mnist/mnist_cnn_iwae.yaml rename to examples-configs/generative-likelihood/mnist/mnist_cnn_iwae.yaml diff --git a/examples-configs/latent/mnist/mnist_cnn_vae.yaml b/examples-configs/generative-likelihood/mnist/mnist_cnn_vae.yaml similarity index 100% rename from examples-configs/latent/mnist/mnist_cnn_vae.yaml rename to examples-configs/generative-likelihood/mnist/mnist_cnn_vae.yaml diff --git a/examples-configs/latent/mnist/mnist_cnn_vq_vae.yaml b/examples-configs/generative-likelihood/mnist/mnist_cnn_vq_vae.yaml similarity index 100% rename from examples-configs/latent/mnist/mnist_cnn_vq_vae.yaml rename to examples-configs/generative-likelihood/mnist/mnist_cnn_vq_vae.yaml diff --git a/examples-configs/latent/mnist/mnist_fc_iwae.yaml b/examples-configs/generative-likelihood/mnist/mnist_fc_iwae.yaml similarity index 100% rename from examples-configs/latent/mnist/mnist_fc_iwae.yaml rename to examples-configs/generative-likelihood/mnist/mnist_fc_iwae.yaml diff --git a/examples-configs/latent/mnist/mnist_fc_vae.yaml b/examples-configs/generative-likelihood/mnist/mnist_fc_vae.yaml similarity index 100% rename from examples-configs/latent/mnist/mnist_fc_vae.yaml rename to examples-configs/generative-likelihood/mnist/mnist_fc_vae.yaml diff --git a/examples-configs/classification/imagenet_transfer/cats_vs_dogs_resnet34.yaml b/examples-configs/image-classification/cats_vs_dogs/cats_vs_dogs_resnet34.yaml similarity index 100% rename from examples-configs/classification/imagenet_transfer/cats_vs_dogs_resnet34.yaml rename to examples-configs/image-classification/cats_vs_dogs/cats_vs_dogs_resnet34.yaml diff --git a/examples-configs/classification/cifar10/cifar10_cnn_01.yaml b/examples-configs/image-classification/cifar10/cifar10_cnn_01.yaml similarity index 100% rename from examples-configs/classification/cifar10/cifar10_cnn_01.yaml rename to examples-configs/image-classification/cifar10/cifar10_cnn_01.yaml diff --git a/examples-configs/classification/cifar10/cifar10_resnetv1_110.yaml b/examples-configs/image-classification/cifar10/cifar10_resnetv1_110.yaml similarity index 100% rename from examples-configs/classification/cifar10/cifar10_resnetv1_110.yaml rename to examples-configs/image-classification/cifar10/cifar10_resnetv1_110.yaml diff --git a/examples-configs/classification/cifar10/cifar10_resnetv1_32.yaml b/examples-configs/image-classification/cifar10/cifar10_resnetv1_32.yaml similarity index 100% rename from examples-configs/classification/cifar10/cifar10_resnetv1_32.yaml rename to examples-configs/image-classification/cifar10/cifar10_resnetv1_32.yaml diff --git a/examples-configs/classification/cifar10/cifar10_resnetv2_110.yaml b/examples-configs/image-classification/cifar10/cifar10_resnetv2_110.yaml similarity index 100% rename from examples-configs/classification/cifar10/cifar10_resnetv2_110.yaml rename to examples-configs/image-classification/cifar10/cifar10_resnetv2_110.yaml diff --git a/examples-configs/classification/cifar10/cifar10_resnetv2_164_bottleneck.yaml b/examples-configs/image-classification/cifar10/cifar10_resnetv2_164_bottleneck.yaml similarity index 100% rename from 
examples-configs/classification/cifar10/cifar10_resnetv2_164_bottleneck.yaml rename to examples-configs/image-classification/cifar10/cifar10_resnetv2_164_bottleneck.yaml diff --git a/examples-configs/classification/cifar10/cifar10_resnetv2_32.yaml b/examples-configs/image-classification/cifar10/cifar10_resnetv2_32.yaml similarity index 100% rename from examples-configs/classification/cifar10/cifar10_resnetv2_32.yaml rename to examples-configs/image-classification/cifar10/cifar10_resnetv2_32.yaml diff --git a/examples-configs/classification/cifar10/cifar10_resnext_29_c1.yaml b/examples-configs/image-classification/cifar10/cifar10_resnext_29_c1.yaml similarity index 100% rename from examples-configs/classification/cifar10/cifar10_resnext_29_c1.yaml rename to examples-configs/image-classification/cifar10/cifar10_resnext_29_c1.yaml diff --git a/examples-configs/classification/cifar10/cifar10_resnext_29_c8.yaml b/examples-configs/image-classification/cifar10/cifar10_resnext_29_c8.yaml similarity index 100% rename from examples-configs/classification/cifar10/cifar10_resnext_29_c8.yaml rename to examples-configs/image-classification/cifar10/cifar10_resnext_29_c8.yaml diff --git a/examples-configs/classification/mnist/mnist_cnn_01.yaml b/examples-configs/image-classification/mnist/mnist_cnn_01.yaml similarity index 100% rename from examples-configs/classification/mnist/mnist_cnn_01.yaml rename to examples-configs/image-classification/mnist/mnist_cnn_01.yaml diff --git a/examples-configs/nlp/generation/gen_shakespeare.yaml b/examples-configs/language-modeling/shakespeare/gen_shakespeare.yaml similarity index 100% rename from examples-configs/nlp/generation/gen_shakespeare.yaml rename to examples-configs/language-modeling/shakespeare/gen_shakespeare.yaml diff --git a/examples-configs/nlp/classification/imdb_sentiment_gru.yaml b/examples-configs/text-classification/imdb_sentiment_gru.yaml similarity index 100% rename from examples-configs/nlp/classification/imdb_sentiment_gru.yaml rename to examples-configs/text-classification/imdb_sentiment_gru.yaml From d826300ed08415f9f04e46f98b388eda9958005b Mon Sep 17 00:00:00 2001 From: Million Integrals Date: Sun, 20 Oct 2019 12:56:24 -0700 Subject: [PATCH 139/162] Improving WANDB bindings. 
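The W&B callback now takes the target project as an argument. A minimal sketch of wiring it up from code rather than from the `streaming:` section of `.velproject.yaml` (the factory signature is in the diff below; the project name here is just an example):

```python
from vel.api import ModelConfig
from vel.storage.streaming.wandb import create as wandb_streaming

model_config = ModelConfig.from_file(
    'examples-configs/generative-likelihood/mnist/mnist_fc_vae.yaml', run_number=1
)

# Metrics for this run go to the given W&B project instead of the default 'vel'
callback = wandb_streaming(model_config, project='vel-experiments')
```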
--- vel/storage/streaming/wandb.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/vel/storage/streaming/wandb.py b/vel/storage/streaming/wandb.py index 24bf0297..3db02c75 100644 --- a/vel/storage/streaming/wandb.py +++ b/vel/storage/streaming/wandb.py @@ -7,13 +7,14 @@ class WandbStreaming(Callback): """ Stream live results from training to WandB """ - def __init__(self, model_config: ModelConfig): + def __init__(self, model_config: ModelConfig, project: str): self.model_config = model_config + self.project = project def on_train_begin(self, training_info: TrainingInfo) -> None: wandb.init( job_type='train', - project='vel', + project=self.project, dir=self.model_config.output_dir('wandb'), group=self.model_config.name, name=self.model_config.run_name, @@ -27,6 +28,6 @@ def on_epoch_end(self, epoch_info): wandb.log(row=result, step=epoch_info.global_epoch_idx) -def create(model_config): +def create(model_config, project: str = 'vel'): """ Vel factory function """ - return WandbStreaming(model_config) + return WandbStreaming(model_config, project=project) From 7734186be55a071c19eb6d5a976cd45ac267f116 Mon Sep 17 00:00:00 2001 From: Million Integrals Date: Sun, 20 Oct 2019 13:19:51 -0700 Subject: [PATCH 140/162] Fixing a bug in metric key initialization. --- vel/api/info.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/vel/api/info.py b/vel/api/info.py index e76cec24..4177d592 100644 --- a/vel/api/info.py +++ b/vel/api/info.py @@ -122,7 +122,7 @@ def _reset_metrics(self): def value(self, dataset=None): """ Return current dictionary value of the metrics """ from vel.metric import MetricKey - return {MetricKey(dataset, m.name, m.scope): m.value() for m in self.metrics} + return {MetricKey(m.name, m.scope, dataset): m.value() for m in self.metrics} def intermediate_value(self, metric): """ Return an intermediate (inter-epoch) value of a metric """ From 883d95d0917232967b5bc0cbc78612b8d34dc6e4 Mon Sep 17 00:00:00 2001 From: Million Integrals Date: Thu, 24 Oct 2019 17:53:00 -0700 Subject: [PATCH 141/162] Misc changes to logging and stuff. 
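Among other things, this patch simplifies `log_mean_exp` in `vae_base.py` so it always applies the max-shift trick. The identity it relies on is log(mean(exp(x))) = m + log(mean(exp(x - m))) with m = max(x), which keeps the exponentials from overflowing. Standalone sketch of the same function:

```python
import torch


def log_mean_exp(inputs: torch.Tensor, dim: int = 1) -> torch.Tensor:
    """log(mean(exp(inputs), dim)) computed without overflowing exp()."""
    input_max = inputs.max(dim, keepdim=True)[0]
    return (inputs - input_max).exp().mean(dim).log() + input_max.squeeze(dim=dim)


# Sanity check: log-mean-exp over K samples equals logsumexp minus log(K)
x = 100.0 * torch.randn(50, 8)   # large values would overflow a naive exp().mean().log()
print(torch.allclose(log_mean_exp(x, dim=0),
                     torch.logsumexp(x, dim=0) - torch.log(torch.tensor(50.0))))
```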
--- .velproject.yaml | 3 +++ .../mnist/mnist_fc_vae.yaml | 6 ++--- vel/api/callback.py | 4 +-- vel/api/info.py | 4 +-- vel/api/model_config.py | 9 +++++-- vel/callback/time_tracker.py | 4 +-- vel/command/phase_train_command.py | 6 ++--- vel/command/train_command.py | 2 +- vel/model/latent/fc_vae.py | 26 +++++++++---------- vel/model/latent/vae_base.py | 7 ++--- vel/rl/command/rl_train_command.py | 2 +- vel/storage/classic.py | 4 +++ vel/storage/streaming/tensorboard.py | 4 +-- vel/storage/streaming/wandb.py | 26 ++++++++++++++----- 14 files changed, 64 insertions(+), 43 deletions(-) diff --git a/.velproject.yaml b/.velproject.yaml index 5e25ba22..87ba09d0 100644 --- a/.velproject.yaml +++ b/.velproject.yaml @@ -1,3 +1,5 @@ +project_name: 'vel' + storage: name: vel.storage.classic @@ -13,6 +15,7 @@ storage: - name: vel.storage.streaming.tensorboard # - name: vel.storage.streaming.visdom - name: vel.storage.streaming.stdout +# - name: vel.storage.streaming.wandb checkpoint_strategy: diff --git a/examples-configs/generative-likelihood/mnist/mnist_fc_vae.yaml b/examples-configs/generative-likelihood/mnist/mnist_fc_vae.yaml index e61c2028..b9910753 100644 --- a/examples-configs/generative-likelihood/mnist/mnist_fc_vae.yaml +++ b/examples-configs/generative-likelihood/mnist/mnist_fc_vae.yaml @@ -30,9 +30,9 @@ loader: optimizer: name: vel.optimizer.radam - lr: 1.0e-2 - eps: 1.0e-4 - max_grad_norm: 1.0 + lr: 1.0e-3 + eps: 1.0e-3 +# max_grad_norm: 10.0 scheduler: diff --git a/vel/api/callback.py b/vel/api/callback.py index 6a4e7819..372ac699 100644 --- a/vel/api/callback.py +++ b/vel/api/callback.py @@ -15,10 +15,10 @@ def on_initialization(self, training_info: TrainingInfo) -> None: """ pass - def on_train_begin(self, training_info: TrainingInfo) -> None: + def on_train_begin(self, training_info: TrainingInfo, model) -> None: """ Beginning of a training process - is run every time a training process is started, even if it's restarted from - a checkpoint. + a checkpoint. Can access the model that is used for this training. """ pass diff --git a/vel/api/info.py b/vel/api/info.py index 4177d592..7dd8e996 100644 --- a/vel/api/info.py +++ b/vel/api/info.py @@ -54,13 +54,13 @@ def initialize(self): for callback in self.callbacks: callback.on_initialization(self) - def on_train_begin(self): + def on_train_begin(self, model): """ Beginning of a training process - is run every time a training process is started, even if it's restarted from a checkpoint. 
""" for callback in self.callbacks: - callback.on_train_begin(self) + callback.on_train_begin(self, model) def on_train_end(self): """ diff --git a/vel/api/model_config.py b/vel/api/model_config.py index 62bcbdbc..7e091bed 100644 --- a/vel/api/model_config.py +++ b/vel/api/model_config.py @@ -213,13 +213,19 @@ def output_dir(self, *args) -> str: return os.path.join(self.project_dir, self.output_directory_name, *args) def meta_dir(self, *args) -> str: - """ Return directory for openai output files for this model """ + """ Return directory for metadata output files for this model """ return self.output_dir('meta', self.run_name, *args) def data_dir(self, *args) -> str: """ Directory where to store data """ return os.path.normpath(os.path.join(self.project_dir, 'data', *args)) + def model_output_dir(self, *args): + """ Return an output directory of given kind for given kind only """ + fname = os.path.join(self.project_dir, self.output_directory_name, args[0], self.run_name, *(args[1:])) + os.makedirs(os.path.dirname(fname), exist_ok=True) + return fname + def checkpoint_dir(self, *args) -> str: """ Return checkpoint directory for this model """ return self.output_dir('checkpoints', self.run_name, *args) @@ -227,7 +233,6 @@ def checkpoint_dir(self, *args) -> str: def openai_dir(self, *args) -> str: """ Return directory for openai output files for this model """ return self.output_dir('openai', self.run_name, *args) - #################################################################################################################### # NAME UTILITIES @property diff --git a/vel/callback/time_tracker.py b/vel/callback/time_tracker.py index df280213..6e298c50 100644 --- a/vel/callback/time_tracker.py +++ b/vel/callback/time_tracker.py @@ -1,6 +1,6 @@ import time -from vel.api import BatchInfo, TrainingInfo, Callback +from vel.api import BatchInfo, TrainingInfo, Callback, Model class TimeTracker(Callback): @@ -12,7 +12,7 @@ def __init__(self): def on_initialization(self, training_info: TrainingInfo): training_info['time'] = 0.0 - def on_train_begin(self, training_info: TrainingInfo): + def on_train_begin(self, training_info: TrainingInfo, model: Model): self.start_time = time.time() def on_batch_end(self, batch_info: BatchInfo, dataset=None): diff --git a/vel/command/phase_train_command.py b/vel/command/phase_train_command.py index 2670fcac..7992364a 100644 --- a/vel/command/phase_train_command.py +++ b/vel/command/phase_train_command.py @@ -3,9 +3,9 @@ import typing import vel.api as api -import vel.data as data import vel.train as train +from vel.data.loader import DatasetLoader from vel.metric.samples_per_sec import SamplesPerSec from vel.callback.time_tracker import TimeTracker from vel.callback.sample_tracker import SampleTracker @@ -14,7 +14,7 @@ class PhaseTrainCommand: """ Training command - learn according to a set of phases """ - def __init__(self, model_config: api.ModelConfig, model_factory: api.ModuleFactory, loader: data.DatasetLoader, + def __init__(self, model_config: api.ModelConfig, model_factory: api.ModuleFactory, loader: DatasetLoader, storage: api.Storage, phases: typing.List[train.TrainPhase], callbacks=None, restart=True): self.model_config = model_config @@ -72,7 +72,7 @@ def run(self): if training_info.start_epoch_idx > 0: current_phase.restore(training_info, local_idx, trainer.model, hidden_state) - training_info.on_train_begin() + training_info.on_train_begin(trainer.model) for global_epoch_idx in range(training_info.start_epoch_idx + 1, self.full_number_of_epochs + 1): 
iteration_phase_idx = self._select_phase_right_bound(global_epoch_idx-1) diff --git a/vel/command/train_command.py b/vel/command/train_command.py index bbe45e8b..3ea825e6 100644 --- a/vel/command/train_command.py +++ b/vel/command/train_command.py @@ -37,7 +37,7 @@ def run(self): # Check if training was already started and potentially continue where we left off training_info = self.start_training(trainer, optimizer) - training_info.on_train_begin() + training_info.on_train_begin(trainer.model) for global_epoch_idx in range(training_info.start_epoch_idx + 1, self.epochs + 1): epoch_info = api.EpochInfo( diff --git a/vel/model/latent/fc_vae.py b/vel/model/latent/fc_vae.py index 8c5fbad4..739443d8 100644 --- a/vel/model/latent/fc_vae.py +++ b/vel/model/latent/fc_vae.py @@ -78,19 +78,19 @@ def decoder_sample(self, decoded: torch.Tensor) -> torch.Tensor: """ Sample from a decoder distribution - we ignore that since it's so weak in this case """ return decoded - @staticmethod - def _weight_initializer(tensor): - init.xavier_uniform_(tensor.weight, gain=init.calculate_gain('tanh')) - init.constant_(tensor.bias, 0.01) - - def reset_weights(self): - for m in it.chain(self.encoder, self.decoder): - if isinstance(m, nn.Conv2d): - self._weight_initializer(m) - elif isinstance(m, nn.ConvTranspose2d): - self._weight_initializer(m) - elif isinstance(m, nn.Linear): - self._weight_initializer(m) + # @staticmethod + # def _weight_initializer(tensor): + # init.xavier_normal_(tensor.weight, gain=init.calculate_gain('tanh')) + # init.zeros_(tensor.bias) + # + # def reset_weights(self): + # for m in it.chain(self.encoder.modules(), self.decoder.modules()): + # if isinstance(m, nn.Conv2d): + # self._weight_initializer(m) + # elif isinstance(m, nn.ConvTranspose2d): + # self._weight_initializer(m) + # elif isinstance(m, nn.Linear): + # self._weight_initializer(m) def create(img_rows, img_cols, img_channels, layers=None, representation_length=32, diff --git a/vel/model/latent/vae_base.py b/vel/model/latent/vae_base.py index 4c3509ab..be480e50 100644 --- a/vel/model/latent/vae_base.py +++ b/vel/model/latent/vae_base.py @@ -160,8 +160,5 @@ def nll(self, sample: torch.Tensor, num_posterior_samples: int = 1): # Utility methods def log_mean_exp(self, inputs, dim=1): """ Perform log(mean(exp(data))) in a numerically stable way """ - if inputs.size(dim) == 1: - return inputs - else: - input_max = inputs.max(dim, keepdim=True)[0] - return (inputs - input_max).exp().mean(dim).log() + input_max.squeeze(dim=dim) + input_max = inputs.max(dim, keepdim=True)[0] + return (inputs - input_max).exp().mean(dim).log() + input_max.squeeze(dim=dim) diff --git a/vel/rl/command/rl_train_command.py b/vel/rl/command/rl_train_command.py index b63807fb..b4d10758 100644 --- a/vel/rl/command/rl_train_command.py +++ b/vel/rl/command/rl_train_command.py @@ -70,7 +70,7 @@ def run(self): training_info = self.start_training(reinforcer, optimizer) reinforcer.initialize_training(training_info) - training_info.on_train_begin() + training_info.on_train_begin(reinforcer.policy) global_epoch_idx = training_info.start_epoch_idx + 1 diff --git a/vel/storage/classic.py b/vel/storage/classic.py index de130c52..e61dd91b 100644 --- a/vel/storage/classic.py +++ b/vel/storage/classic.py @@ -135,6 +135,10 @@ def checkpoint_hidden_filename(self, epoch_idx) -> str: """ Return checkpoint filename for this model - hidden state """ return self.model_config.checkpoint_dir('checkpoint_hidden_{:08}.data'.format(epoch_idx)) + def last_checkpoint_filename(self) -> str: + 
""" return checkpoint filename for the last saved checkpoint """ + return self.checkpoint_filename(self.last_epoch_idx()) + #################################################################################################################### # Internal interface def _persisted_last_epoch(self) -> int: diff --git a/vel/storage/streaming/tensorboard.py b/vel/storage/streaming/tensorboard.py index f75ca570..dde270a7 100644 --- a/vel/storage/streaming/tensorboard.py +++ b/vel/storage/streaming/tensorboard.py @@ -1,7 +1,7 @@ import os import shutil -from vel.api import ModelConfig, Callback, TrainingInfo, EpochInfo +from vel.api import ModelConfig, Callback, TrainingInfo, EpochInfo, Model from torch.utils.tensorboard import SummaryWriter @@ -12,7 +12,7 @@ def __init__(self, model_config: ModelConfig): self.model_config = model_config self.logdir = self.model_config.output_dir('tensorboard', self.model_config.run_name) - def on_train_begin(self, training_info: TrainingInfo) -> None: + def on_train_begin(self, training_info: TrainingInfo, model: Model) -> None: """ Potentially cleanup previous runs """ if training_info.start_epoch_idx == 0: if os.path.exists(self.logdir): diff --git a/vel/storage/streaming/wandb.py b/vel/storage/streaming/wandb.py index 3db02c75..ed104fd3 100644 --- a/vel/storage/streaming/wandb.py +++ b/vel/storage/streaming/wandb.py @@ -1,33 +1,45 @@ import wandb +import yaml -from vel.api import ModelConfig, Callback, TrainingInfo +from vel.api import ModelConfig, Callback, TrainingInfo, Model class WandbStreaming(Callback): """ Stream live results from training to WandB """ - def __init__(self, model_config: ModelConfig, project: str): + def __init__(self, model_config: ModelConfig, register_model: bool = False, write_hyperparams: bool = True): self.model_config = model_config - self.project = project + self.project = self.model_config.provide('project_name') + self.register_model = register_model + self.write_hyperparams = write_hyperparams - def on_train_begin(self, training_info: TrainingInfo) -> None: + def on_train_begin(self, training_info: TrainingInfo, model: Model) -> None: wandb.init( job_type='train', project=self.project, - dir=self.model_config.output_dir('wandb'), + dir=self.model_config.model_output_dir('wandb'), group=self.model_config.name, name=self.model_config.run_name, resume=training_info.start_epoch_idx > 0, tags=[self.model_config.tag] if self.model_config.tag else [] ) + if self.register_model: + wandb.watch(model) + + if self.write_hyperparams: + path = self.model_config.model_output_dir('wandb', 'vel-config.yaml') + with open(path, 'wt') as fp: + yaml.dump(self.model_config.render_configuration(), fp) + wandb.save(path) + def on_epoch_end(self, epoch_info): """ Send data to wandb """ result = {k.format(): v for k, v in epoch_info.result.items()} wandb.log(row=result, step=epoch_info.global_epoch_idx) -def create(model_config, project: str = 'vel'): +def create(model_config, register_model: bool = False): """ Vel factory function """ - return WandbStreaming(model_config, project=project) + return WandbStreaming(model_config, register_model=register_model) From 81e742a9264549510b9ce73afdedc3227a1e0257 Mon Sep 17 00:00:00 2001 From: Million Integrals Date: Thu, 31 Oct 2019 13:17:28 -0700 Subject: [PATCH 142/162] Example how to run config from file. 
--- examples-scripts/run_config.py | 17 +++++++++++++++++ vel/api/model_config.py | 11 ++++++++++- vel/launcher.py | 5 +---- 3 files changed, 28 insertions(+), 5 deletions(-) create mode 100644 examples-scripts/run_config.py diff --git a/examples-scripts/run_config.py b/examples-scripts/run_config.py new file mode 100644 index 00000000..18200680 --- /dev/null +++ b/examples-scripts/run_config.py @@ -0,0 +1,17 @@ +import os.path + + +from vel.api import ModelConfig + +project_dir = ModelConfig.find_project_directory(os.getcwd()) + +model_config = ModelConfig.from_file( + filename=os.path.join( + project_dir, 'examples-configs/generative-likelihood/mnist/mnist_cnn_iwae.yaml', + ), + run_number=2, +) + +model_config.set_seed() +model_config.banner('train') +model_config.run_command('train') diff --git a/vel/api/model_config.py b/vel/api/model_config.py index 7e091bed..a33dc9f7 100644 --- a/vel/api/model_config.py +++ b/vel/api/model_config.py @@ -197,8 +197,11 @@ def get_command(self, command_name): """ Return object for given command """ return self.provider.instantiate_from_data(self.command_descriptors[command_name]) - def run_command(self, command_name, varargs): + def run_command(self, command_name, varargs=None): """ Instantiate model class """ + if varargs is None: + varargs = [] + command_descriptor = self.get_command(command_name) return command_descriptor.run(*varargs) @@ -333,3 +336,9 @@ def load_trained_model(self): model.load_state_dict(model_state) return model + + def set_seed(self): + """ Set random seeds """ + # Set seed already in the launcher + from vel.util.random import set_seed + set_seed(self.seed) diff --git a/vel/launcher.py b/vel/launcher.py index 8b537415..cd3a08de 100644 --- a/vel/launcher.py +++ b/vel/launcher.py @@ -51,10 +51,7 @@ def main(): # This needs to be called before any of PyTorch module is imported multiprocessing.set_start_method(multiprocessing_setting) - # Set seed already in the launcher - from vel.util.random import set_seed - set_seed(model_config.seed) - + model_config.set_seed() model_config.banner(args.command) if args.profile: From 73edf972cc08b634fb8a66120913dc3018299f8e Mon Sep 17 00:00:00 2001 From: Million Integrals Date: Sat, 2 Nov 2019 20:25:31 -0700 Subject: [PATCH 143/162] Reordering default velproject. 
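A variation on the `run_config.py` example above, resolving the command object explicitly; `run_command()` now defaults its varargs to an empty list, so a command can also be invoked without extra arguments. The config path is one of the included examples; everything else follows the script added in the previous patch:

```python
import os.path

from vel.api import ModelConfig

project_dir = ModelConfig.find_project_directory(os.getcwd())

model_config = ModelConfig.from_file(
    filename=os.path.join(project_dir, 'examples-configs/generative-likelihood/mnist/mnist_fc_vae.yaml'),
    run_number=1,
)

model_config.set_seed()
command = model_config.get_command('train')  # the same object run_command('train') would invoke
command.run()
```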
--- .velproject.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.velproject.yaml b/.velproject.yaml index 87ba09d0..b9ba7d20 100644 --- a/.velproject.yaml +++ b/.velproject.yaml @@ -13,8 +13,8 @@ storage: streaming: - name: vel.storage.streaming.tensorboard -# - name: vel.storage.streaming.visdom - name: vel.storage.streaming.stdout +# - name: vel.storage.streaming.visdom # - name: vel.storage.streaming.wandb From c21d608b1128fb67a34fc5213d0830aacc461568 Mon Sep 17 00:00:00 2001 From: Million Integrals Date: Sat, 2 Nov 2019 20:28:11 -0700 Subject: [PATCH 144/162] Updated bibliography --- docs/Bibliography.md | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/docs/Bibliography.md b/docs/Bibliography.md index 31cfef82..3453e484 100644 --- a/docs/Bibliography.md +++ b/docs/Bibliography.md @@ -14,6 +14,11 @@ on this library: - (Sep 2015) **Importance Weighted Autoencoders** Yuri Burda, Roger Grosse, Ruslan Salakhutdinov https://arxiv.org/abs/1509.00519 + + +- (Nov 2017) **Neural Discrete Representation Learning** + Aaron van den Oord, Oriol Vinyals, Koray Kavukcuoglu + https://arxiv.org/abs/1711.00937 ### Learning rate and optimization @@ -91,10 +96,19 @@ on this library: https://arxiv.org/abs/1707.06887 - (Oct 2017) **Rainbow: Combining Improvements in Deep Reinforcement Learning** - Matteo Hessel, Joseph Modayil, Hado van Hasselt, Tom Schaul, Georg Ostrovski, Will Dabney, Dan Horgan, Bilal Piot, Mohammad Azar, David Silver + Matteo Hessel, Joseph Modayil, Hado van Hasselt, Tom Schaul, Georg Ostrovski, Will Dabney, Dan Horgan, + Bilal Piot, Mohammad Azar, David Silver https://arxiv.org/abs/1710.02298 +### Generative adversarial networks + +- (Jun 2014) **Generative Adversarial Networks** + Ian J. Goodfellow, Jean Pouget-Abadie, Mehdi Mirza, Bing Xu, David Warde-Farley, Sherjil Ozair, Aaron Courville, + Yoshua Bengio + https://arxiv.org/abs/1406.2661 + + ### Policy gradient methods From 2a8949a345006131f9c02fdec61f2bdfc01ab10c Mon Sep 17 00:00:00 2001 From: Million Integrals Date: Sat, 2 Nov 2019 23:50:31 -0700 Subject: [PATCH 145/162] Aggregate metrics from optimizers in multi-optimzier setup. 
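With multiple optimizers, `step()` now returns a dict of per-optimizer metric dicts and `aggregate_metrics` flattens it, while metric objects get the optimizer name as a dot-separated prefix. The flattening behaves roughly like this toy stand-in (`flatten_dict` itself is a helper not shown in this patch, and the metric names are just examples):

```python
def flatten_metrics(nested: dict) -> dict:
    """{'generator': {'grad_norm': 0.5}} -> {'generator.grad_norm': 0.5}"""
    output = {}
    for optimizer_name, metrics in nested.items():
        for key, value in metrics.items():
            output[f"{optimizer_name}.{key}"] = value
    return output


print(flatten_metrics({
    'generator': {'grad_norm': 0.52},
    'discriminator': {'grad_norm': 0.17},
}))
# {'generator.grad_norm': 0.52, 'discriminator.grad_norm': 0.17}
```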
--- vel/api/optimizer.py | 17 +++++++++++++++-- vel/metric/base/base_metric.py | 5 +++++ vel/model/gan/simple_gan.py | 30 +++++++++++++++++++++++------- 3 files changed, 43 insertions(+), 9 deletions(-) diff --git a/vel/api/optimizer.py b/vel/api/optimizer.py index e5d8a50c..4b926f53 100644 --- a/vel/api/optimizer.py +++ b/vel/api/optimizer.py @@ -178,7 +178,16 @@ def step(self, closure=None) -> dict: for name, optimizer in self.optimizers.items(): metrics = optimizer.step() - flatten_dict(metrics, output, name) + output[name] = metrics + + return output + + def aggregate_metrics(self, metrics) -> dict: + """ Aggregate metrics from multiple optimizers """ + output = {} + + for key, value in metrics.items(): + flatten_dict(value, output, key) return output @@ -195,7 +204,11 @@ def add_param_group(self, param_group: dict) -> None: def metrics(self) -> list: """ Set of metrics for this model """ # TODO(jerry): aggregate metrics - return [] + return [ + metric.prefix(name) + for name, optimizer in self.optimizers.items() + for metric in optimizer.metrics() + ] class OptimizerFactory: diff --git a/vel/metric/base/base_metric.py b/vel/metric/base/base_metric.py index 8f073247..93bcb7b2 100644 --- a/vel/metric/base/base_metric.py +++ b/vel/metric/base/base_metric.py @@ -42,6 +42,11 @@ def write_state_dict(self, training_info: TrainingInfo, hidden_state_dict: dict) """ Potentially store some metric state to the checkpoint """ pass + def prefix(self, prefix: str): + """ Prepend a prefix to the name of the metric """ + self.name = f"{prefix}.{self.name}" + return self + def load_state_dict(self, training_info: TrainingInfo, hidden_state_dict: dict) -> None: """ Potentially load some metric state from the checkpoint """ pass diff --git a/vel/model/gan/simple_gan.py b/vel/model/gan/simple_gan.py index a8f56044..eb22e09c 100644 --- a/vel/model/gan/simple_gan.py +++ b/vel/model/gan/simple_gan.py @@ -32,7 +32,7 @@ def block(in_feat, out_feat, normalize=True): *block(256, 512), *block(512, 1024), nn.Linear(1024, int(np.prod(img_shape))), - nn.Tanh() + nn.Sigmoid() ) def forward(self, z): @@ -114,7 +114,7 @@ def optimize(self, data: dict, optimizer: VelMultiOptimizer) -> dict: g_loss = self.adversarial_loss(self.discriminator(gen_imgs), valid) g_loss.backward() - optimizer_G.step() + g_metrics = optimizer_G.step() # --------------------- # Train Discriminator @@ -123,16 +123,28 @@ def optimize(self, data: dict, optimizer: VelMultiOptimizer) -> dict: optimizer_D.zero_grad() # Measure discriminator's ability to classify real from generated samples - real_loss = self.adversarial_loss(self.discriminator(input_data), valid) - fake_loss = self.adversarial_loss(self.discriminator(gen_imgs.detach()), fake) + input_data_prob = self.discriminator(input_data) + generated_images_prob = self.discriminator(gen_imgs.detach()) + + real_loss = self.adversarial_loss(input_data_prob, valid) + fake_loss = self.adversarial_loss(generated_images_prob, fake) + d_loss = (real_loss + fake_loss) / 2 d_loss.backward() - optimizer_D.step() + d_metrics = optimizer_D.step() + + optimizer_metrics = optimizer.aggregate_metrics({ + 'generator': g_metrics, + 'discriminator': d_metrics + }) return { + **optimizer_metrics, 'gen_loss': g_loss.item(), - 'disc_loss': d_loss.item() + 'disc_loss': d_loss.item(), + 'discriminator_real_accuracy': (input_data_prob > 0.5).float().mean().item(), + 'discriminator_fake_accuracy': (generated_images_prob < 0.5).float().mean().item(), } def validate(self, data: dict) -> dict: @@ -142,7 +154,9 @@ def 
validate(self, data: dict) -> dict: """ return { 'gen_loss': 0.0, - 'disc_loss': 0.0 + 'disc_loss': 0.0, + 'discriminator_real_accuracy': 0.0, + 'discriminator_fake_accuracy': 0.0, } def metrics(self): @@ -150,6 +164,8 @@ def metrics(self): return [ AveragingNamedMetric('gen_loss', scope="train"), AveragingNamedMetric('disc_loss', scope="train"), + AveragingNamedMetric('discriminator_real_accuracy', scope="train"), + AveragingNamedMetric('discriminator_fake_accuracy', scope="train"), ] From d09942fdbc97bbd0180687d3ee6664bd4d7b3ced Mon Sep 17 00:00:00 2001 From: Million Integrals Date: Sun, 3 Nov 2019 18:18:59 -0800 Subject: [PATCH 146/162] Add image metrics to tensorboard and somehow stabilize the MNIST GAN. --- .../mnist/mnist_gan.yaml | 6 ++- vel/api/info.py | 2 +- vel/metric/__init__.py | 1 + vel/metric/base/base_metric.py | 5 +++ vel/metric/base/image_metric.py | 40 +++++++++++++++++++ vel/model/gan/simple_gan.py | 22 ++++++++-- vel/storage/streaming/stdout.py | 2 + vel/storage/streaming/tensorboard.py | 26 ++++++++---- vel/storage/streaming/visdom.py | 3 +- vel/storage/streaming/wandb.py | 2 +- 10 files changed, 92 insertions(+), 17 deletions(-) create mode 100644 vel/metric/base/image_metric.py diff --git a/examples-configs/generative-adversarial/mnist/mnist_gan.yaml b/examples-configs/generative-adversarial/mnist/mnist_gan.yaml index 6a3ea519..5a52deae 100644 --- a/examples-configs/generative-adversarial/mnist/mnist_gan.yaml +++ b/examples-configs/generative-adversarial/mnist/mnist_gan.yaml @@ -41,7 +41,9 @@ loader: optimizer: name: vel.optimizer.radam lr: 2.0e-4 - eps: 1.0e-4 + eps: 1.0e-3 + betas: [0.5, 0.999] + max_grad_norm: 5.0 #scheduler: @@ -58,4 +60,4 @@ commands: train: name: vel.command.train_command - epochs: 200 + epochs: 400 diff --git a/vel/api/info.py b/vel/api/info.py index 7dd8e996..48d25883 100644 --- a/vel/api/info.py +++ b/vel/api/info.py @@ -122,7 +122,7 @@ def _reset_metrics(self): def value(self, dataset=None): """ Return current dictionary value of the metrics """ from vel.metric import MetricKey - return {MetricKey(m.name, m.scope, dataset): m.value() for m in self.metrics} + return {MetricKey(m.name, m.scope, dataset, m.metric_type()): m.value() for m in self.metrics} def intermediate_value(self, metric): """ Return an intermediate (inter-epoch) value of a metric """ diff --git a/vel/metric/__init__.py b/vel/metric/__init__.py index 29d9f310..244c8acd 100644 --- a/vel/metric/__init__.py +++ b/vel/metric/__init__.py @@ -3,3 +3,4 @@ AveragingMetric, AveragingNamedMetric, AveragingSupervisedMetric, DefaultAveragingNamedMetric # noqa ) from .base.value_metric import ValueMetric # noqa +from .base.image_metric import RandomImageMetric # noqa diff --git a/vel/metric/base/base_metric.py b/vel/metric/base/base_metric.py index 93bcb7b2..5ff1c10a 100644 --- a/vel/metric/base/base_metric.py +++ b/vel/metric/base/base_metric.py @@ -10,6 +10,7 @@ class MetricKey: name: str scope: str dataset: typing.Optional[str] = None + metric_type: str = 'scalar' def format(self): """ Format a metric key into a string """ @@ -50,3 +51,7 @@ def prefix(self, prefix: str): def load_state_dict(self, training_info: TrainingInfo, hidden_state_dict: dict) -> None: """ Potentially load some metric state from the checkpoint """ pass + + def metric_type(self) -> str: + """ Type of the metric """ + return 'scalar' diff --git a/vel/metric/base/image_metric.py b/vel/metric/base/image_metric.py new file mode 100644 index 00000000..c4ca40da --- /dev/null +++ b/vel/metric/base/image_metric.py @@ 
-0,0 +1,40 @@ +import numpy as np + +from .base_metric import BaseMetric + + +class ImageMetric(BaseMetric): + """ Metric that logs an image """ + + def metric_type(self) -> str: + return 'image' + + +class RandomImageMetric(ImageMetric): + """ Just pick a random image from the supplied list """ + + def __init__(self, name, scope="general"): + super().__init__(name, scope=scope) + + self.image = None + + def calculate(self, batch_info): + batch = batch_info[self.name] + + if batch is not None: + if len(batch.shape) > 3: + image = batch[np.random.choice(batch.shape[0])] + else: + image = batch + + if image.shape[2] == 1: + image = np.broadcast_to(image, shape=(image.shape[0], image.shape[1], 3)) + + self.image = image + + def reset(self): + self.image = None + + def value(self): + return self.image + diff --git a/vel/model/gan/simple_gan.py b/vel/model/gan/simple_gan.py index eb22e09c..310c41d1 100644 --- a/vel/model/gan/simple_gan.py +++ b/vel/model/gan/simple_gan.py @@ -9,7 +9,7 @@ from vel.api import OptimizedModel, ModuleFactory, OptimizerFactory from vel.api.optimizer import VelMultiOptimizer -from vel.metric import AveragingNamedMetric +from vel.metric import AveragingNamedMetric, RandomImageMetric class Generator(nn.Module): @@ -46,11 +46,18 @@ def __init__(self, img_shape, latent_dim): super(Discriminator, self).__init__() self.model = nn.Sequential( - nn.Linear(int(np.prod(img_shape)), 512), + # nn.Linear(int(np.prod(img_shape)), 512), + # nn.LeakyReLU(0.2, inplace=True), + # nn.Linear(512, 256), + # nn.LeakyReLU(0.2, inplace=True), + # nn.Linear(256, 1), + nn.Linear(int(np.prod(img_shape)), 256), nn.LeakyReLU(0.2, inplace=True), - nn.Linear(512, 256), + nn.Dropout(0.2), + nn.Linear(256, 128), nn.LeakyReLU(0.2, inplace=True), - nn.Linear(256, 1), + nn.Dropout(0.2), + nn.Linear(128, 1), nn.Sigmoid(), ) @@ -139,12 +146,17 @@ def optimize(self, data: dict, optimizer: VelMultiOptimizer) -> dict: 'discriminator': d_metrics }) + # Log images to see how we're doing + np_image = gen_imgs[0].detach().cpu().numpy() + np_image = np.transpose(np_image, (2, 1, 0)) + return { **optimizer_metrics, 'gen_loss': g_loss.item(), 'disc_loss': d_loss.item(), 'discriminator_real_accuracy': (input_data_prob > 0.5).float().mean().item(), 'discriminator_fake_accuracy': (generated_images_prob < 0.5).float().mean().item(), + 'generated_image': np_image } def validate(self, data: dict) -> dict: @@ -157,6 +169,7 @@ def validate(self, data: dict) -> dict: 'disc_loss': 0.0, 'discriminator_real_accuracy': 0.0, 'discriminator_fake_accuracy': 0.0, + 'generated_image': None } def metrics(self): @@ -166,6 +179,7 @@ def metrics(self): AveragingNamedMetric('disc_loss', scope="train"), AveragingNamedMetric('discriminator_real_accuracy', scope="train"), AveragingNamedMetric('discriminator_fake_accuracy', scope="train"), + RandomImageMetric('generated_image', scope='train') ] diff --git a/vel/storage/streaming/stdout.py b/vel/storage/streaming/stdout.py index 7dc18f8e..fc53f23f 100644 --- a/vel/storage/streaming/stdout.py +++ b/vel/storage/streaming/stdout.py @@ -31,11 +31,13 @@ def _print_metrics_line(metrics, dataset=None): metrics_list = [ "{}/{} {:.04f}".format(k.scope, k.name, metrics[k]) for k in sorted([k for k in metrics.keys() if k.dataset is None]) + if k.metric_type == 'scalar' ] else: metrics_list = [ "{}/{} {:.04f}".format(k.scope, k.name, metrics[k]) for k in sorted([k for k in metrics.keys() if k.dataset == dataset]) + if k.metric_type == 'scalar' ] print('{0: <10}'.format(dataset.capitalize()), " 
".join(metrics_list)) diff --git a/vel/storage/streaming/tensorboard.py b/vel/storage/streaming/tensorboard.py index dde270a7..52462d90 100644 --- a/vel/storage/streaming/tensorboard.py +++ b/vel/storage/streaming/tensorboard.py @@ -8,9 +8,10 @@ class TensorboardStreaming(Callback): """ Stream results to tensorboard """ - def __init__(self, model_config: ModelConfig): + def __init__(self, model_config: ModelConfig, record_images=True): self.model_config = model_config self.logdir = self.model_config.output_dir('tensorboard', self.model_config.run_name) + self.record_images = record_images def on_train_begin(self, training_info: TrainingInfo, model: Model) -> None: """ Potentially cleanup previous runs """ @@ -32,15 +33,24 @@ def on_epoch_end(self, epoch_info: EpochInfo): if key.dataset == head: tag = '{}/{}'.format(key.scope, key.name) - summary_writer.add_scalar( - tag=tag, - scalar_value=value, - global_step=epoch_info.global_epoch_idx, - ) + if key.metric_type == 'scalar': + summary_writer.add_scalar( + tag=tag, + scalar_value=value, + global_step=epoch_info.global_epoch_idx, + ) + elif key.metric_type == 'image' and self.record_images: + if value is not None: + summary_writer.add_image( + tag=tag, + img_tensor=value, + global_step=epoch_info.global_epoch_idx, + dataformats='WHC' + ) summary_writer.close() -def create(model_config): +def create(model_config, record_images=True): """ Vel factory function """ - return TensorboardStreaming(model_config) + return TensorboardStreaming(model_config, record_images=record_images) diff --git a/vel/storage/streaming/visdom.py b/vel/storage/streaming/visdom.py index b0c31277..c7b95f29 100644 --- a/vel/storage/streaming/visdom.py +++ b/vel/storage/streaming/visdom.py @@ -21,7 +21,8 @@ def __init__(self, model_config: ModelConfig, visdom_settings: VisdomSettings): def on_epoch_end(self, epoch_info): """ Update data in visdom on push """ - metrics_df = pd.DataFrame([epoch_info.result]).set_index('epoch_idx') + result = {k.format(): v for k, v in epoch_info.result.items() if k.metric_type == 'scalar'} + metrics_df = pd.DataFrame([result], index=[epoch_info.global_epoch_idx]) visdom_append_metrics( self.vis, diff --git a/vel/storage/streaming/wandb.py b/vel/storage/streaming/wandb.py index ed104fd3..a0c11d9c 100644 --- a/vel/storage/streaming/wandb.py +++ b/vel/storage/streaming/wandb.py @@ -36,7 +36,7 @@ def on_train_begin(self, training_info: TrainingInfo, model: Model) -> None: def on_epoch_end(self, epoch_info): """ Send data to wandb """ - result = {k.format(): v for k, v in epoch_info.result.items()} + result = {k.format(): v for k, v in epoch_info.result.items() if k.metric_type == 'scalar'} wandb.log(row=result, step=epoch_info.global_epoch_idx) From bda0c39d09707d538712bf58a9729f9f9e758395 Mon Sep 17 00:00:00 2001 From: Million Integrals Date: Sat, 9 Nov 2019 11:06:52 -0800 Subject: [PATCH 147/162] Updated VAE configs. 
--- .../generative-likelihood/mnist/mnist_cnn_iwae.yaml | 9 +++++++-- .../generative-likelihood/mnist/mnist_cnn_vae.yaml | 12 ++++++------ .../generative-likelihood/mnist/mnist_fc_iwae.yaml | 11 ++++++++--- .../generative-likelihood/mnist/mnist_fc_vae.yaml | 4 ++-- 4 files changed, 23 insertions(+), 13 deletions(-) diff --git a/examples-configs/generative-likelihood/mnist/mnist_cnn_iwae.yaml b/examples-configs/generative-likelihood/mnist/mnist_cnn_iwae.yaml index fc3d39fd..735f2b7a 100644 --- a/examples-configs/generative-likelihood/mnist/mnist_cnn_iwae.yaml +++ b/examples-configs/generative-likelihood/mnist/mnist_cnn_iwae.yaml @@ -33,7 +33,7 @@ optimizer: name: vel.optimizer.radam lr: 1.0e-3 eps: 1.0e-4 - max_grad_norm: 1.0 + max_grad_norm: 10.0 scheduler: @@ -45,4 +45,9 @@ scheduler: commands: train: name: vel.command.train_command - epochs: 3280 \ No newline at end of file + epochs: 3280 + + nll: + name: vel.command.latent.vae_nll + max_batch: 1_000 + samples: !param samples = 10 diff --git a/examples-configs/generative-likelihood/mnist/mnist_cnn_vae.yaml b/examples-configs/generative-likelihood/mnist/mnist_cnn_vae.yaml index 56340d6b..dc3b20b0 100644 --- a/examples-configs/generative-likelihood/mnist/mnist_cnn_vae.yaml +++ b/examples-configs/generative-likelihood/mnist/mnist_cnn_vae.yaml @@ -45,7 +45,7 @@ optimizer: name: vel.optimizer.radam lr: 1.0e-3 eps: 1.0e-4 - max_grad_norm: 1.0 + max_grad_norm: 10.0 scheduler: @@ -55,11 +55,11 @@ scheduler: commands: - augvis: - name: vel.command.augvis_command - samples: 10 - cases: 5 - train: name: vel.command.train_command epochs: 3280 + + nll: + name: vel.command.latent.vae_nll + max_batch: 1_000 + samples: !param samples = 10 diff --git a/examples-configs/generative-likelihood/mnist/mnist_fc_iwae.yaml b/examples-configs/generative-likelihood/mnist/mnist_fc_iwae.yaml index 48fa116f..83ea2399 100644 --- a/examples-configs/generative-likelihood/mnist/mnist_fc_iwae.yaml +++ b/examples-configs/generative-likelihood/mnist/mnist_fc_iwae.yaml @@ -32,8 +32,8 @@ loader: optimizer: name: vel.optimizer.radam lr: 1.0e-3 - eps: 1.0e- - max_grad_norm: 1.0 + eps: 1.0e-4 + max_grad_norm: 10.0 @@ -46,4 +46,9 @@ scheduler: commands: train: name: vel.command.train_command - epochs: 3280 \ No newline at end of file + epochs: 3280 + + nll: + name: vel.command.latent.vae_nll + max_batch: 10_000 + samples: !param samples = 10 diff --git a/examples-configs/generative-likelihood/mnist/mnist_fc_vae.yaml b/examples-configs/generative-likelihood/mnist/mnist_fc_vae.yaml index b9910753..56a3991d 100644 --- a/examples-configs/generative-likelihood/mnist/mnist_fc_vae.yaml +++ b/examples-configs/generative-likelihood/mnist/mnist_fc_vae.yaml @@ -31,8 +31,8 @@ loader: optimizer: name: vel.optimizer.radam lr: 1.0e-3 - eps: 1.0e-3 -# max_grad_norm: 10.0 + eps: 1.0e-4 + max_grad_norm: 10.0 scheduler: From cb11b38516ba0ad1b908da8eac1447f3b63cb1cf Mon Sep 17 00:00:00 2001 From: Million Integrals Date: Sat, 9 Nov 2019 11:31:54 -0800 Subject: [PATCH 148/162] VAE benchmarks. --- docs/Benchmarks.md | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/docs/Benchmarks.md b/docs/Benchmarks.md index 274224fa..c6555e69 100644 --- a/docs/Benchmarks.md +++ b/docs/Benchmarks.md @@ -17,10 +17,9 @@ Each metric I'll try to average over six runs and provide mean and standard devi For VAE models, I'll include upper bound for Negative Log Likelihood (NLL) for given number of importance samples (IS). 
-| Model | NLL (IS=1) | NLL (IS=100) | NLL (IS=5000) | -| ----- | ---------- | ------------ | ------------- | -| FC VAE | 90.98 ± 0.14 | 87.07 ± 0.18 | 86.93 ± 0.18 | -| CNN VAE | -| FC IWAE | -| CNN IWAE | - +| Model | NLL (IS=1) |NLL (IS=100)|NLL (IS=5000)| +|-------------:|------------:|-----------:|------------:| +| FC VAE| 90.85 ± 0.20|87.00 ± 0.28| 86.83 ± 0.26| +|FC IWAE (k=50)|100.53 ± 0.62|82.41 ± 0.05| 80.73 ± 0.09| +| CNN VAE| 86.47 ± 0.11|81.33 ± 0.05| 81.02 ± 0.05| +|CNN IWAE (k=5)| 88.44 ± 0.25|78.78 ± 0.05| 77.77 ± 0.06| From b1524bbc4605d50ab302e9e1d5648ccd151d0179 Mon Sep 17 00:00:00 2001 From: Million Integrals Date: Sat, 9 Nov 2019 12:05:26 -0800 Subject: [PATCH 149/162] Update to the WANDB stream. --- vel/storage/streaming/wandb.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/vel/storage/streaming/wandb.py b/vel/storage/streaming/wandb.py index a0c11d9c..78ea775c 100644 --- a/vel/storage/streaming/wandb.py +++ b/vel/storage/streaming/wandb.py @@ -8,11 +8,13 @@ class WandbStreaming(Callback): """ Stream live results from training to WandB """ - def __init__(self, model_config: ModelConfig, register_model: bool = False, write_hyperparams: bool = True): + def __init__(self, model_config: ModelConfig, register_model: bool = False, write_hyperparams: bool = True, + wandb_config=None): self.model_config = model_config self.project = self.model_config.provide('project_name') self.register_model = register_model self.write_hyperparams = write_hyperparams + self.wandb_config = {} if wandb_config is None else wandb_config def on_train_begin(self, training_info: TrainingInfo, model: Model) -> None: wandb.init( @@ -20,6 +22,7 @@ def on_train_begin(self, training_info: TrainingInfo, model: Model) -> None: project=self.project, dir=self.model_config.model_output_dir('wandb'), group=self.model_config.name, + config=self.wandb_config, name=self.model_config.run_name, resume=training_info.start_epoch_idx > 0, tags=[self.model_config.tag] if self.model_config.tag else [] @@ -40,6 +43,6 @@ def on_epoch_end(self, epoch_info): wandb.log(row=result, step=epoch_info.global_epoch_idx) -def create(model_config, register_model: bool = False): +def create(model_config, register_model: bool = False, wandb_config=None): """ Vel factory function """ - return WandbStreaming(model_config, register_model=register_model) + return WandbStreaming(model_config, register_model=register_model, wandb_config=wandb_config) From 4636bba3f5e95afbd608107466166d80b3a5be85 Mon Sep 17 00:00:00 2001 From: Million Integrals Date: Sat, 9 Nov 2019 12:05:36 -0800 Subject: [PATCH 150/162] Fixed evaluation command. 
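The command used to special-case stateful models and optional action noise by hand; both concerns now live behind `PolicyActor`, the same wrapper the rollers use. A minimal sketch of the resulting evaluation loop, assuming an instantiated policy `model`, a vectorized environment `env`, a torch `device` and the usual counters (a simplification of the command below, not a drop-in replacement):

    import numpy as np
    import torch
    from vel.rl.util.actor import PolicyActor

    actor = PolicyActor(num_envs=parallel_envs, policy=model, device=device)
    observations = torch.from_numpy(env.reset()).to(device)

    for _ in range(num_steps):
        actions = actor.act(observations)['actions']
        obs, rewards, dones, infos = env.step(actions.cpu().numpy())
        observations = torch.from_numpy(obs).to(device)
        # zero the recurrent state of environments whose episodes just ended
        actor.reset_states(torch.from_numpy(dones.astype(np.float32)).to(device))

Hidden-state bookkeeping and exploration noise are owned by the actor, so commands that only need to step a policy no longer have to know whether it is recurrent.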
--- vel/rl/command/evaluate_env_command.py | 39 ++++++++------------------ 1 file changed, 11 insertions(+), 28 deletions(-) diff --git a/vel/rl/command/evaluate_env_command.py b/vel/rl/command/evaluate_env_command.py index 0a27aec6..9596e109 100644 --- a/vel/rl/command/evaluate_env_command.py +++ b/vel/rl/command/evaluate_env_command.py @@ -2,24 +2,22 @@ import pandas as pd import torch import tqdm -import typing from vel.api import ModelConfig, TrainingInfo, Storage, ModuleFactory from vel.rl.api import VecEnvFactory +from vel.rl.util.actor import PolicyActor class EvaluateEnvCommand: """ Record environment playthrough as a game """ def __init__(self, model_config: ModelConfig, env_factory: VecEnvFactory, model_factory: ModuleFactory, - storage: Storage, parallel_envs: int, action_noise: typing.Optional[ModuleFactory], takes: int, - sample_args: dict = None): + storage: Storage, parallel_envs: int, takes: int, sample_args: dict = None): self.model_config = model_config self.model_factory = model_factory self.env_factory = env_factory self.storage = storage self.takes = takes self.parallel_envs = parallel_envs - self.action_noise_factory = action_noise self.sample_args = sample_args if sample_args is not None else {} @@ -30,12 +28,9 @@ def run(self): env = self.env_factory.instantiate( parallel_envs=self.parallel_envs, preset='record', seed=self.model_config.seed ) - model = self.model_factory.instantiate(action_space=env.action_space).to(device) - - if self.action_noise_factory is not None: - action_noise = self.action_noise_factory.instantiate(environment=env).to(device) - else: - action_noise = None + model = self.model_factory.instantiate( + action_space=env.action_space, observation_space=env.observation_space + ).to(device) training_info = TrainingInfo( start_epoch_idx=self.storage.last_epoch_idx() @@ -48,26 +43,17 @@ def run(self): model.eval() + actor = PolicyActor(num_envs=self.parallel_envs, policy=model, device=device) + episode_rewards = [] episode_lengths = [] observations = env.reset() observations_tensor = torch.from_numpy(observations).to(device) - if model.is_stateful: - hidden_state = model.zero_state(observations.shape[0]).to(device) - with tqdm.tqdm(total=self.takes) as progress_bar: while len(episode_rewards) < self.takes: - if model.is_stateful: - output = model.step(observations_tensor, hidden_state, **self.sample_args) - hidden_state = output['state'] - actions = output['actions'] - else: - actions = model.step(observations_tensor, **self.sample_args)['actions'] - - if action_noise is not None: - actions = action_noise(actions) + actions = actor.act(observations_tensor, **self.sample_args)['actions'] observations, rewards, dones, infos = env.step(actions.cpu().numpy()) observations_tensor = torch.from_numpy(observations).to(device) @@ -78,22 +64,19 @@ def run(self): episode_lengths.append(info['episode']['l']) progress_bar.update(1) - if model.is_stateful: - # Zero state belongiong to finished episodes - dones_tensor = torch.from_numpy(dones.astype(np.float32)).to(device) - hidden_state = hidden_state * (1.0 - dones_tensor.unsqueeze(-1)) + dones_tensor = torch.from_numpy(dones.astype(np.float32)).to(device) + actor.reset_states(dones_tensor) print(pd.DataFrame({'lengths': episode_lengths, 'rewards': episode_rewards}).describe()) -def create(model_config, model, vec_env, storage, takes, parallel_envs, action_noise=None, sample_args=None): +def create(model_config, model, vec_env, storage, takes, parallel_envs, sample_args=None): """ Vel factory function """ return 
EvaluateEnvCommand( model_config=model_config, model_factory=model, env_factory=vec_env, parallel_envs=parallel_envs, - action_noise=action_noise, storage=storage, takes=takes, sample_args=sample_args From 37e1dea2d4d0683669a9451b0cdee9c8c8c1ce6c Mon Sep 17 00:00:00 2001 From: Million Integrals Date: Sat, 9 Nov 2019 15:13:55 -0800 Subject: [PATCH 151/162] Make ACER work again. --- vel/rl/api/policy.py | 4 ++++ vel/rl/env_roller/step_env_roller.py | 3 --- vel/rl/env_roller/trajectory_replay_env_roller.py | 7 ++++--- 3 files changed, 8 insertions(+), 6 deletions(-) diff --git a/vel/rl/api/policy.py b/vel/rl/api/policy.py index d9215180..b69813f0 100644 --- a/vel/rl/api/policy.py +++ b/vel/rl/api/policy.py @@ -71,3 +71,7 @@ def reset_state(self, state, dones): def action(self, observation, state=None, deterministic=False): """ Return policy action for given observation """ return self.act(observation, state=state, deterministic=deterministic)['actions'] + + def value(self, observation, state=None, deterministic=False): + """ Return policy action for given observation """ + return self.act(observation, state=state, deterministic=deterministic)['values'] diff --git a/vel/rl/env_roller/step_env_roller.py b/vel/rl/env_roller/step_env_roller.py index 44782283..cd8b5cd1 100644 --- a/vel/rl/env_roller/step_env_roller.py +++ b/vel/rl/env_roller/step_env_roller.py @@ -72,9 +72,6 @@ def rollout(self, batch_info: BatchInfo, number_of_steps: int) -> Rollout: flatten_dict(cpu_final_obs, rollout_tensors, root='final') - # for key, value in final_obs.items(): - # rollout_tensors[f"final_{key}"] = value.cpu() - return Trajectories( num_steps=accumulated_tensors['observations'].size(0), num_envs=accumulated_tensors['observations'].size(1), diff --git a/vel/rl/env_roller/trajectory_replay_env_roller.py b/vel/rl/env_roller/trajectory_replay_env_roller.py index 1a788025..be516637 100644 --- a/vel/rl/env_roller/trajectory_replay_env_roller.py +++ b/vel/rl/env_roller/trajectory_replay_env_roller.py @@ -7,7 +7,8 @@ Trajectories, Rollout, ReplayEnvRollerBase, ReplayEnvRollerFactoryBase, ReplayBuffer, ReplayBufferFactory, RlPolicy ) from vel.rl.util.actor import PolicyActor -from vel.util.tensor_util import TensorAccumulator +from vel.util.tensor_util import TensorAccumulator, to_device +from vel.util.datastructure import flatten_dict class TrajectoryReplayEnvRoller(ReplayEnvRollerBase): @@ -85,11 +86,11 @@ def rollout(self, batch_info: BatchInfo, number_of_steps: int) -> Rollout: accumulated_tensors = accumulator.result() final_obs = self.actor.act(self.last_observation.to(self.device), advance_state=False) + cpu_final_obs = to_device(final_obs, torch.device('cpu')) rollout_tensors = {} - for key, value in final_obs.items(): - rollout_tensors[f"final_{key}"] = value.cpu() + flatten_dict(cpu_final_obs, rollout_tensors, root='final') return Trajectories( num_steps=accumulated_tensors['observations'].size(0), From c85205739fef3988de627e6c4f4552abb3295d76 Mon Sep 17 00:00:00 2001 From: Million Integrals Date: Sat, 9 Nov 2019 16:25:10 -0800 Subject: [PATCH 152/162] Fixing rainbow. 
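The non-stateful modular network used to create a fresh, empty `context` dictionary on every call; it is now an optional argument, so the caller can hand all layers a shared per-forward scratch space. A minimal illustration of the calling convention (which keys a given layer reads or writes is up to that layer):

    context = {}
    output = network(input_data, context=context)
    # layers participating in the pass may have stored auxiliary values in context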
--- vel/net/modular.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/vel/net/modular.py b/vel/net/modular.py index ca9efecc..dc2ebda0 100644 --- a/vel/net/modular.py +++ b/vel/net/modular.py @@ -84,8 +84,7 @@ def reset_state(self, state, dones): """ Reset the state after the episode has been terminated """ raise NotImplementedError - def forward(self, input_data, state=None): - context = {} + def forward(self, input_data, state=None, context: dict = None): return self.layers(input_data, state=None, context=context) def grouped_parameters(self): From 8277f60a06beddd1e86b34487a61208faee7c786 Mon Sep 17 00:00:00 2001 From: Million Integrals Date: Sat, 9 Nov 2019 16:56:32 -0800 Subject: [PATCH 153/162] Vel Research - Rubik's cube project --- requirements.txt | 4 +- vel/module/input/flatten.py | 14 ------- vel/net/layer/nlp/alphabet_one_hot_encode.py | 16 ++++++-- vel/net/layer/util/flatten.py | 39 ++++++++++++++++++++ 4 files changed, 53 insertions(+), 20 deletions(-) delete mode 100644 vel/module/input/flatten.py create mode 100644 vel/net/layer/util/flatten.py diff --git a/requirements.txt b/requirements.txt index 5776b12b..2ff5c561 100644 --- a/requirements.txt +++ b/requirements.txt @@ -2,7 +2,7 @@ # This file is autogenerated by pip-compile # To update, run: # -# pip-compile +# pip-compile requirements.in # atari-py==0.2.6 # via gym atomicwrites==1.3.0 # via pytest @@ -55,4 +55,4 @@ websocket-client==0.56.0 # via visdom zipp==0.6.0 # via importlib-metadata # The following packages are considered to be unsafe in a requirements file: -# setuptools==41.4.0 # via kiwisolver +# setuptools==41.6.0 # via kiwisolver diff --git a/vel/module/input/flatten.py b/vel/module/input/flatten.py deleted file mode 100644 index 7a1e5246..00000000 --- a/vel/module/input/flatten.py +++ /dev/null @@ -1,14 +0,0 @@ -from vel.module.layers import Flatten - - -from vel.api import VModule - - -class FlattenInput(VModule): - """ Sequence input """ - def __init__(self): - super().__init__() - self.model = Flatten() - - def forward(self, input_data): - return self.model(input_data) diff --git a/vel/net/layer/nlp/alphabet_one_hot_encode.py b/vel/net/layer/nlp/alphabet_one_hot_encode.py index 1198369b..a1f27f7f 100644 --- a/vel/net/layer/nlp/alphabet_one_hot_encode.py +++ b/vel/net/layer/nlp/alphabet_one_hot_encode.py @@ -23,7 +23,11 @@ def size_hints(self) -> SizeHints: class AlphabetOneHotEncodeLayerFactory(LayerFactory): - """ Factory class for the AlphabetoneHotEncode layer """ + """ Factory class for the AlphabetOneHotEncode layer """ + + def __init__(self, alphabet_size): + super().__init__() + self.alphabet_size = alphabet_size @property def name_base(self) -> str: @@ -32,7 +36,11 @@ def name_base(self) -> str: def instantiate(self, direct_input: SizeHints, context: LayerFactoryContext, extra_args: dict) -> Layer: """ Create a given layer object """ - alphabet_size = extra_args['alphabet_size'] + if 'alphabet_size' in extra_args: + alphabet_size = extra_args['alphabet_size'] + else: + alphabet_size = self.alphabet_size + return AlphabetOneHotEncodeLayer( info=self.make_info(context), alphabet_size=alphabet_size, @@ -40,6 +48,6 @@ def instantiate(self, direct_input: SizeHints, context: LayerFactoryContext, ext ) -def create(label=None, group=None): +def create(alphabet_size=None, label=None, group=None): """ Vel factory function """ - return AlphabetOneHotEncodeLayerFactory().with_given_name(label).with_given_group(group) + return 
AlphabetOneHotEncodeLayerFactory(alphabet_size=alphabet_size).with_given_name(label).with_given_group(group) diff --git a/vel/net/layer/util/flatten.py b/vel/net/layer/util/flatten.py new file mode 100644 index 00000000..3b4a9cfd --- /dev/null +++ b/vel/net/layer/util/flatten.py @@ -0,0 +1,39 @@ +import numpy as np + +from vel.api import SizeHints, SizeHint +from vel.net.layer_base import LayerFactory, Layer, LayerFactoryContext, LayerInfo + + +class Flatten(Layer): + """ Flatten single tensor to a unit shape """ + + def __init__(self, info: LayerInfo, size_hint: SizeHint): + super().__init__(info) + + self._size_hints = SizeHints(SizeHint(None, np.prod(size_hint[1:]))) + + def forward(self, direct, state: dict = None, context: dict = None): + return direct.view(direct.size(0), -1) + + def size_hints(self) -> SizeHints: + return self._size_hints + + +class FlattenFactory(LayerFactory): + """ Factory for Concat Layer """ + @property + def name_base(self) -> str: + """ Base of layer name """ + return "flatten" + + def instantiate(self, direct_input: SizeHints, context: LayerFactoryContext, extra_args: dict) -> Layer: + """ Create a given layer object """ + return Flatten( + info=self.make_info(context), + size_hint=direct_input.assert_single() + ) + + +def create(label=None, group=None): + """ Vel factory function """ + return FlattenFactory().with_given_name(label).with_given_group(group) From 13f86fe0d1e5340b8e5e54d59e37bc49d5fdac17 Mon Sep 17 00:00:00 2001 From: Million Integrals Date: Sat, 9 Nov 2019 17:14:05 -0800 Subject: [PATCH 154/162] Dependencies update. --- Makefile | 5 +---- requirements.in | 2 +- requirements.txt | 28 ++++++++++++++-------------- 3 files changed, 16 insertions(+), 19 deletions(-) diff --git a/Makefile b/Makefile index 9725ace3..87c23a87 100644 --- a/Makefile +++ b/Makefile @@ -37,10 +37,7 @@ test: pytest . 
requirements.txt: requirements.in - pip-compile requirements.in - -requpgrade: - pip-compile --upgrade + pip-compile --upgrade requirements.in lint: flake8 vel diff --git a/requirements.in b/requirements.in index 416b5ede..979d9ef7 100644 --- a/requirements.in +++ b/requirements.in @@ -12,6 +12,6 @@ pyyaml scikit-learn torchtext torchvision -torch~=1.3 +torch~=1.3.1 tqdm visdom diff --git a/requirements.txt b/requirements.txt index 2ff5c561..2447817b 100644 --- a/requirements.txt +++ b/requirements.txt @@ -6,15 +6,15 @@ # atari-py==0.2.6 # via gym atomicwrites==1.3.0 # via pytest -attrs==19.2.0 +attrs==19.3.0 box2d-py==2.3.8 # via gym certifi==2019.9.11 # via requests chardet==3.0.4 # via requests cloudpickle==1.2.2 cycler==0.10.0 # via matplotlib dnspython==1.16.0 -future==0.18.0 # via pyglet -gym[atari,box2d,classic_control]==0.15.3 +future==0.18.2 # via pyglet +gym[atari,box2d,classic_control]==0.15.4 idna==2.8 # via requests importlib-metadata==0.23 # via pluggy, pytest joblib==0.14.0 # via scikit-learn @@ -23,31 +23,31 @@ jsonpointer==2.0 # via jsonpatch kiwisolver==1.1.0 # via matplotlib matplotlib==3.1.1 more-itertools==7.2.0 # via pytest, zipp -numpy==1.17.2 +numpy==1.17.3 opencv-python==4.1.1.26 packaging==19.2 # via pytest -pandas==0.25.1 -pillow==6.2.0 # via gym, torchvision, visdom +pandas==0.25.3 +pillow==6.2.1 # via gym, torchvision, visdom pluggy==0.13.0 # via pytest py==1.8.0 # via pytest pyglet==1.3.2 # via gym pymongo==3.9.0 -pyparsing==2.4.2 # via matplotlib, packaging -pytest==5.2.1 -python-dateutil==2.8.0 # via matplotlib, pandas +pyparsing==2.4.4 # via matplotlib, packaging +pytest==5.2.2 +python-dateutil==2.8.1 # via matplotlib, pandas pytz==2019.3 # via pandas pyyaml==5.1.2 pyzmq==18.1.0 # via visdom requests==2.22.0 # via torchtext, visdom scikit-learn==0.21.3 -scipy==1.3.1 # via gym, scikit-learn, visdom -six==1.12.0 # via atari-py, cycler, gym, packaging, python-dateutil, torchtext, torchvision, visdom, websocket-client -torch==1.3.0 +scipy==1.3.2 # via gym, scikit-learn, visdom +six==1.13.0 # via atari-py, cycler, gym, packaging, python-dateutil, torchtext, torchvision, visdom, websocket-client +torch==1.3.1 torchfile==0.1.0 # via visdom torchtext==0.4.0 -torchvision==0.4.1 +torchvision==0.4.2 tornado==6.0.3 # via visdom -tqdm==4.36.1 +tqdm==4.38.0 urllib3==1.25.6 # via requests visdom==0.1.8.9 wcwidth==0.1.7 # via pytest From 4e94cc511f0831674c74c569339895651a20ae51 Mon Sep 17 00:00:00 2001 From: Jerry Tworek Date: Sun, 10 Nov 2019 22:26:21 -0800 Subject: [PATCH 155/162] Implemented skip-connection layer. 
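The layer wraps an inner stack and adds its input back to the stack's output, so residual blocks can be declared directly in a modular-network config. A sketch of building one programmatically, assuming the module path follows the file location and with hypothetical inner factories standing in for what a YAML config would declare:

    from vel.net.layer.arch.skip_connection import create as skip_connection

    # y = x + f(x); the inner stack f must preserve the input shape so the
    # addition in SkipConnectionLayer.forward is well defined
    block_factory = skip_connection(layers=[
        inner_layer_factory(),  # hypothetical inner layer factories
        inner_layer_factory(),
    ])

Stateful inner layers are supported: in that case the block returns the merged hidden-state dictionary alongside the summed output.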
--- vel/net/layer/arch/skip_connection.py | 81 ++++++++++++++++++++++++++ vel/net/modular.py | 83 +++++++++++++++++---------- vel/net/sequence.py | 76 ------------------------ 3 files changed, 134 insertions(+), 106 deletions(-) create mode 100644 vel/net/layer/arch/skip_connection.py delete mode 100644 vel/net/sequence.py diff --git a/vel/net/layer/arch/skip_connection.py b/vel/net/layer/arch/skip_connection.py new file mode 100644 index 00000000..f627664b --- /dev/null +++ b/vel/net/layer/arch/skip_connection.py @@ -0,0 +1,81 @@ +import collections + +from vel.api import SizeHints, SizeHint +from vel.net.layer_base import LayerFactory, Layer, LayerInfo, LayerFactoryContext +from vel.net.modular import LayerList + + +class SkipConnectionLayer(Layer): + """ Container around a skip connection """ + + def __init__(self, info: LayerInfo, layers: [Layer], size_hint: SizeHint): + super().__init__(info) + + self.layers = LayerList(layers) + self._size_hints = SizeHints(size_hint) + + @property + def is_stateful(self) -> bool: + return self.layers.is_stateful + + def zero_state(self, batch_size): + return self.layers.zero_state(batch_size) + + def size_hints(self) -> SizeHints: + """ Size hints for this network """ + return self._size_hints + + def forward(self, direct, state: dict = None, context: dict = None): + """ Forward propagation of a single layer """ + if self.is_stateful: + result, out_state = self.layers(direct, state=state, context=context) + return direct + result, out_state + else: + result = self.layers(direct, state=state, context=context) + return direct + result + + +class SkipConnectionLayerFactory(LayerFactory): + """ Factory for skip connection layers """ + + def __init__(self, layers: [LayerFactory]): + super().__init__() + self.layers = layers + + @property + def name_base(self) -> str: + """ Base of layer name """ + return "skip_connection" + + def instantiate(self, direct_input: SizeHints, context: LayerFactoryContext, extra_args: dict) -> Layer: + """ Create a given layer object """ + size_hint = direct_input.assert_single() + + layers = collections.OrderedDict() + + info = self.make_info(context) + + for idx, layer_factory in enumerate(self.layers): + counter = idx + 1 + + child_context = LayerFactoryContext( + idx=counter, + parent_group=info.group, + parent_name=info.name, + data=context.data + ) + + layer = layer_factory.instantiate( + direct_input=SizeHints(size_hint), + context=child_context, + extra_args=extra_args + ) + + layers[layer.name] = layer + + return SkipConnectionLayer(info, layers=layers, size_hint=size_hint) + + +def create(layers: [LayerFactory], label=None, group=None): + """ Vel factory function """ + return SkipConnectionLayerFactory(layers=layers).with_given_name(label).with_given_group(group) diff --git a/vel/net/modular.py b/vel/net/modular.py index dc2ebda0..a4e548c8 100644 --- a/vel/net/modular.py +++ b/vel/net/modular.py @@ -8,7 +8,7 @@ from .layer_base import LayerFactory, LayerFactoryContext -class ModularSequential(nn.Module): +class LayerList(BackboneModule): """ Modification of nn.Sequential for the purpose of modular networks """ def __init__(self, layers: collections.OrderedDict): super().__init__() @@ -19,6 +19,32 @@ def __init__(self, layers: collections.OrderedDict): self.add_module(key, module) self._layers.append(module) + self._is_stateful = any(l.is_stateful for l in self._layers) + + def reset_weights(self): + for l in self._layers: + l.reset_weights() + + def size_hints(self) -> SizeHints: + return 
self._layers[-1].size_hints() + + @property + def is_stateful(self) -> bool: + """ If the model has a state that needs to be fed between individual observations """ + return self._is_stateful + + def zero_state(self, batch_size): + """ Potential state for the model """ + zero_state = {} + + for l in self._layers: + if l.is_stateful: + layer_zero_state = l.zero_state(batch_size) + if layer_zero_state is not None: + zero_state.update(layer_zero_state) + + return zero_state + def __len__(self): return len(self._layers) @@ -26,9 +52,22 @@ def __getitem__(self, item): return self._layers[item] def forward(self, direct, state: dict = None, context: dict = None): - for layer in self._layers: - direct = layer(direct, state=state, context=context) - return direct + if not self._is_stateful: + for layer in self._layers: + direct = layer(direct, state=state, context=context) + return direct + else: + data = direct + output_state = {} + + for layer in self._layers: + if layer.is_stateful: + data, new_state = layer(data, state=state, context=context) + output_state.update(new_state) + else: + data = layer(data, state=state, context=context) + + return data, output_state def instantiate_layers(layers: [LayerFactory], group: str, size_hint: SizeHints, extra_args: dict) -> nn.Module: @@ -51,22 +90,21 @@ def instantiate_layers(layers: [LayerFactory], group: str, size_hint: SizeHints, module_dict[layer.name] = layer - return ModularSequential(module_dict) + return LayerList(module_dict) class ModularNetwork(BackboneModule): """ Network that is built from layers """ - def __init__(self, layers: nn.Module): + def __init__(self, layers: LayerList): super().__init__() self.layers = layers - assert not any(l.is_stateful for l in self.layers), "Does not support stateful layers" + assert not self.layers.is_stateful def reset_weights(self): """ Call proper initializers for the weights """ - for l in self.layers: - l.reset_weights() + self.layers.reset_weights() @property def is_stateful(self) -> bool: @@ -74,7 +112,7 @@ def is_stateful(self) -> bool: return False def size_hints(self) -> SizeHints: - return self.layers[-1].size_hints() + return self.layers.size_hints() def zero_state(self, batch_size): """ Potential state for the model """ @@ -95,15 +133,14 @@ def grouped_parameters(self): class StatefulModularNetwork(BackboneModule): """ Modular network handling the state between the episodes """ - def __init__(self, layers: nn.Module): + def __init__(self, layers: LayerList): super().__init__() self.layers = layers def reset_weights(self): """ Call proper initializers for the weights """ - for l in self.layers: - l.reset_weights() + self.layers.reset_weights() @property def is_stateful(self) -> bool: @@ -111,18 +148,11 @@ def is_stateful(self) -> bool: return True def size_hints(self) -> SizeHints: - return self.layers[-1].size_hints() + return self.layers.size_hints() def zero_state(self, batch_size): """ Potential state for the model """ - zero_state = {} - - for l in self.layers: - layer_zero_state = l.zero_state(batch_size) - if layer_zero_state is not None: - zero_state.update(layer_zero_state) - - return zero_state + return self.layers.zero_state(batch_size) def reset_state(self, state, dones): """ Reset the state after the episode has been terminated """ @@ -130,20 +160,13 @@ def reset_state(self, state, dones): def forward(self, input_data, state=None): data = input_data - context = {} - output_state = {} if state is None: # input_data.device here may break. 
Should be fixed at some point state = to_device(self.zero_state(input_data.size(0)), input_data.device) - for layer in self.layers: - if layer.is_stateful: - data, new_state = layer(data, state=state, context=context) - output_state.update(new_state) - else: - data = layer(data, state=state, context=context) + data, output_state = self.layers(data, state=state, context=context) return data, output_state diff --git a/vel/net/sequence.py b/vel/net/sequence.py deleted file mode 100644 index f759051c..00000000 --- a/vel/net/sequence.py +++ /dev/null @@ -1,76 +0,0 @@ -import collections -import typing - -from vel.api import BackboneModule, SizeHints -from vel.exception import VelException -from vel.util.tensor_util import to_device - - -class GenericModularSequential(BackboneModule): - """ Modification of nn.Sequential for the purpose of modular networks """ - - def __init__(self, layers: typing.Union[collections.OrderedDict, collections.Sequence]): - super().__init__() - self._layers = [] - - if isinstance(layers, collections.OrderedDict): - for key, module in layers.items(): - self.add_module(key, module) - self._layers.append(module) - elif isinstance(layers, collections.Sequence): - for idx, module in enumerate(layers): - key = str(idx) - self.add_module(key, module) - self._layers.append(module) - else: - raise VelException("Incorrectly specified layers, must be a sequence or an ordered dict") - - self._is_stateful = any(l.is_stateful() for l in self._layers) - - def size_hints(self) -> SizeHints: - return self._layers[-1].size_hints() - - @property - def is_stateful(self) -> bool: - """ If the model has a state that needs to be fed between individual observations """ - return self._is_stateful - - def zero_state(self, batch_size): - """ Potential state for the model """ - zero_state = {} - - for l in self.layers: - layer_zero_state = l.zero_state(batch_size) - if layer_zero_state is not None: - zero_state.update(layer_zero_state) - - return zero_state - - def __len__(self): - return len(self._layers) - - def __getitem__(self, item): - return self._layers[item] - - def forward(self, direct, state: dict = None, context: dict = None): - if not self.is_stateful: - for layer in self._layers: - direct = layer(direct, state=state, context=context) - return direct - else: - output_state = {} - - if state is None: - # direct.device here may break. Should be fixed at some point - state = to_device(self.zero_state(direct.size(0)), direct.device) - - data = direct - - for layer in self.layers: - if layer.is_stateful: - data, new_state = layer(data, state=state, context=context) - output_state.update(new_state) - else: - data = layer(data, state=state, context=context) - - return data, output_state From 965903c1829c8ab9d4219b4c839243abe5a0a85a Mon Sep 17 00:00:00 2001 From: Jerry Tworek Date: Mon, 11 Nov 2019 09:11:19 -0800 Subject: [PATCH 156/162] Fixing an error in TRPO config. 
--- examples-configs/rl/atari/atari_trpo.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples-configs/rl/atari/atari_trpo.yaml b/examples-configs/rl/atari/atari_trpo.yaml index adc7850a..2fad825b 100644 --- a/examples-configs/rl/atari/atari_trpo.yaml +++ b/examples-configs/rl/atari/atari_trpo.yaml @@ -20,7 +20,7 @@ model: improvement_acceptance_ratio: 0.1 cg_damping: 0.001 vf_iters: 3 - entropy_coefficient: 0.1 + entropy_coefficient: 0.01 discount_factor: 0.99 gae_lambda: 1.00 # Generalized Advantage Estimator Lambda parameter From e2d78146d159169c09903fd5003a31a0995e70fb Mon Sep 17 00:00:00 2001 From: Jerry Tworek Date: Thu, 14 Nov 2019 13:35:45 -0800 Subject: [PATCH 157/162] Improved summary command --- vel/api/model.py | 3 +++ vel/rl/command/rl_summary_command.py | 16 ++++++++++++++++ 2 files changed, 19 insertions(+) create mode 100644 vel/rl/command/rl_summary_command.py diff --git a/vel/api/model.py b/vel/api/model.py index ac3fe547..259145ce 100644 --- a/vel/api/model.py +++ b/vel/api/model.py @@ -39,6 +39,9 @@ def summary(self): """ Print a model summary """ print(self) print("-" * 100) + for name, module in self.named_parameters(): + print("> {} {:,}".format(name, module.numel())) + print("-" * 100) number = sum(p.numel() for p in self.parameters()) print("Number of model parameters: {:,}".format(number)) print("-" * 100) diff --git a/vel/rl/command/rl_summary_command.py b/vel/rl/command/rl_summary_command.py new file mode 100644 index 00000000..108a038b --- /dev/null +++ b/vel/rl/command/rl_summary_command.py @@ -0,0 +1,16 @@ +class ModelSummary: + """ Just print model summary """ + def __init__(self, model, vec_env): + self.model_factory = model + self.vec_env = vec_env + + def run(self, *args): + """ Print model summary """ + env = self.vec_env.instantiate(parallel_envs=1, seed=1) + model = self.model_factory.instantiate(action_space=env.action_space, observation_space=env.observation_space) + model.summary() + + +def create(model, vec_env): + """ Vel factory function """ + return ModelSummary(model, vec_env) From ed4d804aab12e2da558fe36a5996edc957e2d614 Mon Sep 17 00:00:00 2001 From: Jerry Tworek Date: Thu, 14 Nov 2019 13:40:47 -0800 Subject: [PATCH 158/162] Enhancements to the modular network code. 
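This consolidates layer composition behind one contract: a stateless layer's `forward` returns just its output, a stateful layer returns `(output, state_update)`, and containers merge the per-layer state dictionaries. The convention, reduced to its core (simplified from the new `LayerList.forward`):

    def forward_layers(layers, data, state, context):
        """ Thread data through a list of layers, merging hidden-state updates """
        output_state = {}

        for layer in layers:
            if layer.is_stateful:
                data, new_state = layer(data, state=state, context=context)
                output_state.update(new_state)
            else:
                data = layer(data, state=state, context=context)

        return data, output_state

The new sequence, skip-connection and residual layers, as well as the parallel layer's stateful branch, follow the same rule, so stateful and stateless layers can be nested freely.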
--- vel/net/layer/arch/parallel.py | 35 ++++++++- vel/net/layer/arch/sequence.py | 77 ++++++++++++++++++++ vel/net/layer/arch/skip_connection.py | 6 +- vel/net/layer/residual/__init__.py | 0 vel/net/layer/residual/fc_res.py | 101 ++++++++++++++++++++++++++ vel/net/layer_base.py | 2 +- vel/net/layer_list.py | 65 +++++++++++++++++ vel/net/modular.py | 64 +--------------- 8 files changed, 282 insertions(+), 68 deletions(-) create mode 100644 vel/net/layer/arch/sequence.py create mode 100644 vel/net/layer/residual/__init__.py create mode 100644 vel/net/layer/residual/fc_res.py create mode 100644 vel/net/layer_list.py diff --git a/vel/net/layer/arch/parallel.py b/vel/net/layer/arch/parallel.py index cebe1e5e..241933f5 100644 --- a/vel/net/layer/arch/parallel.py +++ b/vel/net/layer/arch/parallel.py @@ -13,6 +13,12 @@ def __init__(self, info: LayerInfo, layers: [Layer]): self.layers = nn.ModuleList(layers) self._size_hints = SizeHints(tuple(layer.size_hints().unwrap() for layer in self.layers)) + self._is_stateful = any(l.is_stateful for l in self.layers) + + @property + def is_stateful(self) -> bool: + """ If the model has a state that needs to be fed between individual observations """ + return self._is_stateful def size_hints(self) -> SizeHints: """ Size hints for this network """ @@ -20,10 +26,33 @@ def size_hints(self) -> SizeHints: def forward(self, direct, state: dict = None, context: dict = None): """ Forward propagation of a single layer """ - results = [layer(x, state, context) for layer, x in zip(self.layers, direct)] - return tuple(results) + if self._is_stateful: + results = [] + output_state = {} + + for layer, layer_input in zip(self.layers, direct): + data, new_state = layer(layer_input, state=state, context=context) + results.append(data) + output_state.update(new_state) + + return tuple(results), output_state + else: + results = [layer(x, state, context) for layer, x in zip(self.layers, direct)] + return tuple(results) + + def zero_state(self, batch_size): + """ Potential state for the model """ + zero_state = {} + + for l in self.layers: + if l.is_stateful: + layer_zero_state = l.zero_state(batch_size) + if layer_zero_state is not None: + zero_state.update(layer_zero_state) + + return zero_state - def grouped_parameters(self) -> typing.Iterable[(str, object)]: + def grouped_parameters(self) -> typing.Iterable[typing.Tuple[str, object]]: """ Return iterable of pairs (group, parameters) """ raise NotImplementedError diff --git a/vel/net/layer/arch/sequence.py b/vel/net/layer/arch/sequence.py new file mode 100644 index 00000000..6c2beb30 --- /dev/null +++ b/vel/net/layer/arch/sequence.py @@ -0,0 +1,77 @@ +import collections + +from vel.api import SizeHints +from vel.net.layer_base import LayerFactory, Layer, LayerInfo, LayerFactoryContext +from vel.net.modular import LayerList + + +class SequenceLayer(Layer): + """ Container around a skip connection """ + + def __init__(self, info: LayerInfo, layers: [Layer]): + super().__init__(info) + + self.layers = LayerList(layers) + + @property + def is_stateful(self) -> bool: + return self.layers.is_stateful + + def zero_state(self, batch_size): + return self.layers.zero_state(batch_size) + + def size_hints(self) -> SizeHints: + """ Size hints for this network """ + return self.layers[-1].size_hints() + + def forward(self, direct, state: dict = None, context: dict = None): + """ Forward propagation of a single layer """ + return self.layers(direct, state=state, context=context) + + +class SequenceFactory(LayerFactory): + """ Factory for 
skip connection layers """ + + def __init__(self, layers: [LayerFactory]): + super().__init__() + self.layers = layers + + @property + def name_base(self) -> str: + """ Base of layer name """ + return "skip_connection" + + def instantiate(self, direct_input: SizeHints, context: LayerFactoryContext, extra_args: dict) -> Layer: + """ Create a given layer object """ + loop_size_hints = direct_input + + layers = collections.OrderedDict() + + info = self.make_info(context) + + for idx, layer_factory in enumerate(self.layers): + counter = idx + 1 + + child_context = LayerFactoryContext( + idx=counter, + parent_group=info.group, + parent_name=info.name, + data=context.data + ) + + layer = layer_factory.instantiate( + direct_input=loop_size_hints, + context=child_context, + extra_args=extra_args + ) + + loop_size_hints = layer.size_hints() + + layers[layer.name] = layer + + return SequenceLayer(info, layers=layers) + + +def create(layers: [LayerFactory], label=None, group=None): + """ Vel factory function """ + return SequenceFactory(layers=layers).with_given_name(label).with_given_group(group) diff --git a/vel/net/layer/arch/skip_connection.py b/vel/net/layer/arch/skip_connection.py index f627664b..bc7cab8a 100644 --- a/vel/net/layer/arch/skip_connection.py +++ b/vel/net/layer/arch/skip_connection.py @@ -49,7 +49,7 @@ def name_base(self) -> str: def instantiate(self, direct_input: SizeHints, context: LayerFactoryContext, extra_args: dict) -> Layer: """ Create a given layer object """ - size_hint = direct_input.assert_single() + size_hint = loop_size_hint = direct_input.assert_single() layers = collections.OrderedDict() @@ -66,11 +66,13 @@ def instantiate(self, direct_input: SizeHints, context: LayerFactoryContext, ext ) layer = layer_factory.instantiate( - direct_input=SizeHints(size_hint), + direct_input=SizeHints(loop_size_hint), context=child_context, extra_args=extra_args ) + loop_size_hint = layer.size_hints().assert_single() + layers[layer.name] = layer return SkipConnectionLayer(info, layers=layers, size_hint=size_hint) diff --git a/vel/net/layer/residual/__init__.py b/vel/net/layer/residual/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/vel/net/layer/residual/fc_res.py b/vel/net/layer/residual/fc_res.py new file mode 100644 index 00000000..393dd5e3 --- /dev/null +++ b/vel/net/layer/residual/fc_res.py @@ -0,0 +1,101 @@ +import typing +import torch.nn as nn +import torch.nn.init as init +import numpy as np + +from vel.api import SizeHint, SizeHints +from vel.net.layer_base import Layer, LayerFactory, LayerInfo, LayerFactoryContext + + +class FcResidual(Layer): + """ Residual fully-connected block """ + + def __init__(self, info: LayerInfo, input_shape: SizeHint, divisor: int = 1, activation: str = 'relu', + normalization: typing.Optional[str] = None): + super().__init__(info) + + self._size_hints = SizeHints(input_shape) + + self.trunk_shape = input_shape[-1] + self.bottleneck_shape = self.trunk_shape // divisor + + self.f1 = nn.Linear(self.trunk_shape, self.bottleneck_shape) + + if normalization == 'layer': + self.n1 = nn.LayerNorm(self.bottleneck_shape) + elif normalization is None: + self.n1 = nn.Identity() + else: + raise NotImplementedError + + if activation == 'relu': + self.a1 = nn.ReLU(inplace=True) + else: + raise NotImplementedError + + self.f2 = nn.Linear(self.bottleneck_shape, self.trunk_shape) + + if normalization == 'layer': + self.n2 = nn.LayerNorm(self.trunk_shape) + elif normalization is None: + self.n2 = nn.Identity() + else: + raise 
NotImplementedError + + if activation == 'relu': + self.a2 = nn.ReLU(inplace=True) + else: + raise NotImplementedError + + def reset_weights(self): + """ Call proper initializers for the weights """ + for m in self.modules(): + if isinstance(m, nn.Linear): + # init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') + init.orthogonal_(m.weight, gain=np.sqrt(2)) + init.constant_(m.bias, 0.0) + + def size_hints(self) -> SizeHints: + """ Size hints for this network """ + return self._size_hints + + def forward(self, direct, state: dict, context: dict): + residual = direct + + residual = self.a1(self.n1(self.f1(residual))) + residual = self.a2(self.n2(self.f2(residual))) + + return residual + direct + + +class FcResidualFactory(LayerFactory): + """ Factory for fully-connected residual layers """ + def __init__(self, divisor: int, activation: str, normalization: typing.Optional[str] = None): + super().__init__() + self.divisor = divisor + self.activation = activation + self.normalization = normalization + + @property + def name_base(self) -> str: + """ Base of layer name """ + return "fc_residual" + + def instantiate(self, direct_input: SizeHints, context: LayerFactoryContext, extra_args: dict) -> Layer: + """ Create a given layer object """ + size_hint = direct_input.assert_single() + info = self.make_info(context) + + return FcResidual( + info=info, + input_shape=size_hint, + divisor=self.divisor, + activation=self.activation, + normalization=self.normalization + ) + + +def create(divisor: int, activation: str = 'relu', normalization: typing.Optional[str] = None, + label=None, group=None): + return FcResidualFactory(divisor, activation, normalization) + diff --git a/vel/net/layer_base.py b/vel/net/layer_base.py index 591041c6..e974a8f1 100644 --- a/vel/net/layer_base.py +++ b/vel/net/layer_base.py @@ -38,7 +38,7 @@ def forward(self, direct, state: dict, context: dict): """ Forward propagation of a single layer """ raise NotImplementedError - def grouped_parameters(self): + def grouped_parameters(self) -> typing.Iterable[typing.Tuple[str, object]]: """ Return iterable of pairs (group, parameters) """ return [(self.group, self.parameters())] diff --git a/vel/net/layer_list.py b/vel/net/layer_list.py new file mode 100644 index 00000000..24d44e7d --- /dev/null +++ b/vel/net/layer_list.py @@ -0,0 +1,65 @@ +import collections + +from vel.api import BackboneModule, SizeHints + + +class LayerList(BackboneModule): + """ Modification of nn.Sequential for the purpose of modular networks """ + def __init__(self, layers: collections.OrderedDict): + super().__init__() + + self._layers = [] + + for key, module in layers.items(): + self.add_module(key, module) + self._layers.append(module) + + self._is_stateful = any(l.is_stateful for l in self._layers) + + def reset_weights(self): + for l in self._layers: + l.reset_weights() + + def size_hints(self) -> SizeHints: + return self._layers[-1].size_hints() + + @property + def is_stateful(self) -> bool: + """ If the model has a state that needs to be fed between individual observations """ + return self._is_stateful + + def zero_state(self, batch_size): + """ Potential state for the model """ + zero_state = {} + + for l in self._layers: + if l.is_stateful: + layer_zero_state = l.zero_state(batch_size) + if layer_zero_state is not None: + zero_state.update(layer_zero_state) + + return zero_state + + def __len__(self): + return len(self._layers) + + def __getitem__(self, item): + return self._layers[item] + + def forward(self, direct, state: dict = None, 
context: dict = None): + if not self._is_stateful: + for layer in self._layers: + direct = layer(direct, state=state, context=context) + return direct + else: + data = direct + output_state = {} + + for layer in self._layers: + if layer.is_stateful: + data, new_state = layer(data, state=state, context=context) + output_state.update(new_state) + else: + data = layer(data, state=state, context=context) + + return data, output_state diff --git a/vel/net/modular.py b/vel/net/modular.py index a4e548c8..82573056 100644 --- a/vel/net/modular.py +++ b/vel/net/modular.py @@ -5,69 +5,9 @@ from vel.api import BackboneModule, ModuleFactory, SizeHints from vel.util.tensor_util import to_device -from .layer_base import LayerFactory, LayerFactoryContext - - -class LayerList(BackboneModule): - """ Modification of nn.Sequential for the purpose of modular networks """ - def __init__(self, layers: collections.OrderedDict): - super().__init__() - - self._layers = [] - - for key, module in layers.items(): - self.add_module(key, module) - self._layers.append(module) - - self._is_stateful = any(l.is_stateful for l in self._layers) - - def reset_weights(self): - for l in self._layers: - l.reset_weights() - - def size_hints(self) -> SizeHints: - return self._layers[-1].size_hints() - - @property - def is_stateful(self) -> bool: - """ If the model has a state that needs to be fed between individual observations """ - return self._is_stateful - - def zero_state(self, batch_size): - """ Potential state for the model """ - zero_state = {} - - for l in self._layers: - if l.is_stateful: - layer_zero_state = l.zero_state(batch_size) - if layer_zero_state is not None: - zero_state.update(layer_zero_state) - - return zero_state - def __len__(self): - return len(self._layers) - - def __getitem__(self, item): - return self._layers[item] - - def forward(self, direct, state: dict = None, context: dict = None): - if not self._is_stateful: - for layer in self._layers: - direct = layer(direct, state=state, context=context) - return direct - else: - data = direct - output_state = {} - - for layer in self._layers: - if layer.is_stateful: - data, new_state = layer(data, state=state, context=context) - output_state.update(new_state) - else: - data = layer(data, state=state, context=context) - - return data, output_state +from .layer_base import LayerFactory, LayerFactoryContext +from .layer_list import LayerList def instantiate_layers(layers: [LayerFactory], group: str, size_hint: SizeHints, extra_args: dict) -> nn.Module: From fc554a02f90b0be2ec950e1b213991c1e61b78c7 Mon Sep 17 00:00:00 2001 From: Million Integrals Date: Thu, 14 Nov 2019 17:18:33 -0800 Subject: [PATCH 159/162] Fixing O-U noise --- vel/rl/module/noise/ou_noise.py | 22 +++++++++++++--------- 1 file changed, 13 insertions(+), 9 deletions(-) diff --git a/vel/rl/module/noise/ou_noise.py b/vel/rl/module/noise/ou_noise.py index 10c154f0..66bcb065 100644 --- a/vel/rl/module/noise/ou_noise.py +++ b/vel/rl/module/noise/ou_noise.py @@ -20,25 +20,29 @@ def __init__(self, std_dev: float, action_space: gym.Space): self.register_buffer('low_tensor', torch.from_numpy(self.action_space.low).unsqueeze(0)) self.register_buffer('high_tensor', torch.from_numpy(self.action_space.high).unsqueeze(0)) + def _expand_processes(self, shape): + while len(self.processes) < shape: + len_action_space = self.action_space.shape[-1] + + self.processes.append( + OrnsteinUhlenbeckNoiseProcess( + np.zeros(len_action_space), float(self.std_dev) * np.ones(len_action_space) + ) + ) + def 
reset_episodic_state(self, dones): """ A hook for a model to react when during training episode is finished """ + self._expand_processes(dones.shape[0]) + for idx, done in enumerate(dones.cpu()): if done > 0.5: self.processes[idx].reset() def forward(self, actions): """ Return model step after applying noise """ - while len(self.processes) < actions.shape[0]: - len_action_space = self.action_space.shape[-1] - - self.processes.append( - OrnsteinUhlenbeckNoiseProcess( - np.zeros(len_action_space), float(self.std_dev) * np.ones(len_action_space) - ) - ) + self._expand_processes(actions.shape[0]) noise = torch.from_numpy(np.stack([x() for x in self.processes])).float().to(actions.device) - return torch.min(torch.max(actions + noise, self.low_tensor), self.high_tensor) From 3e0dd6ac07005cca7a9ac63a5f10f1a8e9156d25 Mon Sep 17 00:00:00 2001 From: Million Integrals Date: Thu, 14 Nov 2019 17:27:52 -0800 Subject: [PATCH 160/162] Tiny updates to EWMA normalization. --- vel/module/input/normalize_ewma.py | 4 ++-- vel/net/layer/input/normalize_ewma.py | 3 +++ 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/vel/module/input/normalize_ewma.py b/vel/module/input/normalize_ewma.py index 3219e358..0e59b792 100644 --- a/vel/module/input/normalize_ewma.py +++ b/vel/module/input/normalize_ewma.py @@ -21,18 +21,18 @@ def __init__(self, input_shape, beta=0.99, per_element_update=False, epsilon=1e- def reset_weights(self): self.running_mean.zero_() self.running_var.fill_(1.0) - self.count.fill_(self.epsilon) + self.debiasing_term.fill_(self.epsilon) def forward(self, input_vector): # Make sure input is float32 input_vector = input_vector.to(torch.float) if self.training: - batch_size = input_vector.size(0) batch_mean = input_vector.mean(dim=0) batch_var = input_vector.var(dim=0, unbiased=False) if self.per_element_update: + batch_size = input_vector.size(0) weight = self.beta ** batch_size else: weight = self.beta diff --git a/vel/net/layer/input/normalize_ewma.py b/vel/net/layer/input/normalize_ewma.py index 50b6dec5..64e1cbb3 100644 --- a/vel/net/layer/input/normalize_ewma.py +++ b/vel/net/layer/input/normalize_ewma.py @@ -22,6 +22,9 @@ def __init__(self, info: LayerInfo, input_shape: SizeHints, beta: float = 0.99, input_shape=self.input_shape.assert_single()[1:] # Remove batch axis ) + def reset_weights(self): + self.normalize.reset_weights() + def forward(self, direct, state: dict = None, context: dict = None): return self.normalize(direct) From 174c505ac57ff5b2ec1eff6bb47d12773d448833 Mon Sep 17 00:00:00 2001 From: Million Integrals Date: Thu, 14 Nov 2019 19:41:25 -0800 Subject: [PATCH 161/162] Registering env for the iteration. --- vel/rl/reinforcer/on_policy_iteration_reinforcer.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/vel/rl/reinforcer/on_policy_iteration_reinforcer.py b/vel/rl/reinforcer/on_policy_iteration_reinforcer.py index 13b9853f..b9455fd7 100644 --- a/vel/rl/reinforcer/on_policy_iteration_reinforcer.py +++ b/vel/rl/reinforcer/on_policy_iteration_reinforcer.py @@ -5,6 +5,7 @@ import tqdm from vel.api import ModuleFactory, TrainingInfo, EpochInfo, BatchInfo +from vel.openai.baselines.common.vec_env import VecEnv from vel.rl.api import ( Reinforcer, ReinforcerFactory, VecEnvFactory, EnvRollerFactoryBase, EnvRollerBase, RlPolicy @@ -36,9 +37,10 @@ class OnPolicyIterationReinforcer(Reinforcer): May split the sample into multiple batches and may replay batches a few times. 
""" def __init__(self, device: torch.device, settings: OnPolicyIterationReinforcerSettings, policy: RlPolicy, - env_roller: EnvRollerBase) -> None: + env: VecEnv, env_roller: EnvRollerBase) -> None: self.device = device self.settings = settings + self.env = env self.env_roller = env_roller self._model: RlPolicy = policy.to(self.device) @@ -67,6 +69,9 @@ def initialize_training(self, training_info: TrainingInfo, model_state=None, hid self.policy.load_state_dict(model_state) else: self.policy.reset_weights() + + # Register env in the training info + training_info['env'] = self.env def train_epoch(self, epoch_info: EpochInfo, interactive=True) -> None: """ Train model on an epoch of a fixed number of batch updates """ @@ -160,7 +165,7 @@ def instantiate(self, device: torch.device) -> Reinforcer: env = self.env_factory.instantiate(parallel_envs=self.parallel_envs, seed=self.seed) policy = self.model_factory.instantiate(action_space=env.action_space, observation_space=env.observation_space) env_roller = self.env_roller_factory.instantiate(environment=env, policy=policy, device=device) - return OnPolicyIterationReinforcer(device, self.settings, policy, env_roller) + return OnPolicyIterationReinforcer(device, self.settings, policy, env, env_roller) def create(model_config, model, vec_env, env_roller, parallel_envs, number_of_steps, From 4582c4e743dbb7c9e5ae109868bbad40bb6ab14c Mon Sep 17 00:00:00 2001 From: Mattia Rigotti Date: Fri, 21 Feb 2020 12:21:40 -0500 Subject: [PATCH 162/162] Minor fixes to rl.command.record_movie_command and evaluate (#55) * A couple of minor bugfixes * Minor fix * Load checkpoint on correct device (cpu or cuda) * Fix openai logging --- examples-configs/rl/atari/atari_dqn.yaml | 1 + vel/rl/command/record_movie_command.py | 6 +++--- vel/rl/command/rl_train_command.py | 6 +++--- vel/storage/classic.py | 5 +++-- 4 files changed, 10 insertions(+), 8 deletions(-) diff --git a/examples-configs/rl/atari/atari_dqn.yaml b/examples-configs/rl/atari/atari_dqn.yaml index c2fd5cde..2406ea1e 100644 --- a/examples-configs/rl/atari/atari_dqn.yaml +++ b/examples-configs/rl/atari/atari_dqn.yaml @@ -74,3 +74,4 @@ commands: evaluate: name: vel.rl.command.evaluate_env_command takes: 100 + parallel_envs: 1 diff --git a/vel/rl/command/record_movie_command.py b/vel/rl/command/record_movie_command.py index 79598e30..12eee428 100644 --- a/vel/rl/command/record_movie_command.py +++ b/vel/rl/command/record_movie_command.py @@ -28,7 +28,7 @@ def run(self): device = self.model_config.torch_device() env = self.env_factory.instantiate_single(preset='record', seed=self.model_config.seed) - model = self.model_factory.instantiate(action_space=env.action_space).to(device) + model = self.model_factory.instantiate(action_space=env.action_space, observation_space=env.observation_space).to(device) training_info = TrainingInfo( start_epoch_idx=self.storage.last_epoch_idx() @@ -61,11 +61,11 @@ def record_take(self, model, env_instance, device, take_number): observation_tensor = torch.from_numpy(observation_array).to(device) if model.is_stateful: - output = model.step(observation_tensor, hidden_state, **self.sample_args) + output = model.act(observation_tensor, hidden_state, **self.sample_args) hidden_state = output['state'] actions = output['actions'] else: - actions = model.step(observation_tensor, **self.sample_args)['actions'] + actions = model.act(observation_tensor, **self.sample_args)['actions'] actions = actions.detach().cpu().numpy() diff --git a/vel/rl/command/rl_train_command.py 
b/vel/rl/command/rl_train_command.py index b4d10758..6d6f0303 100644 --- a/vel/rl/command/rl_train_command.py +++ b/vel/rl/command/rl_train_command.py @@ -141,11 +141,11 @@ def start_training(self, reinforcer: Reinforcer, optimizer: VelOptimizer) -> Tra def _openai_logging(self, epoch_result): """ Use OpenAI logging facilities for the same type of logging """ for key in sorted(epoch_result.keys()): - if key == 'fps': + if key.name == 'fps': # Not super elegant, but I like nicer display of FPS - openai_logger.record_tabular(key, int(epoch_result[key])) + openai_logger.record_tabular(key.name, int(epoch_result[key])) else: - openai_logger.record_tabular(key, epoch_result[key]) + openai_logger.record_tabular(key.name, epoch_result[key]) openai_logger.dump_tabular() diff --git a/vel/storage/classic.py b/vel/storage/classic.py index e61dd91b..5815dca4 100644 --- a/vel/storage/classic.py +++ b/vel/storage/classic.py @@ -36,9 +36,10 @@ def load(self, train_info: TrainingInfo) -> (dict, dict): Resume learning process and return loaded hidden state dictionary """ last_epoch = train_info.start_epoch_idx + device = self.model_config.torch_device() - model_state = torch.load(self.checkpoint_filename(last_epoch)) - hidden_state = torch.load(self.checkpoint_hidden_filename(last_epoch)) + model_state = torch.load(self.checkpoint_filename(last_epoch), map_location=device) + hidden_state = torch.load(self.checkpoint_hidden_filename(last_epoch), map_location=device) self.checkpoint_strategy.restore(hidden_state) train_info.restore(hidden_state)