From 79b410d280e3365e3a958b144274bb899e6bf620 Mon Sep 17 00:00:00 2001
From: Ezra-H
Date: Thu, 29 Apr 2021 01:14:25 +0800
Subject: [PATCH 01/13] add jax operator

---
 distml/operator/__init__.py | 3 ---
 format.sh                   | 8 ++++----
 2 files changed, 4 insertions(+), 7 deletions(-)

diff --git a/distml/operator/__init__.py b/distml/operator/__init__.py
index 5fe7080..e69de29 100644
--- a/distml/operator/__init__.py
+++ b/distml/operator/__init__.py
@@ -1,3 +0,0 @@
-from distml.operator.torch_operator import TorchTrainingOperator
-
-__all__ = ["TorchTrainingOperator"]
diff --git a/format.sh b/format.sh
index c03b7b9..bad8cb9 100755
--- a/format.sh
+++ b/format.sh
@@ -46,7 +46,7 @@ builtin cd "$ROOT" || exit 1

 # Add the upstream remote if it doesn't exist
 if ! git remote -v | grep -q upstream; then
-    git remote add 'upstream' 'https://github.com/ray-project/distml.git'
+    git remote add 'upstream' 'https://github.com/ray-project/distml.git'
 fi

 FLAKE8_VERSION=$(flake8 --version | awk '{print $1}')
@@ -106,14 +106,14 @@ format_changed() {
         yapf --in-place "${YAPF_EXCLUDES[@]}" "${YAPF_FLAGS[@]}"
         if which flake8 >/dev/null; then
             git diff --name-only --diff-filter=ACRM "$MERGEBASE" -- '*.py' | xargs -P 5 \
-                flake8 --inline-quotes '"' --no-avoid-escape --ignore=N,I,C408,E121,E123,E126,E226,E24,E704,W503,W504,W605
+                flake8 --inline-quotes '"' --no-avoid-escape --ignore=N,I,C408,E121,E123,E126,E226,E24,E704,W503,W504,W605
         fi
     fi

     if ! git diff --diff-filter=ACRM --quiet --exit-code "$MERGEBASE" -- '*.pyx' '*.pxd' '*.pxi' &>/dev/null; then
         if which flake8 >/dev/null; then
             git diff --name-only --diff-filter=ACRM "$MERGEBASE" -- '*.pyx' '*.pxd' '*.pxi' | xargs -P 5 \
-                flake8 --inline-quotes '"' --no-avoid-escape --ignore=N,I,C408,E121,E123,E126,E211,E225,E226,E227,E24,E704,E999,W503,W504,W605
+                flake8 --inline-quotes '"' --no-avoid-escape --ignore=N,I,C408,E121,E123,E126,E211,E225,E226,E227,E24,E704,E999,W503,W504,W605
         fi
     fi
 }
@@ -121,7 +121,7 @@ format_changed() {
 # Format all files, and print the diff to stdout for travis.
 format_all() {
     yapf --diff "${YAPF_FLAGS[@]}" "${YAPF_EXCLUDES[@]}" distml
-    flake8 --inline-quotes '"' --no-avoid-escape --ignore=N,I,C408,E121,E123,E126,E211,E225,E226,E227,E24,E704,E999,W503,W504,W605 distml
+    flake8 --inline-quotes '"' --no-avoid-escape --ignore=N,I,C408,E121,E123,E126,E211,E225,E226,E227,E24,E704,E999,W503,W504,W605 distml
 }

 # This flag formats individual files. --files *must* be the first command line
From dac98abb101848125769079265e77d0ab90a787d Mon Sep 17 00:00:00 2001
From: Ezra-H
Date: Thu, 29 Apr 2021 01:14:50 +0800
Subject: [PATCH 02/13] add jax operator

---
 distml/operator/jax_operator.py   | 331 ++++++++++++++++++++++++++++++
 examples/jax/.ray.lock            |   0
 examples/jax/default_train.csv    |   2 +
 examples/jax/jax_util/__init__.py |   2 +
 examples/jax/jax_util/datasets.py | 281 +++++++++++++++++++++++++
 examples/jax/jax_util/resnet.py   | 221 ++++++++++++++++++++
 examples/jax/mnist_jax_example.py | 120 +++++++++++
 7 files changed, 957 insertions(+)
 create mode 100644 distml/operator/jax_operator.py
 create mode 100755 examples/jax/.ray.lock
 create mode 100644 examples/jax/default_train.csv
 create mode 100644 examples/jax/jax_util/__init__.py
 create mode 100644 examples/jax/jax_util/datasets.py
 create mode 100644 examples/jax/jax_util/resnet.py
 create mode 100644 examples/jax/mnist_jax_example.py

diff --git a/distml/operator/jax_operator.py b/distml/operator/jax_operator.py
new file mode 100644
index 0000000..3ac8cd4
--- /dev/null
+++ b/distml/operator/jax_operator.py
@@ -0,0 +1,331 @@
+import numpy as np
+import cupy as cp
+import jax
+from jax import grad, value_and_grad
+import jax.numpy as jnp
+from jax.lib import xla_client
+from jax.dlpack import from_dlpack, to_dlpack
+from jax.tree_util import tree_flatten, tree_unflatten, tree_structure, tree_map, build_tree
+from jax._src.util import unzip2
+from jax.experimental.optimizers import OptimizerState
+
+from .base_operator import TrainingOperator
+from distml.util import ThroughputCollection, func_timer
+from ray.util.sgd.utils import TimerCollection, AverageMeterCollection
+
+import time
+
+
+class JAXTrainingOperator(TrainingOperator):
+
+    def __init__(self, operator_config):
+        super(JAXTrainingOperator, self).__init__(operator_config)
+        # Should be set by users in the `register` function.
+        # model methods
+        self.opt_state = None
+        self.init_fun = None
+        self.predict_fun = None
+        # optimizer methods
+        self.opt_init = None
+        self.opt_update = None
+        self.get_params = None
+
+        self.criterion = None
+
+        # Data loaders for training and validation, registered by users.
+        self._train_loader = None
+        self._validation_loader = None
+
+        self.setup(operator_config)
+
+        if hasattr(operator_config, "jit_mode"):
+            assert not operator_config["jit_mode"], "jit is not supported in jax operator."
+
+        self.train_step_num = 0
+
+    def setup(self, *args, **kwargs):
+        """Function that needs to be overridden by users.
+
+        Example:
+            # some code is the same for all users, maybe we can put it in register.
+            rng_key = random.PRNGKey(0)
+            input_shape = (28, 28, 1, batch_size)
+            lr=0.01
+            init_fun, predict_fun = ResNet18(num_classes)
+            _, init_params = init_fun(rng_key, input_shape)
+
+            opt_init, opt_update, get_params = optimizers.adam(lr)
+            opt_state = opt_init(init_params)
+
+            self.register(model=(opt_state, get_params, predict_fun), optimizer=opt_update, criterion=lambda logits, targets:-jnp.sum(logits * targets))
+
+        """
+
+        pass
+
+    def register(self,
+                 *,
+                 model,
+                 optimizer,
+                 criterion,
+                 lr_schedulers=None,
+                 jit_mode=False):
+        """Register critical information about the model with the operator."""
+        self.criterion = criterion
+        if lr_schedulers:
+            self.lr_schedulers = lr_schedulers
+            print("WARNING: jax does not support learning rate schedulers."
+                  "This will not work.")
+
+        self._register_model(model)
+        self._register_optimizer(optimizer)
+
+    def _register_model(self, model):
+        """Register model components.
+
+        This function shall be implemented in framework-specific operators.
+        """
+        self.opt_state = model[0]
+        self.init_fun = model[1]
+        self.predict_fun = model[2]
+
+    def _register_optimizer(self, optimizer):
+        self.opt_init = optimizer[0]
+        self.opt_update = optimizer[1]
+        self.get_params = optimizer[2]
+
+    def register_data(self, *, train_loader=None, validation_loader=None):
+        self._train_loader = train_loader
+        self._validation_loader = validation_loader
+
+    def _get_train_loader(self):
+        return self._train_loader
+
+    def _get_validation_loader(self):
+        return self._validation_loader
+
+    def loss_func(self, params, batch):
+        """A function to calculate predictions and loss value.
+
+        This function is going to be decorated by `grad` in Jax to calculate gradients.
+
+        Args:
+            batch (tuple): a data batch containing a feature/target pair.
+        """
+        inputs, targets = batch
+        logits = self.predict_fun(params, inputs)
+        return self.criterion(logits, targets)
+
+    def derive_updates(self, batch):
+        """Compute the parameter updates on a given batch of data.
+
+        The `derive_updates` function should be called in conjunction with
+        the next `apply_updates` function in order to finish one iteration
+        of training.
+
+        Args:
+            batch (tuple): a data batch containing a feature/target pair.
+        """
+        loss_val, gradient = self._calculate_gradient(self.opt_state, batch)
+
+        gradient_dict, tree = tree_flatten(gradient)
+        assert tree == self.opt_state[1]
+
+        if hasattr(self, "preset_keys"):
+            gradient_dict = {k:g for k, g in zip(self.preset_keys, gradient_dict)}
+        else:
+            gradient_dict = {f"{idx}":g for idx, g in enumerate(gradient_dict)}
+        return loss_val.item(), gradient_dict
+
+    def apply_updates(self, updates):
+        """Set and apply the updates using the opt_update in Jax.
+
+        Args:
+            updates (dict): a dictionary of parameter name and updates.
+        """
+        keys, updates = unzip2(sorted(updates.items(), key=lambda d: int(d[0])))
+        updates = tree_unflatten(self.opt_state[1], updates)
+        self.opt_state = self.opt_update(self.train_step_num, updates, self.opt_state)
+        self.train_step_num += 1
+
+    def to_cupy(self, tensor):
+        """Convert a jax GPU tensor to a cupy tensor."""
+        if isinstance(tensor, list):
+            return list(map(self.to_cupy, tensor))
+        ctensor = cp.fromDlpack(self.get_jax_dlpack(tensor))
+        return ctensor
+
+    def to_operator_tensor(self, tensor):
+        """Convert a cupy tensor to jax tensor.
+
+        There is a known bug: the tensor layouts as interpreted by cupy and jax differ, but dlpack does not convert the layout.
+        """
+        if isinstance(tensor, list):
+            return list(map(self.to_operator_tensor, tensor))
+        return from_dlpack(tensor.toDlpack())
+
+    # TODO(HUI): support return logits by adding use_aux in `value_and_grad`
+    def _calculate_gradient(self, opt_state, batch):
+        params = self.get_params(opt_state)
+        loss_val, gradient = value_and_grad(self.loss_func)(params, batch)
+        return loss_val, gradient
+
+    def get_jax_dlpack(self, tensor):
+        """Get the dlpack of a jax tensor.
+
+        The jax api might return a different pointer address after the conversion.
+        We use the xla api to avoid this bug in the jax api.
+        """
+        return xla_client._xla.buffer_to_dlpack_managed_tensor(tensor.device_buffer,
+                                                               take_ownership=False)
+
+    def validate_batch(self, batch):
+        """Perform validation over a data batch.
+
+        Args:
+            batch (tuple): a data batch containing a feature/target pair.
+        """
+        params = self.get_params(self.opt_state)
+        criterion = self.criterion
+        predict_fun = self.predict_fun
+
+        # unpack features into a list to support multiple-input models
+        features, targets = batch
+
+        outputs = predict_fun(params, features)
+        loss = criterion(outputs, targets)
+        prediction_class = jnp.argmax(outputs, axis=1)
+        targets_class = jnp.argmax(targets, axis=1)
+
+        acc = jnp.mean(prediction_class == targets_class)
+        samples_num = targets.shape[0]
+
+        return {
+            "val_loss": loss.item(),
+            "val_accuracy": acc.item(),
+            "samples_num": samples_num
+        }
+
+    def get_parameters(self, cpu):
+        """Get the flattened parameters."""
+        params = self.get_params(self.opt_state)
+        flatten_params, tree = tree_flatten(params)
+        if not hasattr(self, "tree"):
+            self.tree = tree
+
+        if cpu:
+            flatten_params = list(map(np.asarray, flatten_params))
+        return flatten_params
+
+    def get_named_parameters(self, cpu):
+        """Get the named parameters.
+
+        In jax, we need to construct a dict to contain the parameters.
+        """
+        params = self.get_parameters(cpu)
+        if hasattr(self, "preset_keys"):
+            dict_params = {name:p for name, p in zip(self.preset_keys, params)}
+        else:
+            dict_params = {f"{idx}":p for idx, p in enumerate(params)}
+        return dict_params
+
+    # TODO(HUI): used in load states or load parameters
+    def set_parameters(self, new_params):
+        """Use new parameters to replace model parameters.
+
+        In jax, we need to construct a dict to contain the parameters.
+
+        Args:
+            new_params (dict): New parameters to update the current model.
+        """
+        assert isinstance(new_params, dict)
+
+        keys, new_params = unzip2(sorted(new_params.items(), key=lambda d: int(d[0])))
+        self.preset_keys = keys
+
+        if not hasattr(self, "tree"):
+            self.tree = tree_structure(self.get_params(self.opt_state))
+
+        states_flat, tree, subtrees = self.opt_state
+
+        states = map(tree_unflatten, subtrees, states_flat)
+
+        def update(param, state):
+            new_state = param, *state[1:]
+            return new_state
+
+        new_states = map(update, new_params, states)
+
+        new_states_flat, new_subtrees = unzip2(map(tree_flatten, new_states))
+
+        if not new_subtrees:
+            raise RuntimeError("subtrees of new params is empty.")
+        for idx, (subtree, new_subtree) in enumerate(zip(subtrees, new_subtrees)):
+            if new_subtree != subtree:
+                msg = ("input structure did not match the saved params structure. "
+                       "input {} and output {}.")
+                raise TypeError(msg.format(subtree, new_subtree))
+
+        self.opt_state = OptimizerState(new_states_flat, tree, new_subtrees)
+
+    def reset_optimizer_for_params(self, params):
+        keys, params = unzip2(sorted(params.items(), key=lambda d: int(d[0])))
+        self.tree = tree_structure(params)
+        self.opt_state = self.opt_init(params)
+
+    def ones(self, shape, cpu=True):
+        if cpu:
+            return np.ones(shape)
+        else:
+            return jnp.ones(shape)
+
+    def zeros(self, shape, cpu=True):
+        if cpu:
+            return np.zeros(shape)
+        else:
+            return jnp.zeros(shape)
+
+    def ones_like(self, x, cpu=True):
+        if cpu:
+            return np.ones_like(x)
+        else:
+            return jnp.ones_like(x)
+
+    def zeros_like(self, x, cpu=True):
+        if cpu:
+            return np.zeros_like(x)
+        else:
+            return jnp.zeros_like(x)
+
+    def numel(self, v):
+        return np.size(v)
+
+    def asarray(self, v):
+        return jnp.asarray(v)
+
+    def clean_redundancy(self):
+        del self._train_loader
+        del self._validation_loader
+
+    # TODO(HUI): use pickle to serialize parameters or states and save it.
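A minimal sketch of the pickle-based checkpointing hinted at by the TODO above, for illustration only and not part of this patch. It assumes the flattened numpy parameters returned by get_named_parameters(cpu=True) pickle cleanly and that checkpoint is a file path; both helpers are hypothetical:

    import pickle

    def save_parameters(self, checkpoint):
        # Flattened numpy parameters serialize cleanly; device arrays would not.
        with open(checkpoint, "wb") as f:
            pickle.dump(self.get_named_parameters(cpu=True), f)

    def load_parameters(self, checkpoint):
        # set_parameters accepts the same name-to-array dict produced above.
        with open(checkpoint, "rb") as f:
            self.set_parameters(pickle.load(f))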
+    def save_parameters(self, checkpoint):
+        raise NotImplementedError(
+            "save_parameters is not supported in jax operator.")
+
+    def load_parameters(self, checkpoint):
+        raise NotImplementedError(
+            "load_parameters is not supported in jax operator.")
+
+    def save_states(self, states):
+        raise NotImplementedError(
+            "save_states is not supported in jax operator.")
+
+    def get_states(self, states):
+        raise NotImplementedError(
+            "get_states is not supported in jax operator.")
+
+    def load_states(self, checkpoint):
+        raise NotImplementedError(
+            "load_states is not supported in jax operator.")
+
diff --git a/examples/jax/.ray.lock b/examples/jax/.ray.lock
new file mode 100755
index 0000000..e69de29
diff --git a/examples/jax/default_train.csv b/examples/jax/default_train.csv
new file mode 100644
index 0000000..0d23b47
--- /dev/null
+++ b/examples/jax/default_train.csv
@@ -0,0 +1,2 @@
+count_train,mean_train_s,last_train_s,total_train_s,pass_data_train,throughout_train_d
+50,2.456741285324097,2.3998360633850098,164.64879870414734,6400,38.87061460739823
diff --git a/examples/jax/jax_util/__init__.py b/examples/jax/jax_util/__init__.py
new file mode 100644
index 0000000..955ceb9
--- /dev/null
+++ b/examples/jax/jax_util/__init__.py
@@ -0,0 +1,2 @@
+from .datasets import mnist, Dataloader
+from .resnet import ResNet18, ResNet50, ResNet101
\ No newline at end of file
diff --git a/examples/jax/jax_util/datasets.py b/examples/jax/jax_util/datasets.py
new file mode 100644
index 0000000..6bc9385
--- /dev/null
+++ b/examples/jax/jax_util/datasets.py
@@ -0,0 +1,281 @@
+# Copyright 2018 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+ +"""Datasets used in examples.""" + + +import array +import gzip +import os +from os import path +import struct +import urllib.request +from jax.api import F +import jax.numpy as jnp +from jax import jit + +import numpy as np +import numpy.random as npr +import pickle +from functools import partial + +_DATA = "/tmp/jax_example_data/" + + +def _download(url, filename, dataset_name="mnist"): + """Download a url to a file in the JAX data temp directory.""" + root = os.path.join(_DATA,dataset_name) + if not path.exists(root): + os.makedirs(root) + out_file = path.join(root, filename) + if not path.isfile(out_file): + urllib.request.urlretrieve(url, out_file) + print("downloaded {} to {}".format(url, root)) + + +def _partial_flatten(x): + """Flatten all but the first dimension of an ndarray.""" + return np.reshape(x, (x.shape[0], -1)) + + +def _one_hot(x, k, dtype=np.float32): + """Create a one-hot encoding of x of size k.""" + return np.asarray(x[:, None] == np.arange(k), dtype) + +# @partial(jit, static_argnums=1) +def _one_hot_jit(x, k, dtype=np.float32): + """Create a one-hot encoding of x of size k.""" + return jnp.asarray(x[:, None] == jnp.arange(0, k), dtype) + +def mnist_raw(): + """Download and parse the raw MNIST dataset.""" + # CVDF mirror of http://yann.lecun.com/exdb/mnist/ + base_url = "https://storage.googleapis.com/cvdf-datasets/mnist/" + + def parse_labels(filename): + with gzip.open(filename, "rb") as fh: + _ = struct.unpack(">II", fh.read(8)) + return np.array(array.array("B", fh.read()), dtype=np.uint8) + + def parse_images(filename): + with gzip.open(filename, "rb") as fh: + _, num_data, rows, cols = struct.unpack(">IIII", fh.read(16)) + return np.array(array.array("B", fh.read()), + dtype=np.uint8).reshape(num_data, rows, cols) + + for filename in ["train-images-idx3-ubyte.gz", "train-labels-idx1-ubyte.gz", + "t10k-images-idx3-ubyte.gz", "t10k-labels-idx1-ubyte.gz"]: + _download(base_url + filename, filename) + + train_images = parse_images(path.join(_DATA, "mnist", "train-images-idx3-ubyte.gz")) + train_labels = parse_labels(path.join(_DATA, "mnist", "train-labels-idx1-ubyte.gz")) + test_images = parse_images(path.join(_DATA, "mnist", "t10k-images-idx3-ubyte.gz")) + test_labels = parse_labels(path.join(_DATA, "mnist", "t10k-labels-idx1-ubyte.gz")) + + return train_images, train_labels, test_images, test_labels + + +def mnist(permute_train=False): + """Download, parse and process MNIST data to unit scale and one-hot labels.""" + train_images, train_labels, test_images, test_labels = mnist_raw() + + train_images = _partial_flatten(train_images) / np.float32(255.) + test_images = _partial_flatten(test_images) / np.float32(255.) 
+    train_labels = _one_hot(train_labels, 10)
+    test_labels = _one_hot(test_labels, 10)
+
+    if permute_train:
+        perm = np.random.RandomState(0).permutation(train_images.shape[0])
+        train_images = train_images[perm]
+        train_labels = train_labels[perm]
+
+    return train_images, train_labels, test_images, test_labels
+
+def cifa100_raw():
+    """Download and parse the raw CIFAR-100 dataset."""
+    base_url = "http://www.cs.toronto.edu/~kriz/"
+
+    def load_CIFAR_batch(root, mode="train"):
+        """ load single batch of cifar """
+        if mode == "train":
+            filename = path.join(root, "train")
+        elif mode == "test":
+            filename = path.join(root, "test")
+        else:
+            raise RuntimeError("Error: unrecognized mode",
+                               " Got {}".format(mode))
+
+        with open(filename, 'rb')as f:
+            datadict = pickle.load(f,encoding='bytes')
+            X = datadict[b'data']
+            Y = datadict[b'fine_labels']
+            if mode == "train":
+                X = X.reshape(50000, 3, 32, 32)
+            else:
+                X = X.reshape(10000, 3, 32, 32)
+            return np.array(X), np.array(Y)
+
+    for filename in ["cifar-100-python.tar.gz"]:
+        _download(base_url + filename, filename, dataset_name="cifa100")
+
+    root = path.join(_DATA, "cifa100")
+
+    if not os.path.exists(path.join(root, "cifar-100-python")):
+        os.system("tar xvf {} -C {}".format(path.join(root, "cifar-100-python.tar.gz"),
+                                            root))
+
+    train_images, train_labels = load_CIFAR_batch(path.join(root, "cifar-100-python"),
+                                                  mode="train")
+    test_images, test_labels = load_CIFAR_batch(path.join(root, "cifar-100-python"),
+                                                mode="test")
+
+    # b"fine_label_names" b"coarse_label_names"
+    # meta_path = path.join(root, "cifar-100-python", "meta")
+    return train_images, train_labels, test_images, test_labels
+
+def cifa100(permute_train=False):
+    """Download, parse and process cifar-100 data to unit scale and one-hot labels."""
+    train_images, train_labels, test_images, test_labels = cifa100_raw()
+
+    train_images = _partial_flatten(train_images) / np.float32(255.)
+    test_images = _partial_flatten(test_images) / np.float32(255.)
+    train_labels = _one_hot(train_labels, 100)
+    test_labels = _one_hot(test_labels, 100)
+
+    if permute_train:
+        perm = np.random.RandomState(0).permutation(train_images.shape[0])
+        train_images = train_images[perm]
+        train_labels = train_labels[perm]
+
+    return train_images, train_labels, test_images, test_labels
+
+
+def cifa10_raw():
+    """Download and parse the raw CIFAR-10 dataset."""
+    base_url = "http://www.cs.toronto.edu/~kriz/"
+
+    def load_CIFAR_batch(root, mode="train"):
+        """ load single batch of cifar """
+        if mode == "train":
+            filenames = []
+            for i in range(1,6):
+                filenames.append(path.join(root, f"data_batch_{i}"))
+        elif mode == "test":
+            filenames = [path.join(root, "test_batch")]
+        else:
+            raise RuntimeError("Error: unrecognized mode",
+                               " Got {}".format(mode))
+        print(filenames)
+        datas = []
+        labels = []
+        for filename in filenames:
+            with open(filename, 'rb')as f:
+                datadict = pickle.load(f,encoding='bytes')
+                X = datadict[b'data']
+                Y = datadict[b'labels']
+                X = X.reshape(10000, 3, 32, 32)
+                datas.append(X)
+                labels.append(Y)
+        return np.concatenate(datas, axis=0), np.concatenate(labels)
+
+    for filename in ["cifar-10-python.tar.gz"]:
+        _download(base_url + filename, filename, dataset_name="cifa10")
+
+    root = path.join(_DATA, "cifa10")
+
+    if not os.path.exists(path.join(root, "cifar-10-batches-py")):
+        os.system("tar xvf {} -C {}".format(path.join(root, "cifar-10-python.tar.gz"),
+                                            root))
+
+    train_images, train_labels = load_CIFAR_batch(path.join(root, "cifar-10-batches-py"),
+                                                  mode="train")
+    test_images, test_labels = load_CIFAR_batch(path.join(root, "cifar-10-batches-py"),
+                                                mode="test")
+    print(test_images.shape)
+
+    # b"fine_label_names" b"coarse_label_names"
+    # meta_path = path.join(root, "cifar-100-python", "meta")
+    return train_images, train_labels, test_images, test_labels
+
+
+def cifa10(permute_train=False):
+    """Download, parse and process cifar-10 data to unit scale and one-hot labels."""
+    train_images, train_labels, test_images, test_labels = cifa10_raw()
+
+    train_images = _partial_flatten(train_images) / np.float32(255.)
+    test_images = _partial_flatten(test_images) / np.float32(255.)
+ train_labels = _one_hot(train_labels, 10) + test_labels = _one_hot(test_labels, 10) + + if permute_train: + perm = np.random.RandomState(0).permutation(train_images.shape[0]) + train_images = train_images[perm] + train_labels = train_labels[perm] + + return train_images, train_labels, test_images, test_labels + + +class Dataloader: + def __init__(self, data, target, batch_size=128, shuffle=False): + ''' + data: shape(width, height, channel, num) + target: shape(num, num_classes) + ''' + self.data = data + self.target = target + self.batch_size = batch_size + num_data = self.target.shape[0] + num_complete_batches, leftover = divmod(num_data, batch_size) + self.num_batches = num_complete_batches + bool(leftover) + self.shuffle = shuffle + + def synth_batches(self): + num_imgs = self.target.shape[0] + rng = npr.RandomState(npr.randint(10)) + perm = rng.permutation(num_imgs) if self.shuffle else np.arange(num_imgs) + for i in range(self.num_batches): + batch_idx = perm[i * self.batch_size:(i + 1) * self.batch_size] + img_batch = self.data[:, :, :, batch_idx] + label_batch = self.target[batch_idx] + yield img_batch, label_batch + + def __iter__(self): + return self.synth_batches() + + def __len__(self): + return self.num_batches + + +if __name__ == "__main__": + train_images, train_labels, test_images, test_labels = cifa10() + + print(type(train_images), type(train_labels)) + print(train_images.shape, train_labels.shape) + print(type(test_images), type(test_labels)) + print(test_images.shape, test_labels.shape) + + train_images, train_labels, test_images, test_labels = cifa100() + + print(type(train_images), type(train_labels)) + print(train_images.shape, train_labels.shape) + print(type(test_images), type(test_labels)) + print(test_images.shape, test_labels.shape) + + # cifa10_filepath = path.join(_DATA, "cifa10", "cifar-10-batches-py/test_batch") + # with open(cifa10_filepath, 'rb')as f: + # datadict = pickle.load(f,encoding='bytes') + # print(datadict.keys()) + # print(datadict[b"data"]) + # print(type(datadict[b"data"])) + # print(len(datadict[b"labels"])) \ No newline at end of file diff --git a/examples/jax/jax_util/resnet.py b/examples/jax/jax_util/resnet.py new file mode 100644 index 0000000..7387b10 --- /dev/null +++ b/examples/jax/jax_util/resnet.py @@ -0,0 +1,221 @@ +# Copyright 2018 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""A mock-up showing a ResNet50 network with training on synthetic data. + +This file uses the stax neural network definition library and the optimizers +optimization library. 
+""" + +import numpy.random as npr + +import jax.numpy as jnp +from jax import jit, grad, random +from jax.experimental import optimizers +from jax.experimental import stax +from jax.experimental.stax import (AvgPool, BatchNorm, Conv, Dense, FanInSum, + FanOut, Flatten, GeneralConv, Identity, + MaxPool, Relu, LogSoftmax) + + +# ResNet blocks compose other layers + +def ConvBlock(kernel_size, filters, strides=(2, 2)): + ks = kernel_size + filters1, filters2, filters3 = filters + Main = stax.serial( + Conv(filters1, (1, 1), strides), BatchNorm(), Relu, + Conv(filters2, (ks, ks), padding='SAME'), BatchNorm(), Relu, + Conv(filters3, (1, 1)), BatchNorm()) + Shortcut = stax.serial(Conv(filters3, (1, 1), strides), BatchNorm()) + return stax.serial(FanOut(2), stax.parallel(Main, Shortcut), FanInSum, Relu) + + +def IdentityBlock(kernel_size, filters): + ks = kernel_size + filters1, filters2 = filters + def make_main(input_shape): + # the number of output channels depends on the number of input channels + return stax.serial( + Conv(filters1, (1, 1)), BatchNorm(), Relu, + Conv(filters2, (ks, ks), padding='SAME'), BatchNorm(), Relu, + Conv(input_shape[3], (1, 1)), BatchNorm()) + Main = stax.shape_dependent(make_main) + return stax.serial(FanOut(2), stax.parallel(Main, Identity), FanInSum, Relu) + + +def BasicBlock(kernel_size, filters, strides=(1, 1)): + ks = kernel_size + filters1, filters2 = filters + Main = stax.serial( + Conv(filters1, (ks, ks), strides, padding='SAME'), BatchNorm(), Relu, + Conv(filters2, (ks, ks), strides, padding='SAME'), BatchNorm()) + + Shortcut = stax.serial(Conv(filters2, (1, 1), strides), BatchNorm()) + return stax.serial(FanOut(2), stax.parallel(Main, Shortcut), FanInSum, Relu) + +def BasicBlock_withoutBN(kernel_size, filters, strides=(1, 1)): + ks = kernel_size + filters1, filters2 = filters + Main = stax.serial( + Conv(filters1, (ks, ks), strides, padding='SAME'), Relu, + Conv(filters2, (ks, ks), strides, padding='SAME')) + + Shortcut = stax.serial(Conv(filters2, (1, 1), strides)) + return stax.serial(FanOut(2), stax.parallel(Main, Shortcut), FanInSum, Relu) + + +def IdentityBlock_withoutBN(kernel_size, filters): + ks = kernel_size + filters1, filters2 = filters + def make_main(input_shape): + # the number of output channels depends on the number of input channels + return stax.serial( + Conv(filters1, (1, 1)), Relu, + Conv(filters2, (ks, ks), padding='SAME'), Relu, + Conv(input_shape[3], (1, 1))) + Main = stax.shape_dependent(make_main) + return stax.serial(FanOut(2), stax.parallel(Main, Identity), FanInSum, Relu) + +# ResNet architectures compose layers and ResNet blocks + +def ResNet101(num_classes): + return stax.serial( + GeneralConv(('HWCN', 'OIHW', 'NHWC'), 64, (7, 7), (2, 2), 'SAME'), + BatchNorm(), Relu, MaxPool((3, 3), strides=(2, 2)), + ConvBlock(3, [64, 64, 256], strides=(1, 1)), + IdentityBlock(3, [64, 64]), + IdentityBlock(3, [64, 64]), + ConvBlock(3, [128, 128, 512]), + IdentityBlock(3, [128, 128]), + IdentityBlock(3, [128, 128]), + IdentityBlock(3, [128, 128]), + ConvBlock(3, [256, 256, 1024]), + IdentityBlock(3, [256, 256]), + IdentityBlock(3, [256, 256]), + IdentityBlock(3, [256, 256]), + IdentityBlock(3, [256, 256]), + IdentityBlock(3, [256, 256]), + IdentityBlock(3, [256, 256]), + IdentityBlock(3, [256, 256]), + IdentityBlock(3, [256, 256]), + IdentityBlock(3, [256, 256]), + IdentityBlock(3, [256, 256]), + IdentityBlock(3, [256, 256]), + IdentityBlock(3, [256, 256]), + IdentityBlock(3, [256, 256]), + IdentityBlock(3, [256, 256]), + IdentityBlock(3, 
[256, 256]), + IdentityBlock(3, [256, 256]), + IdentityBlock(3, [256, 256]), + IdentityBlock(3, [256, 256]), + IdentityBlock(3, [256, 256]), + IdentityBlock(3, [256, 256]), + IdentityBlock(3, [256, 256]), + IdentityBlock(3, [256, 256]), + ConvBlock(3, [512, 512, 2048]), + IdentityBlock(3, [512, 512]), + IdentityBlock(3, [512, 512]), + AvgPool((7, 7), padding="SAME"), Flatten, Dense(num_classes), LogSoftmax) + + +def ResNet50(num_classes): + return stax.serial( + GeneralConv(('HWCN', 'OIHW', 'NHWC'), 64, (7, 7), (2, 2), 'SAME'), + BatchNorm(), Relu, MaxPool((3, 3), strides=(2, 2)), + ConvBlock(3, [64, 64, 256], strides=(1, 1)), + IdentityBlock(3, [64, 64]), + IdentityBlock(3, [64, 64]), + ConvBlock(3, [128, 128, 512]), + IdentityBlock(3, [128, 128]), + IdentityBlock(3, [128, 128]), + IdentityBlock(3, [128, 128]), + ConvBlock(3, [256, 256, 1024]), + IdentityBlock(3, [256, 256]), + IdentityBlock(3, [256, 256]), + IdentityBlock(3, [256, 256]), + IdentityBlock(3, [256, 256]), + IdentityBlock(3, [256, 256]), + ConvBlock(3, [512, 512, 2048]), + IdentityBlock(3, [512, 512]), + IdentityBlock(3, [512, 512]), + AvgPool((7, 7), padding="SAME"), Flatten, Dense(num_classes), LogSoftmax) + + +def ResNet18(num_classes): + return stax.serial( + GeneralConv(('HWCN', 'OIHW', 'NHWC'), 1, (7, 7), (2, 2), 'SAME'), + BatchNorm(), Relu, MaxPool((3, 3), strides=(2, 2)), + BasicBlock(3, [64, 64]), + IdentityBlock(3, [64, 64]), + BasicBlock(3, [128, 128]), + IdentityBlock(3, [128, 128]), + BasicBlock(3, [256, 256]), + IdentityBlock(3, [256, 256]), + BasicBlock(3, [512, 512]), + IdentityBlock(3, [512, 512]), + AvgPool((7, 7), padding="SAME"), Flatten, Dense(num_classes), LogSoftmax) + + +def MLP(num_classes): + return stax.serial( + Flatten, + Dense(32), BatchNorm(), Relu, + Dense(128), BatchNorm(), Relu, + Dense(num_classes), LogSoftmax) + + +if __name__ == "__main__": + rng_key = random.PRNGKey(0) + + batch_size = 8 + num_classes = 1001 + input_shape = (224, 224, 3, batch_size) + step_size = 0.1 + num_steps = 10 + + init_fun, predict_fun = ResNet50(num_classes) + _, init_params = init_fun(rng_key, input_shape) + + def loss(params, batch): + inputs, targets = batch + logits = predict_fun(params, inputs) + return -jnp.sum(logits * targets) + + def accuracy(params, batch): + inputs, targets = batch + target_class = jnp.argmax(targets, axis=-1) + predicted_class = jnp.argmax(predict_fun(params, inputs), axis=-1) + return jnp.mean(predicted_class == target_class) + + def synth_batches(): + rng = npr.RandomState(0) + while True: + images = rng.rand(*input_shape).astype('float32') + labels = rng.randint(num_classes, size=(batch_size, 1)) + onehot_labels = labels == jnp.arange(num_classes) + yield images, onehot_labels + + opt_init, opt_update, get_params = optimizers.momentum(step_size, mass=0.9) + batches = synth_batches() + + @jit + def update(i, opt_state, batch): + params = get_params(opt_state) + return opt_update(i, grad(loss)(params, batch), opt_state) + + opt_state = opt_init(init_params) + for i in range(num_steps): + opt_state = update(i, opt_state, next(batches)) + trained_params = get_params(opt_state) diff --git a/examples/jax/mnist_jax_example.py b/examples/jax/mnist_jax_example.py new file mode 100644 index 0000000..09a36ad --- /dev/null +++ b/examples/jax/mnist_jax_example.py @@ -0,0 +1,120 @@ +import os +import argparse + +from filelock import FileLock + +from tqdm import trange + +import ray +from distml.operator.jax_operator import JAXTrainingOperator +from distml.strategy.allreduce_strategy import 
AllReduceStrategy + +from ray.util.sgd.utils import BATCH_SIZE, override + +import numpy as np +import numpy.random as npr +import jax +from jax import jit, grad, random +from jax.tree_util import tree_flatten +from jax.experimental import optimizers +from jax.lib import xla_client +import jax.numpy as jnp +from jax_util.resnet import ResNet18, ResNet50, ResNet101 +from jax_util.datasets import mnist, Dataloader + + +def initialization_hook(): + # Need this for avoiding a connection restart issue on AWS. + os.environ["NCCL_SOCKET_IFNAME"] = "^docker0,lo" + os.environ["NCCL_LL_THRESHOLD"] = "0" + + # set the below if needed + # print("NCCL DEBUG SET") + # os.environ["NCCL_DEBUG"] = "INFO" + + +class MnistTrainingOperator(JAXTrainingOperator): + @override(JAXTrainingOperator) + def setup(self, config): + batch_size = config["batch_size"] + rng_key = random.PRNGKey(0) + input_shape = (28, 28, 1, batch_size) + lr = config["lr"] + model_name = config["model_name"] + num_classes = config["num_classes"] + + if model_name == "resnet18": + init_fun, predict_fun = ResNet18(num_classes) + elif model_name == "resnet50": + init_fun, predict_fun = ResNet50(num_classes) + elif model_name == "resnet101": + init_fun, predict_fun = ResNet101(num_classes) + else: + raise RuntimeError("Unrecognized model name") + + _, init_params = init_fun(rng_key, input_shape) + + opt_init, opt_update, get_params = optimizers.adam(lr) + opt_state = opt_init(init_params) + + with FileLock(".ray.lock"): + train_images, train_labels, test_images, test_labels = mnist() + + train_images = train_images.reshape(train_images.shape[0], 1, 28, 28).transpose(2, 3, 1, 0) + test_images = test_images.reshape(test_images.shape[0], 1, 28, 28).transpose(2, 3, 1, 0) + + train_loader = Dataloader(train_images, train_labels, batch_size=batch_size, shuffle=True) + test_loader = Dataloader(test_images, test_labels, batch_size=batch_size) + + self.register(model=[opt_state, init_fun, predict_fun], optimizer=[opt_init, opt_update, get_params], criterion=lambda logits, targets:-jnp.sum(logits * targets)) + + self.register_data(train_loader=train_loader, validation_loader=test_loader) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument( + "--address", + required=False, + type=str, + help="the address to use for connecting to the Ray cluster") + parser.add_argument( + "--num-workers", + "-n", + type=int, + default=2, + help="Sets number of workers for training.") + parser.add_argument( + "--num-epochs", type=int, default=20, help="Number of epochs to train.") + parser.add_argument( + "--fp16", + action="store_true", + default=False, + help="Enables FP16 training with apex. 
Requires `use-gpu`.")
    parser.add_argument(
        "--model-name", type=str, default="resnet18", help="model, Optional: resnet18, resnet50, resnet101.")

    args, _ = parser.parse_known_args()

    if args.address:
        ray.init(args.address)
    else:
        ray.init(num_gpus=args.num_workers, num_cpus=args.num_workers * 2, log_to_driver=True)

    strategy = AllReduceStrategy(
        training_operator_cls=MnistTrainingOperator,
        world_size=args.num_workers,
        operator_config={
            "lr": 0.01,
            "batch_size": 128 ,
            "num_workers": args.num_workers,
            "num_classes": 10,
            "model_name": args.model_name
        })

    for i in range(args.num_epochs):
        strategy.train()
        print(strategy.validate())

    strategy.shutdown()
    print("success!")

From da251fbd218ac415fb39b09b4ef10f054abff740 Mon Sep 17 00:00:00 2001
From: Ezra-H
Date: Thu, 29 Apr 2021 01:58:10 +0800
Subject: [PATCH 03/13] lint

---
 distml/operator/jax_operator.py   | 112 ++++++-----
 examples/jax/jax_util/__init__.py |   4 +-
 examples/jax/jax_util/datasets.py |  95 +++++-----
 examples/jax/jax_util/resnet.py   | 299 ++++++++++++++----------------
 examples/jax/mnist_jax_example.py |  60 +++---
 format.sh                         |   6 +-
 6 files changed, 294 insertions(+), 282 deletions(-)

diff --git a/distml/operator/jax_operator.py b/distml/operator/jax_operator.py
index 3ac8cd4..b560617 100644
--- a/distml/operator/jax_operator.py
+++ b/distml/operator/jax_operator.py
@@ -1,23 +1,17 @@
 import numpy as np
 import cupy as cp
-import jax
-from jax import grad, value_and_grad
+from jax import value_and_grad
 import jax.numpy as jnp
 from jax.lib import xla_client
-from jax.dlpack import from_dlpack, to_dlpack
-from jax.tree_util import tree_flatten, tree_unflatten, tree_structure, tree_map, build_tree
+from jax.dlpack import from_dlpack
+from jax.tree_util import tree_flatten, tree_unflatten, tree_structure
 from jax._src.util import unzip2
 from jax.experimental.optimizers import OptimizerState
 
 from .base_operator import TrainingOperator
-from distml.util import ThroughputCollection, func_timer
-from ray.util.sgd.utils import TimerCollection, AverageMeterCollection
-
-import time
 
 
 class JAXTrainingOperator(TrainingOperator):
-
     def __init__(self, operator_config):
         super(JAXTrainingOperator, self).__init__(operator_config)
         # Should be set by users in the `register` function.
@@ -25,7 +19,7 @@ def __init__(self, operator_config):
         self.opt_state = None
         self.init_fun = None
         self.predict_fun = None
-        # optimizer methods 
+        # optimizer methods
         self.opt_init = None
         self.opt_update = None
         self.get_params = None
@@ -39,47 +33,51 @@ def __init__(self, operator_config):
         self.setup(operator_config)
 
         if hasattr(operator_config, "jit_mode"):
-            assert not operator_config["jit_mode"], "jit is not supported in jax operator."
+            if operator_config["jit_mode"]:
+                raise NotImplementedError("jit is not supported in jax operator.")
 
         self.train_step_num = 0
 
     def setup(self, *args, **kwargs):
         """Function that needs to be overridden by users.
-
+
         Example:
-            # some code is the same for all users, maybe we can put it in register.
+            # some code is the same for all users,
+            # maybe we can put it in register.
             rng_key = random.PRNGKey(0)
             input_shape = (28, 28, 1, batch_size)
             lr=0.01
             init_fun, predict_fun = ResNet18(num_classes)
             _, init_params = init_fun(rng_key, input_shape)
-
+
             opt_init, opt_update, get_params = optimizers.adam(lr)
             opt_state = opt_init(init_params)
-
-            self.register(model=(opt_state, get_params, predict_fun), optimizer=opt_update, criterion=lambda logits, targets:-jnp.sum(logits * targets))
-
+
+            self.register(model=(opt_state, get_params, predict_fun),
+                          optimizer=opt_update,
+                          criterion=lambda logits, \
+                          targets:-jnp.sum(logits * targets))
+
         """
-
         pass
 
-    def register(self, 
+    def register(self,
                  *,
-                 model, 
-                 optimizer, 
-                 criterion, 
-                 lr_schedulers=None, 
+                 model,
+                 optimizer,
+                 criterion,
+                 lr_schedulers=None,
                  jit_mode=False):
         """Register critical information about the model with the operator."""
         self.criterion = criterion
         if lr_schedulers:
             self.lr_schedulers = lr_schedulers
-            print("WARNING: jax does not support learning rate schedulers."
+            print("WARNING: jax does not support learning rate schedulers. "
                   "This will not work.")
-
+
         self._register_model(model)
         self._register_optimizer(optimizer)
-
+
     def _register_model(self, model):
         """Register model components.
@@ -108,7 +106,8 @@ def _get_validation_loader(self):
     def loss_func(self, params, batch):
         """A function to calculate predictions and loss value.
 
-        This function is going to be decorated by `grad` in Jax to calculate gradients.
+        This function is going to be decorated by
+        `grad` in Jax to calculate gradients.
 
         Args:
             batch (tuple): a data batch containing a feature/target pair.
@@ -133,20 +132,28 @@ def derive_updates(self, batch):
         assert tree == self.opt_state[1]
 
         if hasattr(self, "preset_keys"):
-            gradient_dict = {k:g for k, g in zip(self.preset_keys, gradient_dict)}
+            gradient_dict = {
+                k: g
+                for k, g in zip(self.preset_keys, gradient_dict)
+            }
         else:
-            gradient_dict = {f"{idx}":g for idx, g in enumerate(gradient_dict)}
+            gradient_dict = {
+                f"{idx}": g
+                for idx, g in enumerate(gradient_dict)
+            }
         return loss_val.item(), gradient_dict
-
+
     def apply_updates(self, updates):
         """Set and apply the updates using the opt_update in Jax.
 
         Args:
             updates (dict): a dictionary of parameter name and updates.
         """
-        keys, updates = unzip2(sorted(updates.items(), key=lambda d: int(d[0])))
+        keys, updates = unzip2(
+            sorted(updates.items(), key=lambda d: int(d[0])))
         updates = tree_unflatten(self.opt_state[1], updates)
-        self.opt_state = self.opt_update(self.train_step_num, updates, self.opt_state)
+        self.opt_state = self.opt_update(self.train_step_num, updates,
+                                         self.opt_state)
         self.train_step_num += 1
 
     def to_cupy(self, tensor):
@@ -158,8 +165,9 @@ def to_cupy(self, tensor):
 
     def to_operator_tensor(self, tensor):
         """Convert a cupy tensor to jax tensor.
-
-        There is a known bug: the tensor layouts as interpreted by cupy and jax differ, but dlpack does not convert the layout.
+
+        There is a known bug: the tensor layouts as interpreted by cupy
+        and jax differ, but dlpack does not convert the layout.
         """
         if isinstance(tensor, list):
             return list(map(self.to_operator_tensor, tensor))
@@ -177,8 +185,8 @@ def _calculate_gradient(self, opt_state, batch):
         The jax api might return a different pointer address after the conversion.
         We use the xla api to avoid this bug in the jax api.
         """
-        return xla_client._xla.buffer_to_dlpack_managed_tensor(tensor.device_buffer,
-                                                               take_ownership=False)
+        return xla_client._xla.buffer_to_dlpack_managed_tensor(
+            tensor.device_buffer, take_ownership=False)
 
     def validate_batch(self, batch):
         """Perform validation over a data batch.
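For context, the call pattern the `derive_updates`/`apply_updates` pair reflowed above is designed for, as its docstring describes: a sketch only, where `operator` and `batch` are assumed to be supplied by the strategy's worker loop rather than defined in this patch.

    # One training iteration, as a strategy is expected to drive it (sketch):
    loss, gradient_dict = operator.derive_updates(batch)  # forward + backward
    # ... the strategy averages gradient_dict across workers (e.g. allreduce) ...
    operator.apply_updates(gradient_dict)  # opt_update plus step-counter bump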
@@ -220,32 +228,36 @@ def get_parameters(self, cpu):
 
     def get_named_parameters(self, cpu):
         """Get the named parameters.
-
+
         In jax, we need to construct a dict to contain the parameters.
         """
         params = self.get_parameters(cpu)
         if hasattr(self, "preset_keys"):
-            dict_params = {name:p for name, p in zip(self.preset_keys, params)}
+            dict_params = {
+                name: p
+                for name, p in zip(self.preset_keys, params)
+            }
         else:
-            dict_params = {f"{idx}":p for idx, p in enumerate(params)}
+            dict_params = {f"{idx}": p for idx, p in enumerate(params)}
         return dict_params
 
-    # TODO(HUI): used in load states or load parameters 
+    # TODO(HUI): used in load states or load parameters
     def set_parameters(self, new_params):
         """Use new parameters to replace model parameters.
-
+
         In jax, we need to construct a dict to contain the parameters.
-
+
         Args:
             new_params (dict): New parameters to update the current model.
         """
         assert isinstance(new_params, dict)
 
-        keys, new_params = unzip2(sorted(new_params.items(), key=lambda d: int(d[0])))
+        keys, new_params = unzip2(
+            sorted(new_params.items(), key=lambda d: int(d[0])))
         self.preset_keys = keys
 
         if not hasattr(self, "tree"):
-            self.tree = tree_structure(self.get_params(self.opt_state)) 
+            self.tree = tree_structure(self.get_params(self.opt_state))
 
         states_flat, tree, subtrees = self.opt_state
 
@@ -261,10 +273,12 @@ def update(param, state):
 
         if not new_subtrees:
             raise RuntimeError("subtrees of new params is empty.")
-        for idx, (subtree, new_subtree) in enumerate(zip(subtrees, new_subtrees)):
+        for idx, (subtree, new_subtree) in enumerate(
+                zip(subtrees, new_subtrees)):
             if new_subtree != subtree:
-                msg = ("input structure did not match the saved params structure. "
-                       "input {} and output {}.")
+                msg = (
+                    "input structure did not match the saved params structure. "
+                    "input {} and output {}.")
                 raise TypeError(msg.format(subtree, new_subtree))
 
         self.opt_state = OptimizerState(new_states_flat, tree, new_subtrees)
@@ -322,10 +336,8 @@ def save_states(self, states):
             "save_states is not supported in jax operator.")
 
     def get_states(self, states):
-        raise NotImplementedError(
-            "get_states is not supported in jax operator.")
+        raise NotImplementedError("get_states is not supported in jax operator.")
 
     def load_states(self, checkpoint):
         raise NotImplementedError(
             "load_states is not supported in jax operator.")
-
diff --git a/examples/jax/jax_util/__init__.py b/examples/jax/jax_util/__init__.py
index 955ceb9..bcf82e0 100644
--- a/examples/jax/jax_util/__init__.py
+++ b/examples/jax/jax_util/__init__.py
@@ -1,2 +1,2 @@
-from .datasets import mnist, Dataloader
-from .resnet import ResNet18, ResNet50, ResNet101
\ No newline at end of file
+from .datasets import mnist, Dataloader  # noqa: F401
+from .resnet import ResNet18, ResNet50, ResNet101  # noqa: F401
diff --git a/examples/jax/jax_util/datasets.py b/examples/jax/jax_util/datasets.py
index 6bc9385..a2b8cf0 100644
--- a/examples/jax/jax_util/datasets.py
+++ b/examples/jax/jax_util/datasets.py
@@ -11,31 +11,26 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
- """Datasets used in examples.""" - import array import gzip import os from os import path import struct import urllib.request -from jax.api import F import jax.numpy as jnp -from jax import jit import numpy as np import numpy.random as npr import pickle -from functools import partial _DATA = "/tmp/jax_example_data/" def _download(url, filename, dataset_name="mnist"): """Download a url to a file in the JAX data temp directory.""" - root = os.path.join(_DATA,dataset_name) + root = os.path.join(_DATA, dataset_name) if not path.exists(root): os.makedirs(root) out_file = path.join(root, filename) @@ -53,11 +48,13 @@ def _one_hot(x, k, dtype=np.float32): """Create a one-hot encoding of x of size k.""" return np.asarray(x[:, None] == np.arange(k), dtype) + # @partial(jit, static_argnums=1) def _one_hot_jit(x, k, dtype=np.float32): """Create a one-hot encoding of x of size k.""" return jnp.asarray(x[:, None] == jnp.arange(0, k), dtype) + def mnist_raw(): """Download and parse the raw MNIST dataset.""" # CVDF mirror of http://yann.lecun.com/exdb/mnist/ @@ -71,23 +68,33 @@ def parse_labels(filename): def parse_images(filename): with gzip.open(filename, "rb") as fh: _, num_data, rows, cols = struct.unpack(">IIII", fh.read(16)) - return np.array(array.array("B", fh.read()), - dtype=np.uint8).reshape(num_data, rows, cols) - - for filename in ["train-images-idx3-ubyte.gz", "train-labels-idx1-ubyte.gz", - "t10k-images-idx3-ubyte.gz", "t10k-labels-idx1-ubyte.gz"]: + return np.array( + array.array("B", fh.read()), dtype=np.uint8).reshape( + num_data, rows, cols) + + for filename in [ + "train-images-idx3-ubyte.gz", "train-labels-idx1-ubyte.gz", + "t10k-images-idx3-ubyte.gz", "t10k-labels-idx1-ubyte.gz" + ]: _download(base_url + filename, filename) - train_images = parse_images(path.join(_DATA, "mnist", "train-images-idx3-ubyte.gz")) - train_labels = parse_labels(path.join(_DATA, "mnist", "train-labels-idx1-ubyte.gz")) - test_images = parse_images(path.join(_DATA, "mnist", "t10k-images-idx3-ubyte.gz")) - test_labels = parse_labels(path.join(_DATA, "mnist", "t10k-labels-idx1-ubyte.gz")) + train_images = parse_images( + path.join(_DATA, "mnist", "train-images-idx3-ubyte.gz")) + train_labels = parse_labels( + path.join(_DATA, "mnist", "train-labels-idx1-ubyte.gz")) + test_images = parse_images( + path.join(_DATA, "mnist", "t10k-images-idx3-ubyte.gz")) + test_labels = parse_labels( + path.join(_DATA, "mnist", "t10k-labels-idx1-ubyte.gz")) return train_images, train_labels, test_images, test_labels def mnist(permute_train=False): - """Download, parse and process MNIST data to unit scale and one-hot labels.""" + """ + Download, parse and process MNIST data + to unit scale and one-hot labels. + """ train_images, train_labels, test_images, test_labels = mnist_raw() train_images = _partial_flatten(train_images) / np.float32(255.) 
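A quick worked example of the broadcasting trick that `_one_hot` above relies on (plain numpy, not part of the patch):

    import numpy as np

    x = np.array([0, 2, 1])
    # x[:, None] has shape (3, 1); comparing it with np.arange(3) of shape (3,)
    # broadcasts to a (3, 3) boolean matrix, which the cast turns into float32.
    print(np.asarray(x[:, None] == np.arange(3), np.float32))
    # [[1. 0. 0.]
    #  [0. 0. 1.]
    #  [0. 1. 0.]]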
@@ -102,6 +109,7 @@ def mnist(permute_train=False):
 
     return train_images, train_labels, test_images, test_labels
 
+
 def cifa100_raw():
     """Download and parse the raw CIFAR-100 dataset."""
     base_url = "http://www.cs.toronto.edu/~kriz/"
@@ -116,8 +124,8 @@ def load_CIFAR_batch(root, mode="train"):
             raise RuntimeError("Error: unrecognized mode",
                                " Got {}".format(mode))
 
-        with open(filename, 'rb')as f:
-            datadict = pickle.load(f,encoding='bytes')
+        with open(filename, 'rb') as f:
+            datadict = pickle.load(f, encoding='bytes')
             X = datadict[b'data']
             Y = datadict[b'fine_labels']
             if mode == "train":
@@ -132,20 +140,23 @@ def load_CIFAR_batch(root, mode="train"):
     root = path.join(_DATA, "cifa100")
 
     if not os.path.exists(path.join(root, "cifar-100-python")):
-        os.system("tar xvf {} -C {}".format(path.join(root, "cifar-100-python.tar.gz"),
-                                            root))
+        os.system("tar xvf {} -C {}".format(
+            path.join(root, "cifar-100-python.tar.gz"), root))
 
-    train_images, train_labels = load_CIFAR_batch(path.join(root, "cifar-100-python"),
-                                                  mode="train")
-    test_images, test_labels = load_CIFAR_batch(path.join(root, "cifar-100-python"),
-                                                mode="test")
+    train_images, train_labels = load_CIFAR_batch(
+        path.join(root, "cifar-100-python"), mode="train")
+    test_images, test_labels = load_CIFAR_batch(
+        path.join(root, "cifar-100-python"), mode="test")
 
     # b"fine_label_names" b"coarse_label_names"
     # meta_path = path.join(root, "cifar-100-python", "meta")
     return train_images, train_labels, test_images, test_labels
 
+
 def cifa100(permute_train=False):
-    """Download, parse and process cifar-100 data to unit scale and one-hot labels."""
+    """
+    Download, parse and process cifar-100 data to unit scale and one-hot labels.
+    """
     train_images, train_labels, test_images, test_labels = cifa100_raw()
 
     train_images = _partial_flatten(train_images) / np.float32(255.)
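For orientation, a short usage sketch of the `Dataloader` defined later in this file, mirroring how mnist_jax_example.py feeds it; the reshape/transpose produces the (height, width, channel, num) layout its docstring expects. Illustrative only, not part of the patch:

    from jax_util.datasets import mnist, Dataloader

    train_images, train_labels, _, _ = mnist()
    # (60000, 784) -> (28, 28, 1, 60000): batching happens along the last axis.
    train_images = train_images.reshape(-1, 1, 28, 28).transpose(2, 3, 1, 0)
    train_loader = Dataloader(train_images, train_labels, batch_size=128, shuffle=True)
    for img_batch, label_batch in train_loader:
        print(img_batch.shape, label_batch.shape)  # (28, 28, 1, 128) (128, 10)
        break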
@@ -169,7 +180,7 @@ def load_CIFAR_batch(root, mode="train"):
         """ load single batch of cifar """
         if mode == "train":
             filenames = []
-            for i in range(1,6):
+            for i in range(1, 6):
                 filenames.append(path.join(root, f"data_batch_{i}"))
         elif mode == "test":
             filenames = [path.join(root, "test_batch")]
@@ -180,8 +191,8 @@ def load_CIFAR_batch(root, mode="train"):
         datas = []
         labels = []
         for filename in filenames:
-            with open(filename, 'rb')as f:
-                datadict = pickle.load(f,encoding='bytes')
+            with open(filename, 'rb') as f:
+                datadict = pickle.load(f, encoding='bytes')
                 X = datadict[b'data']
                 Y = datadict[b'labels']
                 X = X.reshape(10000, 3, 32, 32)
@@ -195,13 +206,13 @@ def load_CIFAR_batch(root, mode="train"):
     root = path.join(_DATA, "cifa10")
 
     if not os.path.exists(path.join(root, "cifar-10-batches-py")):
-        os.system("tar xvf {} -C {}".format(path.join(root, "cifar-10-python.tar.gz"),
-                                            root))
+        os.system("tar xvf {} -C {}".format(
+            path.join(root, "cifar-10-python.tar.gz"), root))
 
-    train_images, train_labels = load_CIFAR_batch(path.join(root, "cifar-10-batches-py"),
-                                                  mode="train")
-    test_images, test_labels = load_CIFAR_batch(path.join(root, "cifar-10-batches-py"),
-                                                mode="test")
+    train_images, train_labels = load_CIFAR_batch(
+        path.join(root, "cifar-10-batches-py"), mode="train")
+    test_images, test_labels = load_CIFAR_batch(
+        path.join(root, "cifar-10-batches-py"), mode="test")
     print(test_images.shape)
 
     # b"fine_label_names" b"coarse_label_names"
@@ -210,7 +221,10 @@ def load_CIFAR_batch(root, mode="train"):
 
 def cifa10(permute_train=False):
-    """Download, parse and process cifar-10 data to unit scale and one-hot labels."""
+    """
+    Download, parse and process cifar-10 data
+    to unit scale and one-hot labels.
+    """
     train_images, train_labels, test_images, test_labels = cifa10_raw()
 
     train_images = _partial_flatten(train_images) / np.float32(255.)
@@ -243,7 +257,8 @@ def __init__(self, data, target, batch_size=128, shuffle=False):
     def synth_batches(self):
         num_imgs = self.target.shape[0]
         rng = npr.RandomState(npr.randint(10))
-        perm = rng.permutation(num_imgs) if self.shuffle else np.arange(num_imgs)
+        perm = rng.permutation(num_imgs) if self.shuffle else np.arange(
+            num_imgs)
         for i in range(self.num_batches):
             batch_idx = perm[i * self.batch_size:(i + 1) * self.batch_size]
             img_batch = self.data[:, :, :, batch_idx]
@@ -271,11 +286,3 @@ def __len__(self):
     print(train_images.shape, train_labels.shape)
     print(type(test_images), type(test_labels))
     print(test_images.shape, test_labels.shape)
-
-    # cifa10_filepath = path.join(_DATA, "cifa10", "cifar-10-batches-py/test_batch")
-    # with open(cifa10_filepath, 'rb')as f:
-    #     datadict = pickle.load(f,encoding='bytes')
-    #     print(datadict.keys())
-    #     print(datadict[b"data"])
-    #     print(type(datadict[b"data"]))
-    #     print(len(datadict[b"labels"]))
\ No newline at end of file
diff --git a/examples/jax/jax_util/resnet.py b/examples/jax/jax_util/resnet.py
index 7387b10..f3e89f5 100644
--- a/examples/jax/jax_util/resnet.py
+++ b/examples/jax/jax_util/resnet.py
@@ -11,7 +11,6 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 """A mock-up showing a ResNet50 network with training on synthetic data.
 
 This file uses the stax neural network definition library and the optimizers
This file uses the stax neural network definition library and the optimizers @@ -28,194 +27,178 @@ FanOut, Flatten, GeneralConv, Identity, MaxPool, Relu, LogSoftmax) - # ResNet blocks compose other layers + def ConvBlock(kernel_size, filters, strides=(2, 2)): - ks = kernel_size - filters1, filters2, filters3 = filters - Main = stax.serial( - Conv(filters1, (1, 1), strides), BatchNorm(), Relu, - Conv(filters2, (ks, ks), padding='SAME'), BatchNorm(), Relu, - Conv(filters3, (1, 1)), BatchNorm()) - Shortcut = stax.serial(Conv(filters3, (1, 1), strides), BatchNorm()) - return stax.serial(FanOut(2), stax.parallel(Main, Shortcut), FanInSum, Relu) + ks = kernel_size + filters1, filters2, filters3 = filters + Main = stax.serial( + Conv(filters1, (1, 1), strides), BatchNorm(), Relu, + Conv(filters2, (ks, ks), padding='SAME'), BatchNorm(), Relu, + Conv(filters3, (1, 1)), BatchNorm()) + Shortcut = stax.serial(Conv(filters3, (1, 1), strides), BatchNorm()) + return stax.serial( + FanOut(2), stax.parallel(Main, Shortcut), FanInSum, Relu) def IdentityBlock(kernel_size, filters): - ks = kernel_size - filters1, filters2 = filters - def make_main(input_shape): - # the number of output channels depends on the number of input channels + ks = kernel_size + filters1, filters2 = filters + + def make_main(input_shape): + # the number of output channels depends on the number of input channels + return stax.serial( + Conv(filters1, (1, 1)), BatchNorm(), Relu, + Conv(filters2, (ks, ks), padding='SAME'), BatchNorm(), Relu, + Conv(input_shape[3], (1, 1)), BatchNorm()) + + Main = stax.shape_dependent(make_main) return stax.serial( - Conv(filters1, (1, 1)), BatchNorm(), Relu, - Conv(filters2, (ks, ks), padding='SAME'), BatchNorm(), Relu, - Conv(input_shape[3], (1, 1)), BatchNorm()) - Main = stax.shape_dependent(make_main) - return stax.serial(FanOut(2), stax.parallel(Main, Identity), FanInSum, Relu) + FanOut(2), stax.parallel(Main, Identity), FanInSum, Relu) def BasicBlock(kernel_size, filters, strides=(1, 1)): - ks = kernel_size - filters1, filters2 = filters - Main = stax.serial( - Conv(filters1, (ks, ks), strides, padding='SAME'), BatchNorm(), Relu, - Conv(filters2, (ks, ks), strides, padding='SAME'), BatchNorm()) + ks = kernel_size + filters1, filters2 = filters + Main = stax.serial( + Conv(filters1, (ks, ks), strides, padding='SAME'), BatchNorm(), Relu, + Conv(filters2, (ks, ks), strides, padding='SAME'), BatchNorm()) + + Shortcut = stax.serial(Conv(filters2, (1, 1), strides), BatchNorm()) + return stax.serial( + FanOut(2), stax.parallel(Main, Shortcut), FanInSum, Relu) - Shortcut = stax.serial(Conv(filters2, (1, 1), strides), BatchNorm()) - return stax.serial(FanOut(2), stax.parallel(Main, Shortcut), FanInSum, Relu) def BasicBlock_withoutBN(kernel_size, filters, strides=(1, 1)): - ks = kernel_size - filters1, filters2 = filters - Main = stax.serial( - Conv(filters1, (ks, ks), strides, padding='SAME'), Relu, - Conv(filters2, (ks, ks), strides, padding='SAME')) + ks = kernel_size + filters1, filters2 = filters + Main = stax.serial( + Conv(filters1, (ks, ks), strides, padding='SAME'), Relu, + Conv(filters2, (ks, ks), strides, padding='SAME')) - Shortcut = stax.serial(Conv(filters2, (1, 1), strides)) - return stax.serial(FanOut(2), stax.parallel(Main, Shortcut), FanInSum, Relu) + Shortcut = stax.serial(Conv(filters2, (1, 1), strides)) + return stax.serial( + FanOut(2), stax.parallel(Main, Shortcut), FanInSum, Relu) def IdentityBlock_withoutBN(kernel_size, filters): - ks = kernel_size - filters1, filters2 = filters - 
def make_main(input_shape): - # the number of output channels depends on the number of input channels + ks = kernel_size + filters1, filters2 = filters + + def make_main(input_shape): + # the number of output channels depends on the number of input channels + return stax.serial( + Conv(filters1, (1, 1)), Relu, + Conv(filters2, (ks, ks), padding='SAME'), Relu, + Conv(input_shape[3], (1, 1))) + + Main = stax.shape_dependent(make_main) return stax.serial( - Conv(filters1, (1, 1)), Relu, - Conv(filters2, (ks, ks), padding='SAME'), Relu, - Conv(input_shape[3], (1, 1))) - Main = stax.shape_dependent(make_main) - return stax.serial(FanOut(2), stax.parallel(Main, Identity), FanInSum, Relu) + FanOut(2), stax.parallel(Main, Identity), FanInSum, Relu) + # ResNet architectures compose layers and ResNet blocks + def ResNet101(num_classes): - return stax.serial( - GeneralConv(('HWCN', 'OIHW', 'NHWC'), 64, (7, 7), (2, 2), 'SAME'), - BatchNorm(), Relu, MaxPool((3, 3), strides=(2, 2)), - ConvBlock(3, [64, 64, 256], strides=(1, 1)), - IdentityBlock(3, [64, 64]), - IdentityBlock(3, [64, 64]), - ConvBlock(3, [128, 128, 512]), - IdentityBlock(3, [128, 128]), - IdentityBlock(3, [128, 128]), - IdentityBlock(3, [128, 128]), - ConvBlock(3, [256, 256, 1024]), - IdentityBlock(3, [256, 256]), - IdentityBlock(3, [256, 256]), - IdentityBlock(3, [256, 256]), - IdentityBlock(3, [256, 256]), - IdentityBlock(3, [256, 256]), - IdentityBlock(3, [256, 256]), - IdentityBlock(3, [256, 256]), - IdentityBlock(3, [256, 256]), - IdentityBlock(3, [256, 256]), - IdentityBlock(3, [256, 256]), - IdentityBlock(3, [256, 256]), - IdentityBlock(3, [256, 256]), - IdentityBlock(3, [256, 256]), - IdentityBlock(3, [256, 256]), - IdentityBlock(3, [256, 256]), - IdentityBlock(3, [256, 256]), - IdentityBlock(3, [256, 256]), - IdentityBlock(3, [256, 256]), - IdentityBlock(3, [256, 256]), - IdentityBlock(3, [256, 256]), - IdentityBlock(3, [256, 256]), - IdentityBlock(3, [256, 256]), - ConvBlock(3, [512, 512, 2048]), - IdentityBlock(3, [512, 512]), - IdentityBlock(3, [512, 512]), - AvgPool((7, 7), padding="SAME"), Flatten, Dense(num_classes), LogSoftmax) + return stax.serial( + GeneralConv(('HWCN', 'OIHW', 'NHWC'), 64, (7, 7), (2, 2), 'SAME'), + BatchNorm(), Relu, MaxPool((3, 3), strides=(2, 2)), + ConvBlock(3, [64, 64, 256], strides=(1, + 1)), IdentityBlock(3, [64, 64]), + IdentityBlock(3, [64, 64]), ConvBlock(3, [128, 128, 512]), + IdentityBlock(3, [128, 128]), IdentityBlock(3, [128, 128]), + IdentityBlock(3, [128, 128]), ConvBlock(3, [256, 256, 1024]), + IdentityBlock(3, [256, 256]), IdentityBlock(3, [256, 256]), + IdentityBlock(3, [256, 256]), IdentityBlock(3, [256, 256]), + IdentityBlock(3, [256, 256]), IdentityBlock(3, [256, 256]), + IdentityBlock(3, [256, 256]), IdentityBlock(3, [256, 256]), + IdentityBlock(3, [256, 256]), IdentityBlock(3, [256, 256]), + IdentityBlock(3, [256, 256]), IdentityBlock(3, [256, 256]), + IdentityBlock(3, [256, 256]), IdentityBlock(3, [256, 256]), + IdentityBlock(3, [256, 256]), IdentityBlock(3, [256, 256]), + IdentityBlock(3, [256, 256]), IdentityBlock(3, [256, 256]), + IdentityBlock(3, [256, 256]), IdentityBlock(3, [256, 256]), + IdentityBlock(3, [256, 256]), IdentityBlock(3, [256, 256]), + ConvBlock(3, [512, 512, 2048]), IdentityBlock(3, [512, 512]), + IdentityBlock(3, [512, 512]), AvgPool((7, 7), padding="SAME"), Flatten, + Dense(num_classes), LogSoftmax) def ResNet50(num_classes): - return stax.serial( - GeneralConv(('HWCN', 'OIHW', 'NHWC'), 64, (7, 7), (2, 2), 'SAME'), - BatchNorm(), Relu, MaxPool((3, 3), 
strides=(2, 2)), - ConvBlock(3, [64, 64, 256], strides=(1, 1)), - IdentityBlock(3, [64, 64]), - IdentityBlock(3, [64, 64]), - ConvBlock(3, [128, 128, 512]), - IdentityBlock(3, [128, 128]), - IdentityBlock(3, [128, 128]), - IdentityBlock(3, [128, 128]), - ConvBlock(3, [256, 256, 1024]), - IdentityBlock(3, [256, 256]), - IdentityBlock(3, [256, 256]), - IdentityBlock(3, [256, 256]), - IdentityBlock(3, [256, 256]), - IdentityBlock(3, [256, 256]), - ConvBlock(3, [512, 512, 2048]), - IdentityBlock(3, [512, 512]), - IdentityBlock(3, [512, 512]), - AvgPool((7, 7), padding="SAME"), Flatten, Dense(num_classes), LogSoftmax) + return stax.serial( + GeneralConv(('HWCN', 'OIHW', 'NHWC'), 64, (7, 7), (2, 2), 'SAME'), + BatchNorm(), Relu, MaxPool((3, 3), strides=(2, 2)), + ConvBlock(3, [64, 64, 256], strides=(1, 1)), IdentityBlock( + 3, [64, 64]), IdentityBlock(3, [64, 64]), + ConvBlock(3, [128, 128, 512]), IdentityBlock(3, [128, 128]), + IdentityBlock(3, [128, 128]), IdentityBlock(3, [128, 128]), + ConvBlock(3, [256, 256, 1024]), IdentityBlock(3, [256, 256]), + IdentityBlock(3, [256, 256]), IdentityBlock(3, [256, 256]), + IdentityBlock(3, [256, 256]), IdentityBlock(3, [256, 256]), + ConvBlock(3, [512, 512, 2048]), IdentityBlock(3, [512, 512]), + IdentityBlock(3, [512, 512]), AvgPool((7, 7), padding="SAME"), Flatten, + Dense(num_classes), LogSoftmax) def ResNet18(num_classes): - return stax.serial( - GeneralConv(('HWCN', 'OIHW', 'NHWC'), 1, (7, 7), (2, 2), 'SAME'), - BatchNorm(), Relu, MaxPool((3, 3), strides=(2, 2)), - BasicBlock(3, [64, 64]), - IdentityBlock(3, [64, 64]), - BasicBlock(3, [128, 128]), - IdentityBlock(3, [128, 128]), - BasicBlock(3, [256, 256]), - IdentityBlock(3, [256, 256]), - BasicBlock(3, [512, 512]), - IdentityBlock(3, [512, 512]), - AvgPool((7, 7), padding="SAME"), Flatten, Dense(num_classes), LogSoftmax) + return stax.serial( + GeneralConv(('HWCN', 'OIHW', 'NHWC'), 1, (7, 7), (2, 2), 'SAME'), + BatchNorm(), Relu, MaxPool((3, 3), strides=(2, 2)), + BasicBlock(3, [64, 64]), IdentityBlock(3, [64, 64]), + BasicBlock(3, [128, 128]), IdentityBlock(3, [128, 128]), + BasicBlock(3, [256, 256]), IdentityBlock(3, [256, 256]), + BasicBlock(3, [512, 512]), IdentityBlock(3, [512, 512]), + AvgPool((7, 7), padding="SAME"), Flatten, Dense(num_classes), + LogSoftmax) def MLP(num_classes): - return stax.serial( - Flatten, - Dense(32), BatchNorm(), Relu, - Dense(128), BatchNorm(), Relu, - Dense(num_classes), LogSoftmax) + return stax.serial(Flatten, Dense(32), BatchNorm(), Relu, Dense(128), + BatchNorm(), Relu, Dense(num_classes), LogSoftmax) if __name__ == "__main__": - rng_key = random.PRNGKey(0) - - batch_size = 8 - num_classes = 1001 - input_shape = (224, 224, 3, batch_size) - step_size = 0.1 - num_steps = 10 - - init_fun, predict_fun = ResNet50(num_classes) - _, init_params = init_fun(rng_key, input_shape) - - def loss(params, batch): - inputs, targets = batch - logits = predict_fun(params, inputs) - return -jnp.sum(logits * targets) - - def accuracy(params, batch): - inputs, targets = batch - target_class = jnp.argmax(targets, axis=-1) - predicted_class = jnp.argmax(predict_fun(params, inputs), axis=-1) - return jnp.mean(predicted_class == target_class) - - def synth_batches(): - rng = npr.RandomState(0) - while True: - images = rng.rand(*input_shape).astype('float32') - labels = rng.randint(num_classes, size=(batch_size, 1)) - onehot_labels = labels == jnp.arange(num_classes) - yield images, onehot_labels - - opt_init, opt_update, get_params = optimizers.momentum(step_size, mass=0.9) - batches 
= synth_batches() - - @jit - def update(i, opt_state, batch): - params = get_params(opt_state) - return opt_update(i, grad(loss)(params, batch), opt_state) - - opt_state = opt_init(init_params) - for i in range(num_steps): - opt_state = update(i, opt_state, next(batches)) - trained_params = get_params(opt_state) + rng_key = random.PRNGKey(0) + + batch_size = 8 + num_classes = 1001 + input_shape = (224, 224, 3, batch_size) + step_size = 0.1 + num_steps = 10 + + init_fun, predict_fun = ResNet50(num_classes) + _, init_params = init_fun(rng_key, input_shape) + + def loss(params, batch): + inputs, targets = batch + logits = predict_fun(params, inputs) + return -jnp.sum(logits * targets) + + def accuracy(params, batch): + inputs, targets = batch + target_class = jnp.argmax(targets, axis=-1) + predicted_class = jnp.argmax(predict_fun(params, inputs), axis=-1) + return jnp.mean(predicted_class == target_class) + + def synth_batches(): + rng = npr.RandomState(0) + while True: + images = rng.rand(*input_shape).astype('float32') + labels = rng.randint(num_classes, size=(batch_size, 1)) + onehot_labels = labels == jnp.arange(num_classes) + yield images, onehot_labels + + opt_init, opt_update, get_params = optimizers.momentum(step_size, mass=0.9) + batches = synth_batches() + + @jit + def update(i, opt_state, batch): + params = get_params(opt_state) + return opt_update(i, grad(loss)(params, batch), opt_state) + + opt_state = opt_init(init_params) + for i in range(num_steps): + opt_state = update(i, opt_state, next(batches)) + trained_params = get_params(opt_state) diff --git a/examples/jax/mnist_jax_example.py b/examples/jax/mnist_jax_example.py index 09a36ad..2335b5d 100644 --- a/examples/jax/mnist_jax_example.py +++ b/examples/jax/mnist_jax_example.py @@ -3,23 +3,16 @@ from filelock import FileLock -from tqdm import trange - import ray from distml.operator.jax_operator import JAXTrainingOperator from distml.strategy.allreduce_strategy import AllReduceStrategy -from ray.util.sgd.utils import BATCH_SIZE, override +from ray.util.sgd.utils import override -import numpy as np -import numpy.random as npr -import jax -from jax import jit, grad, random -from jax.tree_util import tree_flatten +from jax import random from jax.experimental import optimizers -from jax.lib import xla_client import jax.numpy as jnp -from jax_util.resnet import ResNet18, ResNet50, ResNet101 +from jax_util.resnet import ResNet18, ResNet50, ResNet101 from jax_util.datasets import mnist, Dataloader @@ -53,22 +46,30 @@ def setup(self, config): raise RuntimeError("Unrecognized model name") _, init_params = init_fun(rng_key, input_shape) - + opt_init, opt_update, get_params = optimizers.adam(lr) opt_state = opt_init(init_params) - + with FileLock(".ray.lock"): train_images, train_labels, test_images, test_labels = mnist() - - train_images = train_images.reshape(train_images.shape[0], 1, 28, 28).transpose(2, 3, 1, 0) - test_images = test_images.reshape(test_images.shape[0], 1, 28, 28).transpose(2, 3, 1, 0) - train_loader = Dataloader(train_images, train_labels, batch_size=batch_size, shuffle=True) - test_loader = Dataloader(test_images, test_labels, batch_size=batch_size) - - self.register(model=[opt_state, init_fun, predict_fun], optimizer=[opt_init, opt_update, get_params], criterion=lambda logits, targets:-jnp.sum(logits * targets)) - - self.register_data(train_loader=train_loader, validation_loader=test_loader) + train_images = train_images.reshape(train_images.shape[0], 1, 28, + 28).transpose(2, 3, 1, 0) + test_images = 
test_images.reshape(test_images.shape[0], 1, 28, + 28).transpose(2, 3, 1, 0) + + train_loader = Dataloader( + train_images, train_labels, batch_size=batch_size, shuffle=True) + test_loader = Dataloader( + test_images, test_labels, batch_size=batch_size) + + self.register( + model=[opt_state, init_fun, predict_fun], + optimizer=[opt_init, opt_update, get_params], + criterion=lambda logits, targets: -jnp.sum(logits * targets)) + + self.register_data( + train_loader=train_loader, validation_loader=test_loader) if __name__ == "__main__": @@ -85,28 +86,37 @@ def setup(self, config): default=2, help="Sets number of workers for training.") parser.add_argument( - "--num-epochs", type=int, default=20, help="Number of epochs to train.") + "--num-epochs", + type=int, + default=20, + help="Number of epochs to train.") parser.add_argument( "--fp16", action="store_true", default=False, help="Enables FP16 training with apex. Requires `use-gpu`.") parser.add_argument( - "--model-name", type=str, default="resnet18", help="model, Optional: resnet18, resnet50, resnet101.") + "--model-name", + type=str, + default="resnet18", + help="model, Optional: resnet18, resnet50, resnet101.") args, _ = parser.parse_known_args() if args.address: ray.init(args.address) else: - ray.init(num_gpus=args.num_workers, num_cpus=args.num_workers * 2, log_to_driver=True) + ray.init( + num_gpus=args.num_workers, + num_cpus=args.num_workers * 2, + log_to_driver=True) strategy = AllReduceStrategy( training_operator_cls=MnistTrainingOperator, world_size=args.num_workers, operator_config={ "lr": 0.01, - "batch_size": 128 , + "batch_size": 128, "num_workers": args.num_workers, "num_classes": 10, "model_name": args.model_name diff --git a/format.sh b/format.sh index bad8cb9..d5abce5 100755 --- a/format.sh +++ b/format.sh @@ -106,14 +106,14 @@ format_changed() { yapf --in-place "${YAPF_EXCLUDES[@]}" "${YAPF_FLAGS[@]}" if which flake8 >/dev/null; then git diff --name-only --diff-filter=ACRM "$MERGEBASE" -- '*.py' | xargs -P 5 \ - flake8 '"' --no-avoid-escape --ignore=N,I,C408,E121,E123,E126,E226,E24,E704,W503,W504,W605 + flake8 '"' --ignore=N,I,C408,E121,E123,E126,E226,E24,E704,W503,W504,W605 fi fi if ! git diff --diff-filter=ACRM --quiet --exit-code "$MERGEBASE" -- '*.pyx' '*.pxd' '*.pxi' &>/dev/null; then if which flake8 >/dev/null; then git diff --name-only --diff-filter=ACRM "$MERGEBASE" -- '*.pyx' '*.pxd' '*.pxi' | xargs -P 5 \ - flake8 '"' --no-avoid-escape --ignore=N,I,C408,E121,E123,E126,E211,E225,E226,E227,E24,E704,E999,W503,W504,W605 + flake8 '"' --ignore=N,I,C408,E121,E123,E126,E211,E225,E226,E227,E24,E704,E999,W503,W504,W605 fi fi } @@ -121,7 +121,7 @@ format_changed() { # Format all files, and print the diff to stdout for travis. format_all() { yapf --diff "${YAPF_FLAGS[@]}" "${YAPF_EXCLUDES[@]}" distml - flake8 '"' --no-avoid-escape --ignore=N,I,C408,E121,E123,E126,E211,E225,E226,E227,E24,E704,E999,W503,W504,W605 distml + flake8 '"' --ignore=N,I,C408,E121,E123,E126,E211,E225,E226,E227,E24,E704,E999,W503,W504,W605 distml } # This flag formats individual files. 
--files *must* be the first command line From 225abfd2208cc7a2076fd4ed4f6ac8efb6ca59c6 Mon Sep 17 00:00:00 2001 From: Ezra-H Date: Thu, 29 Apr 2021 02:10:14 +0800 Subject: [PATCH 04/13] setup string --- distml/operator/jax_operator.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/distml/operator/jax_operator.py b/distml/operator/jax_operator.py index b560617..a96e40f 100644 --- a/distml/operator/jax_operator.py +++ b/distml/operator/jax_operator.py @@ -53,11 +53,11 @@ def setup(self, *args, **kwargs): opt_init, opt_update, get_params = optimizers.adam(lr) opt_state = opt_init(init_params) - self.register(model=(opt_state, get_params, predict_fun), - optimizer=opt_update, - criterion=lambda logits, \ - targets:-jnp.sum(logits * targets)) + criterion = lambda logits, targets:-jnp.sum(logits * targets) + self.register(model=(opt_state, init_fun, predict_fun), + optimizer=(opt_init, opt_update, get_params), + criterion=criterion) """ pass From 560dd7594724f56daf167034aea628a87f6d3b99 Mon Sep 17 00:00:00 2001 From: Ezra-H Date: Fri, 30 Apr 2021 00:21:26 +0800 Subject: [PATCH 05/13] reset format.sh --- format.sh | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/format.sh b/format.sh index d5abce5..c03b7b9 100755 --- a/format.sh +++ b/format.sh @@ -46,7 +46,7 @@ builtin cd "$ROOT" || exit 1 # Add the upstream remote if it doesn't exist if ! git remote -v | grep -q upstream; then - git remote add 'upstream' 'https://yuan.cm/https://github.com/ray-project/distml.git' + git remote add 'upstream' 'https://github.com/ray-project/distml.git' fi FLAKE8_VERSION=$(flake8 --version | awk '{print $1}') @@ -106,14 +106,14 @@ format_changed() { yapf --in-place "${YAPF_EXCLUDES[@]}" "${YAPF_FLAGS[@]}" if which flake8 >/dev/null; then git diff --name-only --diff-filter=ACRM "$MERGEBASE" -- '*.py' | xargs -P 5 \ - flake8 '"' --ignore=N,I,C408,E121,E123,E126,E226,E24,E704,W503,W504,W605 + flake8 --inline-quotes '"' --no-avoid-escape --ignore=N,I,C408,E121,E123,E126,E226,E24,E704,W503,W504,W605 fi fi if ! git diff --diff-filter=ACRM --quiet --exit-code "$MERGEBASE" -- '*.pyx' '*.pxd' '*.pxi' &>/dev/null; then if which flake8 >/dev/null; then git diff --name-only --diff-filter=ACRM "$MERGEBASE" -- '*.pyx' '*.pxd' '*.pxi' | xargs -P 5 \ - flake8 '"' --ignore=N,I,C408,E121,E123,E126,E211,E225,E226,E227,E24,E704,E999,W503,W504,W605 + flake8 --inline-quotes '"' --no-avoid-escape --ignore=N,I,C408,E121,E123,E126,E211,E225,E226,E227,E24,E704,E999,W503,W504,W605 fi fi } @@ -121,7 +121,7 @@ format_changed() { # Format all files, and print the diff to stdout for travis. format_all() { yapf --diff "${YAPF_FLAGS[@]}" "${YAPF_EXCLUDES[@]}" distml - flake8 '"' --ignore=N,I,C408,E121,E123,E126,E211,E225,E226,E227,E24,E704,E999,W503,W504,W605 distml + flake8 --inline-quotes '"' --no-avoid-escape --ignore=N,I,C408,E121,E123,E126,E211,E225,E226,E227,E24,E704,E999,W503,W504,W605 distml } # This flag formats individual files. 
--files *must* be the first command line From b8d2ec91b099adec168394183dc6bf221088e33b Mon Sep 17 00:00:00 2001 From: Ezra-H Date: Fri, 30 Apr 2021 02:01:59 +0800 Subject: [PATCH 06/13] init ps strategy --- distml/strategy/ps_strategy.py | 702 +++++++++++++++++++++++++++++++++ examples/jax/default_train.csv | 2 - 2 files changed, 702 insertions(+), 2 deletions(-) create mode 100644 distml/strategy/ps_strategy.py delete mode 100644 examples/jax/default_train.csv diff --git a/distml/strategy/ps_strategy.py b/distml/strategy/ps_strategy.py new file mode 100644 index 0000000..382daeb --- /dev/null +++ b/distml/strategy/ps_strategy.py @@ -0,0 +1,702 @@ +import ray +import ray.util.collective as col + +import numpy as np + +import distml.strategy.util as util +from distml.strategy.base_trainer import BaseTrainer +from .util import ThroughputCollection + +import logging + +logger = logging.getLogger(__name__) + + +class ParameterServerStrategy(BaseTrainer): + """Strategy that trains a model via collective AllReduce. + + Args: + training_operator_cls (TrainingOperator): + Custom training operator class. + operator_config (dict): operator config specified by users. + initialization_hook (function): A function to call on all training + workers when they are first initialized. This could be useful to + set environment variables for all the worker processes. + num_workers (int): The number of workers. + num_ps (int): The number of parameter servers. + num_cpus_per_worker (int): number of CPUs allocated per worker. + num_gpus_per_worker (int): number of GPUs allocated per worker. + num_cpus_per_server (int): number of CPUs allocated per server. + num_gpus_per_server (int): number of GPUs allocated per server. + """ + + def __init__(self, + *, + training_operator_cls, + operator_config=None, + initialization_hook=None, + num_workers=1, + num_ps=1, + num_cpus_per_worker=1, + num_gpus_per_worker=1, + num_cpus_per_server=1, + num_gpus_per_server=1, + **kwargs): + self.assignments = None + + assert num_ps + self.num_ps = num_ps + self.num_workers = num_workers + self.num_cpus_per_server = num_cpus_per_server + self.num_gpus_per_server = num_gpus_per_server + + super(ParameterServerStrategy, self).\ + __init__(training_operator_cls=training_operator_cls, + operator_config=operator_config, + initialization_hook=initialization_hook, + num_cpus_per_worker=num_cpus_per_worker, + num_gpus_per_worker=num_gpus_per_worker, + **kwargs) + + # PS strategy needs some other prep-up. 
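+        # Roughly: sync the initial weights from worker 0, compute a + # round-robin shard assignment, then seed every server with its own + # parameter shard (see _init_strategy below).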
+        self._init_strategy() + + if operator_config and operator_config.get("batch_size"): + self._global_batch_size = operator_config.get("batch_size") + if self._global_batch_size: + self._collector = ThroughputCollection( + batch_size=self._global_batch_size) + else: + self._collector = ThroughputCollection() + + def _init_strategy(self): + """Do initialization for the distributed strategy.""" + # All sync with worker 0 + init_weights_id = self.worker_group.get_named_parameters(cpu=True) + + self._round_robin_sharding() + + # set assignments to every worker + self.worker_group.set_assignments(self.assignments) + + # all workers get synced + for i, worker in enumerate(self.worker_group.actors): + if i != 0: + ray.get([worker.set_parameters.remote(init_weights_id)]) + + # now spawn parameter server actors + shard_ids = self.worker_group.split_parameters(self.assignments) + + # TODO(HUI): use scatter to send parameters + for server_idx, server in enumerate(self.server_group.actors): + this_shard_ref = self.worker_group.actors[0].index_shard.remote( + shard_ids, server_idx) + ray.get([server.set_params.remote(this_shard_ref)]) + + def _start_workers(self): + """Create worker(actor), maybe need worker group to manager these workers. + Or, send these workers to strategy to manager? + + set workers or worker group + set worker info, record rank, backend, use_num_gpus? + """ + # TODO (Hao): infer the per-replica batch size here... + + # so here we get two set of params that will be passed around: + # (1) Those for setting up training logic in training_operator, + # including: batchsize, use_tqdm, user defined operator_config. + operator_config = self._operator_config.copy() + params = dict( + training_operator_cls=self.training_operator_cls, + operator_config=operator_config) + # (2) params for setting up collective group + # and the strategy-related things; + + # For now, we do not have many of them though. + dist_params_worker = dict( + strategy="ps", + is_server=False, + group_name="default", + num_ps=self.num_ps, + num_workers=self.num_workers, + ) + + dist_params_server = dict( + strategy="ps", + is_server=True, + group_name="default", + num_ps=self.num_ps, + num_workers=self.num_workers, + ) + + # (3) other arguments used to init the DataParallelGroup + workergroup_init_args = { + "params": params, + "dist_params": dist_params_worker, + "num_cpus_per_actor": self.num_cpus_per_worker, + "num_gpus_per_actor": self.num_gpus_per_worker, + } + + servergroup_init_args = { + "params": params, + "dist_params": dist_params_server, + "num_cpus_per_actor": self.num_cpus_per_server, + "num_gpus_per_actor": self.num_gpus_per_server, + } + + # Should we make two groups for worker and server? + self.worker_group = DataParallelGroup(**workergroup_init_args) + self.server_group = DataParallelGroup(**servergroup_init_args) + + # Once the group is created, we start it. + self.worker_group.start_actors(self.num_workers) + self.server_group.start_actors( + self.num_ps) # server at the last num_ps processes. + + worker_rets = self.worker_group.test_connection() + server_rets = self.server_group.test_connection() + ray.get(worker_rets + server_rets) + ray.get(self.worker_group.setup_operator()) + ray.get(self.server_group.setup_operator()) + + self.server_group.clean_redundancy() + + def shutdown(self, force=False): + self.worker_group.shutdown(force=force) + self.server_group.shutdown(force=force) + + def save_parameters(self, checkpoint): + # TODO(HUI): ps save parameters.
+ # First, worker rank 0 should pull the latest parameter from servers + # Then, worker rank 0 save parameters + self.worker_group.save_parameters(checkpoint) + + def load_parameters(self, checkpoint): + # TODO(HUI): ps load parameters. + # shard parameters and send to all servers. + self.server_group.load_parameters(checkpoint) + + def _round_robin_sharding(self): + """Generate the assignment of variable to servers.""" + parameter_distribution = ray.get( + self.worker_group.actors[0].params_distribution.remote()) + assignments = [0 for _ in parameter_distribution] + loads = [0 for _ in range(self.num_ps)] + for i, var_size in enumerate(parameter_distribution): + min_ps_index = loads.index(min(loads)) + loads[min_ps_index] += var_size + assignments[i] = min_ps_index + print("Load of each ps {}".format(loads)) + self.assignments = assignments + + def train(self, num_steps=None): + # TODO (Hao): add fault tolerance using `max_retries`. + steps = num_steps if num_steps \ + else self.worker_group.get_data_loader_len() + + # TODO(HUI): Record server rank instead of using num_ps. + # TODO(Hao): this call should be hidden inside Replica. + # train one epoch + self.worker_group.make_iterator() + for idx in range(steps): + with self._collector.record("train"): + metrics = self.train_batch() + logger.info("Step: {}/{}".format(idx, steps)) + return metrics + + def validate(self, num_steps=None): + steps = num_steps if num_steps \ + else self.worker_group.get_data_loader_len(training=False) + self.worker_group.make_iterator(training=False) + + # TODO(HUI): Construct a better tool to save validate results. + for idx in range(steps): + batch_metrics = self.worker_group.validate_batch() + # Validate results should be the same in all workers + return batch_metrics + + def train_batch(self): + loss_vals = [] + rets = [] + metrics = {} + + for worker_idx, worker in enumerate(self.worker_group.actors): + for server_idx, server in enumerate(self.server_group.actors): + # every server sends its shard to the worker + server.send_params.remote(worker_idx) + # the worker receives shards from ps, computes loss and gradients, + # and sends these gradients to every server + loss_val = worker.compute.remote() + loss_vals.append(loss_val) + + for worker_idx, worker in enumerate(self.worker_group.actors): + for server in self.server_group.actors: + rets.append(server.update.remote(worker_idx)) + + loss_vals = ray.get(loss_vals) + ray.get(rets) + train_loss_list = [d["train_loss"] for d in loss_vals] + metrics["train_loss"] = np.mean(train_loss_list) + return metrics + + +class PS(object): + def __init__(self, training_operator_cls, operator_config): + self.training_operator_cls = training_operator_cls + self.operator_config = operator_config + + self.grad_counts = None + self.params = dict() + + def setup_operator(self): + # figure out the signature of training_operator_cls later. + self.training_operator = self.training_operator_cls( + self.operator_config) + + def setup_collective_group(self, + rank, + num_ps, + num_workers, + backend="nccl", + group_name="default"): + # rank is the global rank: worker ranks come first, so server ranks are offset by the number of workers. + self.rank = rank + self.num_ps = num_ps + self.num_workers = num_workers + self.group_name = group_name + self.group_size = num_ps + num_workers + self._init_grad_counts() + # the last num_ps processes are servers.
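+ # e.g. with num_workers=2 and num_ps=2 the group has size 4: + # ranks 0-1 are workers and ranks 2-3 are parameter servers.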
+ col.init_collective_group( + num_ps + num_workers, rank, backend=backend, group_name=group_name) + + def test_connection(self): + for i in range(self.num_workers): + recv = util.zeros((1, ), cpu=False) + col.recv(recv, i, self.group_name) + assert recv == 1 + for i in range(self.num_workers): + send = util.ones((1, ), cpu=False) + col.send(send, i, self.group_name) + return + + def _init_grad_counts(self): + self.grad_counts = [0] * self.num_workers + + def _init_grad_buffer(self): + self.grad_buffer = { + k: self.training_operator.zeros_like(v, cpu=False) + for k, v in self.params.items() + } + + def get_params(self): + return self.params + + def set_params(self, params): + # params should be on GPU when calling this function. + for k, v in params.items(): + self.params[k] = self.training_operator.asarray(v) + + # params is a dict; if a list is needed, convert it in the operator. + self.training_operator.reset_optimizer_for_params(self.params) + self._init_grad_buffer() + + def apply_updates(self, grad_buffer): + # TODO(HUI): gradient divide by num_workers + self.training_operator.apply_updates(grad_buffer) + self.params = self.training_operator.get_named_parameters(cpu=False) + + def _inc_gradients(self, gradients): + for name, p in self.get_params().items(): + if gradients[name] is not None: + self.grad_buffer[name] += gradients[name] + + def send_params(self, dst_rank): + """ Send this param shard to the destination worker """ + for name, v in self.params.items(): + cv = self.training_operator.to_cupy(v) + col.send(cv, dst_rank, self.group_name) + + def update(self, src_rank): + """Receive gradients and update""" + keys = list(self.params.keys()) + grads = dict() + recv_list = [] + + for key in keys: + to_recv = self.params[key] + recv_list.append( + self.training_operator.zeros(to_recv.shape, cpu=False)) + + for i in range(len(keys)): + v = self.training_operator.to_cupy(recv_list[i]) + col.recv(v, src_rank, self.group_name) + + for i in range(len(keys)): + grads[keys[i]] = recv_list[i] + + self._inc_gradients(grads) + if not self.grad_counts[src_rank]: + self.grad_counts[src_rank] = 1 + else: + raise RuntimeError(f"This worker {src_rank} sent gradients again.") + if sum(self.grad_counts) == self.num_workers: + self.apply_updates(self.grad_buffer) + + self._init_grad_buffer() + self._init_grad_counts() + return True + + def clean_redundancy(self): + self.training_operator.clean_redundancy() + + def shutdown(self): + # destroy the collective group resources on this process + col.destroy_collective_group(self.group_name) + if self.training_operator: + del self.training_operator + return 1 + + +class Worker(object): + def __init__(self, training_operator_cls, operator_config): + self.training_operator_cls = training_operator_cls + self.operator_config = operator_config + + # collective-related information + self.group_size = None + self.rank = None + self.group_name = None + self.assignments = None + + def setup_operator(self): + # figure out the signature of training_operator_cls later. + self.training_operator = self.training_operator_cls( + self.operator_config) + + def setup_collective_group(self, + rank, + num_ps, + num_workers, + backend="nccl", + group_name="default"): + self.rank = rank + self.num_ps = num_ps + self.num_workers = num_workers + self.group_name = group_name + self.group_size = num_ps + num_workers + self.name_list = [[] for i in range(num_ps)] + + # the last num_ps processes are servers.
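+ # From a worker's point of view, server i is therefore reachable at + # rank num_workers + i, which is how compute() below addresses its + # send/recv calls.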
+ col.init_collective_group( + num_ps + num_workers, rank, backend=backend, group_name=group_name) + + def test_connection(self): + for i in range(self.num_ps): + send = util.ones((1, ), cpu=False) + col.send(send, self.num_workers + i, self.group_name) + for i in range(self.num_ps): + recv = util.zeros((1, ), cpu=False) + col.recv(recv, self.num_workers + i, self.group_name) + assert recv == 1 + return + + def params_distribution(self): + distribution = [] + weights = self.get_named_parameters(cpu=True) + for k, v in weights.items(): + distribution.append(self.training_operator.numel(v)) + return distribution + + def make_iterator(self, training=True): + """Convert loader to be an iterator at the start of an epoch.""" + # TODO(Hao): need to check whether reaching the boundary of iterator + # instead of making a new one every time. + if training: + self.training_iterator = iter( + self.training_operator._get_train_loader()) + else: + self.validation_iterator = iter( + self.training_operator._get_validation_loader()) + + def get_data_loader_len(self, training=True): + """Return the number of batches in the data loader.""" + loader = self.training_operator._get_train_loader() if training \ + else self.training_operator._get_validation_loader() + if hasattr(loader, "__len__"): + return len(loader) + else: + raise RuntimeError( + "Data loader has no attribute `__len__`. " + "Please set `num_steps` in `train()` or `validate()`.") + + def derive_updates(self, batch): + # TODO (Hao): handling data loader next. + # TODO (Hao): change it to derive_update and apply_update. + return self.training_operator.derive_updates(batch) + + def compute_gradients(self, params): + """ + Update worker parameters with those received from the server. + Compute gradients and return named gradients. + """ + self.set_parameters(params) + + try: + batch = next(self.training_iterator) + except (StopIteration, NameError): + self.make_iterator() + batch = next(self.training_iterator) + + # Different from the original core ps: + # here derive_updates returns loss_val and gradients in order. + loss_val, grads = self.training_operator.derive_updates(batch) + assert isinstance(grads, dict) + + return loss_val, grads + + def split_gradients(self, grad, assignments): + # assuming messages are gradients or parameters + # each shard is ready to be consumed by apply_gradients in ParameterServer + num_shards = np.unique(np.array(assignments)).size + shards = [dict() for i in range(num_shards)] + for i, (k, v) in enumerate(grad.items()): + shards[assignments[i]][k] = v + return shards + + def split_parameters(self, assignments): + params = self.get_named_parameters(cpu=False) + num_shards = np.unique(np.array(assignments)).size + shards = [dict() for i in range(num_shards)] + for i, (k, v) in enumerate(params.items()): + shards[assignments[i]][k] = v + return shards + + def index_shard(self, shards, index): + return shards[index] + + def set_parameters(self, params): + return self.training_operator.set_parameters(params) + + def get_parameters(self, cpu): + return self.training_operator.get_parameters(cpu) + + def get_named_parameters(self, cpu): + return self.training_operator.get_named_parameters(cpu) + + def get_gradients(self): + # The training operator exposes the gradients, or we cache them in + # the replica when derive_updates runs.
+ return self.training_operator.get_gradients() + + def set_assignments(self, assignments): + self.assignments = assignments + keys = list(self.get_named_parameters(cpu=False).keys()) + for i, a in enumerate(self.assignments): + self.name_list[a].append(keys[i]) + + def compute(self): + """Return the loss and send gradients to the servers.""" + metrics = {} + + weights = self.get_named_parameters(cpu=False) + params = dict() + + # 1. Create the receive lists to group collective calls + recv_list = [] + for i in range(self.num_ps): + recv_list.append([]) + param_shard_keys = self.name_list[i] + for key in param_shard_keys: + to_recv = weights[key] + recv_list[-1].append( + self.training_operator.ones(to_recv.shape, cpu=False)) + + # 2. Receive params from servers + for i in range(self.num_ps): + for j in range(len(self.name_list[i])): + v = self.training_operator.to_cupy(recv_list[i][j]) + col.recv(v, self.num_workers + i, self.group_name) + + # 3. Set params in workers and compute gradients. + for i in range(self.num_ps): + param_shard_keys = self.name_list[i] + for j in range(len(param_shard_keys)): + params[param_shard_keys[j]] = recv_list[i][j] + + loss_val, grad = self.compute_gradients(params) + metrics["train_loss"] = loss_val + + # 4. Shard gradients and send to servers. + split_grad = self.split_gradients(grad, self.assignments) + for i in range(self.num_ps): + this_shard = self.index_shard(split_grad, i) + for _, v in this_shard.items(): + cv = self.training_operator.to_cupy(v) + col.send(cv, self.num_workers + i, self.group_name) + return metrics + + def validate_batch(self): + try: + batch = next(self.validation_iterator) + except (StopIteration, TypeError): + self.make_iterator(training=False) + batch = next(self.validation_iterator) + batch_metric = self.training_operator.validate_batch(batch) + return batch_metric + + def shutdown(self): + # destroy the collective group resources on this process + col.destroy_collective_group(self.group_name) + if self.training_operator: + del self.training_operator + return 1 + + +class DataParallelGroup: + """Spawn a group of replicas for data-parallel training.""" + + def __init__(self, params, dist_params, num_cpus_per_actor, + num_gpus_per_actor): + self._params = params + self._dist_params = dist_params + self._num_cpus_per_actor = num_cpus_per_actor + self._num_gpus_per_actor = num_gpus_per_actor + + self.is_server = self._dist_params["is_server"] + self.num_ps = self._dist_params["num_ps"] + self.num_workers = self._dist_params["num_workers"] + + self._distributed_actors = None + + def _setup_collective_group(self, num_replicas): + if self._dist_params["strategy"] == "ps": + num_ps = self._dist_params["num_ps"] + num_workers = self._dist_params["num_workers"] + is_server = self.is_server + rets = [ + actor.setup_collective_group.remote( + rank=i + is_server * num_workers, + num_workers=num_workers, + num_ps=num_ps, + backend="nccl") + for i, actor in enumerate(self._distributed_actors) + ] + else: # this can be extended for allreduce.
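+ # Only the "ps" layout is wired up here; an allreduce group would + # simply rank actors 0..num_replicas - 1 with no server offset.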
+ raise RuntimeError("Unrecognized strategy.") + return rets + + def setup_operator(self): + setups = [ + actor.setup_operator.remote() + for i, actor in enumerate(self._distributed_actors) + ] + return setups + + def start_actors(self, num_actors): + if self.is_server: + RemoteActor = ray.remote( + num_cpus=self._num_cpus_per_actor, + num_gpus=self._num_gpus_per_actor)(PS) + else: + RemoteActor = ray.remote( + num_cpus=self._num_cpus_per_actor, + num_gpus=self._num_gpus_per_actor)(Worker) + + self._distributed_actors = [ + RemoteActor.remote(**self._params) for _ in range(num_actors) + ] + + # setup the rank and group in each replica + ray.get(self._setup_collective_group(len(self._distributed_actors))) + + def test_connection(self): + rets = [ + actor.test_connection.remote() + for _, actor in enumerate(self.actors) + ] + return rets + + def set_assignments(self, assignments): + rets = [ + actor.set_assignments.remote(assignments) + for _, actor in enumerate(self.actors) + ] + return rets + + def _make_iterator(self, training): + return [actor.make_iterator.remote(training) for actor in self.actors] + + def make_iterator(self, training=True): + ray.get(self._make_iterator(training)) + + def get_data_loader_len(self, training=True): + """Return the number of batches in the data loader.""" + lens = ray.get([ + actor.get_data_loader_len.remote(training=training) + for actor in self.actors + ]) + + if len(set(lens)) != 1: + # TODO(Hao): is this correct after we add distributed data loader? + raise RuntimeError( + "All actors should have the same dataloader len.") + return lens[0] + + def validate_batch(self): + rets = [ + actor.validate_batch.remote() + for _, actor in enumerate(self.actors) + ] + stats = ray.get(rets) + return stats + + def shutdown(self, force=False): + rets = [actor.shutdown.remote() for _, actor in enumerate(self.actors)] + stats = ray.get(rets) + return stats + + def reset(self): + pass + + @property + def actors(self): + return self._distributed_actors + + def save_parameters(self, checkpoint): + rets = [self.actors[0].save_parameters.remote(checkpoint)] + ray.get(rets) + + def load_parameters(self, checkpoint): + rets = [ + actor.load_parameters.remote(checkpoint) + for _, actor in enumerate(self.actors) + ] + ray.get(rets) + + def set_parameters(self, params): + rets = [ + actor.set_parameters.remote(params) + for _, actor in enumerate(self.actors) + ] + ray.get(rets) + + def get_parameters(self, cpu=False): + ret = self.actors[0].get_parameters.remote(cpu) + return ray.get([ret])[0] + + def get_named_parameters(self, cpu=False): + ret = self.actors[0].get_named_parameters.remote(cpu) + return ray.get([ret])[0] + + def split_parameters(self, assignments): + ret = self.actors[0].split_parameters.remote(assignments) + return ray.get([ret])[0] + + def clean_redundancy(self): + """Clean dataloader. 
Only for servers""" + rets = [ + actor.clean_redundancy.remote() + for _, actor in enumerate(self.actors) + ] + ray.get(rets) diff --git a/examples/jax/default_train.csv b/examples/jax/default_train.csv deleted file mode 100644 index 0d23b47..0000000 --- a/examples/jax/default_train.csv +++ /dev/null @@ -1,2 +0,0 @@ -count_train,mean_train_s,last_train_s,total_train_s,pass_data_train,throughout_train_d -50,2.456741285324097,2.3998360633850098,164.64879870414734,6400,38.87061460739823 From a68dbf3ffcadcee6e21ac002de83b4e6dd5f57fe Mon Sep 17 00:00:00 2001 From: Ezra-H Date: Fri, 30 Apr 2021 02:03:15 +0800 Subject: [PATCH 07/13] delete some trash files --- examples/jax/.ray.lock | 0 examples/jax/default_train.csv | 2 -- 2 files changed, 2 deletions(-) delete mode 100755 examples/jax/.ray.lock delete mode 100644 examples/jax/default_train.csv diff --git a/examples/jax/.ray.lock b/examples/jax/.ray.lock deleted file mode 100755 index e69de29..0000000 diff --git a/examples/jax/default_train.csv b/examples/jax/default_train.csv deleted file mode 100644 index 0d23b47..0000000 --- a/examples/jax/default_train.csv +++ /dev/null @@ -1,2 +0,0 @@ -count_train,mean_train_s,last_train_s,total_train_s,pass_data_train,throughout_train_d -50,2.456741285324097,2.3998360633850098,164.64879870414734,6400,38.87061460739823 From e3792ab33b03a5b3a62cf1322db89c09df4cf075 Mon Sep 17 00:00:00 2001 From: Ezra-H Date: Fri, 30 Apr 2021 02:27:12 +0800 Subject: [PATCH 08/13] jax ps example --- distml/strategy/ps_strategy.py | 10 +++--- examples/jax/mnist_jax_example.py | 54 +++++++++++++++++++++++++------ 2 files changed, 49 insertions(+), 15 deletions(-) diff --git a/distml/strategy/ps_strategy.py b/distml/strategy/ps_strategy.py index 382daeb..99fef65 100644 --- a/distml/strategy/ps_strategy.py +++ b/distml/strategy/ps_strategy.py @@ -3,16 +3,16 @@ import numpy as np -import distml.strategy.util as util -from distml.strategy.base_trainer import BaseTrainer -from .util import ThroughputCollection +import distml.util as util +from distml.strategy.base_strategy import BaseStrategy +from distml.util import ThroughputCollection import logging logger = logging.getLogger(__name__) -class ParameterServerStrategy(BaseTrainer): +class ParameterServerStrategy(BaseStrategy): """Strategy that trains a model via collective AllReduce. 
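A minimal usage sketch (names here are illustrative, not part of the API): strategy = ParameterServerStrategy( training_operator_cls=MyTrainingOperator, num_workers=2, num_ps=1, operator_config={"batch_size": 128}) strategy.train(num_steps=10) strategy.shutdown()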
Args: @@ -240,7 +240,7 @@ def train_batch(self): return metrics -class PS(object): +class PS(object): def __init__(self, training_operator_cls, operator_config): self.training_operator_cls = training_operator_cls self.operator_config = operator_config diff --git a/examples/jax/mnist_jax_example.py b/examples/jax/mnist_jax_example.py index 2335b5d..cc265ae 100644 --- a/examples/jax/mnist_jax_example.py +++ b/examples/jax/mnist_jax_example.py @@ -6,6 +6,7 @@ import ray from distml.operator.jax_operator import JAXTrainingOperator from distml.strategy.allreduce_strategy import AllReduceStrategy +from distml.strategy.ps_strategy import ParameterServerStrategy from ray.util.sgd.utils import override @@ -72,6 +73,35 @@ def setup(self, config): train_loader=train_loader, validation_loader=test_loader) +def make_ar_strategy(args): + strategy = AllReduceStrategy( + training_operator_cls=MnistTrainingOperator, + world_size=args.num_workers, + operator_config={ + "lr": 0.01, + "batch_size": 128, + "num_workers": args.num_workers, + "num_classes": 10, + "model_name": args.model_name + }) + return strategy + + +def make_ps_strategy(args): + strategy = ParameterServerStrategy( + training_operator_cls=MnistTrainingOperator, + world_size=args.num_workers, + num_workers=args.num_workers - args.num_ps, + num_ps=args.num_ps, + operator_config={ + "lr": 0.01, + "batch_size": 128, + "num_classes": 10, + "model_name": args.model_name + }) + return strategy + + if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( @@ -85,6 +115,11 @@ def setup(self, config): type=int, default=2, help="Sets number of workers for training.") + parser.add_argument( + "--num-ps", + type=int, + default=1, + help="Sets number of servers for training. Only for ps_strategy.") parser.add_argument( "--num-epochs", type=int, @@ -100,6 +135,8 @@ def setup(self, config): type=str, default="resnet18", help="model, Optional: resnet18, resnet50, resnet101.") + parser.add_argument( + "--strategy", type=str, default="ar", help="strategy, Optional: ar, ps.") args, _ = parser.parse_known_args() @@ -111,16 +148,13 @@ def setup(self, config): num_cpus=args.num_workers * 2, log_to_driver=True) - strategy = AllReduceStrategy( - training_operator_cls=MnistTrainingOperator, - world_size=args.num_workers, - operator_config={ - "lr": 0.01, - "batch_size": 128, - "num_workers": args.num_workers, - "num_classes": 10, - "model_name": args.model_name - }) + if args.strategy == "ar": + strategy = make_ar_strategy(args) + elif args.strategy == "ps": + strategy = make_ps_strategy(args) + else: + raise RuntimeError("Unrecognized strategy type.
Expected 'ar' or 'ps', " + "got {}.".format(args.strategy)) for i in range(args.num_epochs): strategy.train() From 21340f4310592c20eee0457330c68f6418d19afc Mon Sep 17 00:00:00 2001 From: Ezra-H Date: Sat, 15 May 2021 01:50:55 +0800 Subject: [PATCH 09/13] base_data_parallel_group and add typing in function params --- distml/strategy/allreduce_strategy.py | 139 +++++++------- distml/strategy/base_strategy.py | 126 ++++++++++++- distml/strategy/ps_strategy.py | 258 ++++++++++++++------------ examples/jax/mnist_jax_example.py | 14 +- 4 files changed, 333 insertions(+), 204 deletions(-) diff --git a/distml/strategy/allreduce_strategy.py b/distml/strategy/allreduce_strategy.py index 5eb92b8..3016625 100644 --- a/distml/strategy/allreduce_strategy.py +++ b/distml/strategy/allreduce_strategy.py @@ -1,8 +1,9 @@ import logging +from typing import Callable, Mapping, Any, Optional import ray import ray.util.collective as col -from distml.strategy.base_strategy import BaseStrategy +from distml.strategy.base_strategy import BaseStrategy, BaseDataParallelGroup from distml.util import ThroughputCollection import numpy as np @@ -29,17 +30,21 @@ class AllReduceStrategy(BaseStrategy): def __init__(self, *, training_operator_cls, - operator_config=None, - initialization_hook=None, - world_size=2, - num_cpus_per_worker=1, - num_gpus_per_worker=1, + operator_config: Optional[Mapping[str, Any]] = None, + initialization_hook: Optional[Callable] = None, + world_size: int = 2, + backend: str = "nccl", + group_name: str = "default", + num_cpus_per_worker: int = 1, + num_gpus_per_worker: int = 1, **kwargs): super(AllReduceStrategy, self). \ __init__(training_operator_cls=training_operator_cls, operator_config=operator_config, initialization_hook=initialization_hook, world_size=world_size, + backend=backend, + group_name=group_name, num_cpus_per_worker=num_cpus_per_worker, num_gpus_per_worker=num_gpus_per_worker, **kwargs) @@ -52,7 +57,7 @@ def __init__(self, else: self._collector = ThroughputCollection() - def train(self, num_steps=None): + def train(self, num_steps: Optional[int] = None): """Run the training on parallel workers. Args: @@ -74,7 +79,7 @@ def train(self, num_steps=None): print("Step: {}/{}".format(idx, steps)) return metrics - def validate(self, num_steps=None): + def validate(self, num_steps: Optional[int] = None): """Evaluates the model on the validation data. Args: @@ -111,26 +116,26 @@ def _start_workers(self): # (2) params for setting up collective group and strategy prep-ups. dist_params = dict( strategy="allreduce", - backend="nccl", - group_name="default", + backend=self.backend, + group_name=self.group_name, ) group_init_args = dict( - replica_params=replica_params, + actor_params=replica_params, dist_params=dist_params, initialization_hook=self.initialization_hook, - num_cpus_per_worker=self.num_cpus_per_worker, - num_gpus_per_worker=self.num_gpus_per_worker) + num_cpus_per_actor=self.num_cpus_per_worker, + num_gpus_per_actor=self.num_gpus_per_worker) self.data_parallel_group = DataParallelGroup(**group_init_args) # Once the group is created, we start it.
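# start_replicas spawns one Ray actor per replica, forms the collective group, and then sets up the training operators (see _start_actors below).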
self.data_parallel_group.start_replicas(self.world_size) - def shutdown(self, force=False): + def shutdown(self, force: bool = False): self.data_parallel_group.shutdown(force=force) - def save_parameters(self, checkpoint): + def save_parameters(self, checkpoint: str): self.data_parallel_group.save_parameters(checkpoint) - def load_parameters(self, checkpoint): + def load_parameters(self, checkpoint: str): self.data_parallel_group.load_parameters(checkpoint) def _init_strategy(self): @@ -144,7 +149,7 @@ class Replica: and Ray collective group setup. """ - def __init__(self, training_operator_cls, operator_config): + def __init__(self, training_operator_cls, operator_config: Optional[Mapping[str, Any]]): self.training_operator_cls = training_operator_cls self.operator_config = operator_config # Training operator @@ -165,17 +170,17 @@ def setup_operator(self): operator_config=self.operator_config) def setup_collective_group(self, - rank, - world_size, - backend, - group_name="default"): + rank: int, + world_size: int, + backend: str, + group_name: str = "default"): self._rank = rank self._group_name = group_name self._world_size = world_size col.init_collective_group( world_size, rank, backend=backend, group_name=group_name) - def make_iterator(self, training=True): + def make_iterator(self, training: bool = True): """Convert loader to be an iterator at the start of an epoch.""" # TODO(Hao): need to check whether reaching the boundary of iterator # instead of making a new one every time. @@ -184,7 +189,7 @@ def make_iterator(self, training=True): else: self.validation_iterator = iter(self.validation_loader) - def get_data_loader_len(self, training=True): + def get_data_loader_len(self, training: bool = True): """Return the number of batches in the data loader.""" loader = self.train_loader if training \ else self.validation_loader @@ -195,7 +200,7 @@ def get_data_loader_len(self, training=True): "Data loader has no attribute `__len__`. " "Please set `num_steps` in `train()` or `validate()`.") - def train_batch(self): + def train_batch(self) -> dict: metrics = {} try: batch = next(self.train_iterator) @@ -209,16 +214,14 @@ def train_batch(self): for _, g in updates.items(): cg = self.training_operator.to_cupy(g) col.allreduce(cg) - # TODO(Hao): this is conflicting with Runhui's code though.
cg = cg / float(self.world_size) self.apply_updates(updates) return metrics - def derive_updates(self, batch): + def derive_updates(self, batch) -> dict: return self.training_operator.derive_updates(batch) def apply_updates(self, updates): - # TODO(Hao): conflicting with Runhui's code on averaging grads self.training_operator.apply_updates(updates) def updates_transform(self, updates): @@ -240,13 +243,13 @@ def shutdown(self): del self.training_operator return 1 - def save_parameters(self, checkpoint): + def save_parameters(self, checkpoint: str): self.training_operator.save_parameters(checkpoint) - def load_parameters(self, checkpoint): + def load_parameters(self, checkpoint: str): self.training_operator.load_parameters(checkpoint) - def apply(self, fn): + def apply(self, fn: Callable): """Apply a function in the replica process.""" return fn() @@ -271,45 +274,43 @@ def group_name(self): return self._group_name -class DataParallelGroup: - """Spawn a group a replicas for data-parallel training.""" - - def __init__(self, replica_params, dist_params, initialization_hook, - num_cpus_per_worker, num_gpus_per_worker): - self._replica_params = replica_params - self._dist_params = dist_params - - # try to unroll the dist_params - self._backend = self._dist_params["backend"] - self._group_name = self._dist_params["group_name"] +class DataParallelGroup(BaseDataParallelGroup): + """Spawn a replica group for data-parallel training.""" - self._initialization_hook = initialization_hook - self._num_cpus_per_worker = num_cpus_per_worker - self._num_gpus_per_worker = num_gpus_per_worker + def __init__(self, + actor_params: Mapping[str, Any], + dist_params: Mapping[str, Any], + num_cpus_per_actor: int, + num_gpus_per_actor: int, + initialization_hook: Optional[Callable]): + super(DataParallelGroup, self).__init__(actor_params=actor_params, + dist_params=dist_params, + num_cpus_per_actor=num_cpus_per_actor, + num_gpus_per_actor=num_gpus_per_actor, + initialization_hook=initialization_hook) self._replicas = None @property - def replicas(self): - return self._replicas - - @property - def world_size(self): - return len(self._replicas) + def _replica_params(self): + return self._actor_params @property - def backend(self): - return self._backend + def replicas(self): + return self._actors @property def group_name(self): return self._group_name - def start_replicas(self, num_replicas): + def start_replicas(self, num_replicas: int): + self._start_actors(num_replicas) + + def _start_actors(self, num_replicas: int): assert num_replicas > 1 RemoteReplica = ray.remote( - num_cpus=self._num_cpus_per_worker, - num_gpus=self._num_gpus_per_worker)(Replica) - self._replicas = [ + num_cpus=self._num_cpus_per_actor, + num_gpus=self._num_gpus_per_actor)(Replica) + self._actors = [ RemoteReplica.remote(**self._replica_params) for _ in range(num_replicas) ] @@ -327,16 +328,16 @@ def start_replicas(self, num_replicas): operator_setups = self._setup_operator() ray.get(operator_setups) - def _make_iterator(self, training): + def _make_iterator(self, training: bool): return [ replica.make_iterator.remote(training=training) for replica in self.replicas ] - def make_iterator(self, training=True): + def make_iterator(self, training: bool = True): ray.get(self._make_iterator(training=training)) - def get_data_loader_len(self, training=True): + def get_data_loader_len(self, training: bool = True): """Return the number of batches in the data loader.""" lens = ray.get([ replica.get_data_loader_len.remote(training=training) @@ -361,7 
+362,7 @@ def validate_batch(self): stats = ray.get(rets) return stats - def shutdown(self, force=False): + def shutdown(self, force: bool = False): rets = [replica.shutdown.remote() for replica in self.replicas] stats = ray.get(rets) return stats @@ -369,11 +370,11 @@ def shutdown(self, force=False): def reset(self): pass - def save_parameters(self, checkpoint): + def save_parameters(self, checkpoint: str): rets = [self.replicas[0].save_parameters.remote(checkpoint)] ray.get(rets) - def load_parameters(self, checkpoint): + def load_parameters(self, checkpoint: str): rets = [ replica.load_parameters.remote(checkpoint) for _, replica in enumerate(self.replicas) @@ -387,15 +388,15 @@ def set_parameters(self, params): ] ray.get(rets) - def get_parameters(self, cpu=False): + def get_parameters(self, cpu: bool = False): ret = self.replicas[0].get_parameters.remote(cpu) return ray.get(ret)[0] - def get_named_parameters(self, cpu=False): + def get_named_parameters(self, cpu: bool = False): ret = self.replicas[0].get_named_parameters.remote(cpu) return ray.get([ret])[0] - def apply_all_replicas(self, fn): + def apply_all_replicas(self, fn: Callable): """Apply fn in all replica processes and wait until completion.""" return ray.get(self._apply_all_replicas(fn)) @@ -404,13 +405,13 @@ def _apply_all_replicas(self, fn): return [replica.apply.remote(fn) for replica in self.replicas] def _setup_collective_group(self, - world_size, - backend, - group_name="default"): + group_size: int, + backend: str, + group_name: str = "default"): refs = [ replica.setup_collective_group.remote( rank=i, - world_size=world_size, + world_size=group_size, backend=backend, group_name=group_name) for i, replica in enumerate(self.replicas) diff --git a/distml/strategy/base_strategy.py b/distml/strategy/base_strategy.py index 69e3b0a..27a92d0 100644 --- a/distml/strategy/base_strategy.py +++ b/distml/strategy/base_strategy.py @@ -1,6 +1,7 @@ from abc import ABCMeta from abc import abstractmethod import logging +from typing import AbstractSet, Callable, Any, Mapping, Optional import ray @@ -11,11 +12,13 @@ class BaseStrategy(metaclass=ABCMeta): def __init__(self, *, training_operator_cls, - operator_config=None, - initialization_hook=None, - world_size=2, - num_cpus_per_worker=1, - num_gpus_per_worker=1, + operator_config: Optional[Mapping[str, Any]] = None, + initialization_hook: Optional[Callable] = None, + world_size: int = 2, + backend: str = "nccl", + group_name: str = "default", + num_cpus_per_worker: int = 1, + num_gpus_per_worker: int = 1, **kwargs): self.training_operator_cls = training_operator_cls self.initialization_hook = initialization_hook @@ -24,6 +27,8 @@ def __init__(self, "ray.util.distml does not support single-process training " "at this moment.") self.world_size = world_size + self.backend = backend + self.group_name = group_name self.num_cpus_per_worker = num_cpus_per_worker self.num_gpus_per_worker = num_gpus_per_worker self._operator_config = {} if not operator_config \ @@ -47,7 +52,7 @@ def validate(self): raise NotImplementedError() @abstractmethod - def save_parameters(self, checkpoint): + def save_parameters(self, checkpoint: str): """Saves the Trainer state to the provided checkpoint path. Args: @@ -56,7 +61,12 @@ def save_parameters(self, checkpoint): raise NotImplementedError() @abstractmethod - def load_parameters(self, checkpoint): + def load_parameters(self, checkpoint: str): + """Loads the Trainer state from the provided checkpoint path.
+ + Args: + checkpoint (str): Path to target checkpoint file. + """ raise NotImplementedError() @abstractmethod @@ -70,6 +80,106 @@ def _init_strategy(self): raise NotImplementedError() @abstractmethod - def shutdown(self, force=False): + def shutdown(self, force: bool = False): """Kill all workers.""" raise NotImplementedError() + + +class BaseDataParallelGroup: + """Spawn an actor group for data-parallel training.""" + + def __init__(self, + actor_params: Mapping[str, Any], + dist_params: Mapping[str, Any], + num_cpus_per_actor: int, + num_gpus_per_actor: int, + initialization_hook: Optional[Callable], + **kwargs): + self._actor_params = actor_params + self._dist_params = dist_params + self._num_cpus_per_actor = num_cpus_per_actor + self._num_gpus_per_actor = num_gpus_per_actor + self._initialization_hook = initialization_hook + + # try to unroll the dist_params + self._backend = self._dist_params["backend"] + self._group_name = self._dist_params["group_name"] + + @property + def world_size(self): + return len(self._actors) + + @property + def backend(self): + return self._backend + + @property + def group_name(self): + return self._group_name + + @abstractmethod + def _setup_collective_group(self, *args, **kwargs): + """Set up the collective group across all actors.""" + raise NotImplementedError() + + @abstractmethod + def setup_operator(self): + """All actors set up their training operators.""" + raise NotImplementedError() + + @abstractmethod + def _start_actors(self, num_actors): + """Start all actors.""" + raise NotImplementedError() + + @abstractmethod + def make_iterator(self, training: bool = True): + """Make data iterators on all actors.""" + raise NotImplementedError() + + @abstractmethod + def get_data_loader_len(self, training: bool = True): + """Return the number of batches in the data loader.""" + raise NotImplementedError() + + @abstractmethod + def validate_batch(self): + """Validate one batch and return batch metrics.""" + raise NotImplementedError() + + @abstractmethod + def shutdown(self, force: bool = False): + """Shutdown all actors.""" + raise NotImplementedError() + + @abstractmethod + def reset(self): + """Reset group.""" + raise NotImplementedError() + + @abstractmethod + def save_parameters(self, checkpoint: str): + """Let the first actor save parameters.""" + raise NotImplementedError() + + @abstractmethod + def load_parameters(self, checkpoint: str): + """All actors load parameters from the checkpoint.""" + raise NotImplementedError() + + @abstractmethod + def set_parameters(self, params): + """Replace the model parameters with the given params.""" + raise NotImplementedError() + + @abstractmethod + def get_parameters(self, cpu: bool = False): + """Return parameters from the first actor.""" + raise NotImplementedError() + + @abstractmethod + def get_named_parameters(self, cpu: bool = False): + """Return named parameters from the first actor.""" + raise NotImplementedError() diff --git a/distml/strategy/ps_strategy.py b/distml/strategy/ps_strategy.py index 99fef65..0143365 100644 --- a/distml/strategy/ps_strategy.py +++ b/distml/strategy/ps_strategy.py @@ -1,10 +1,11 @@ +from typing import Tuple, List, Callable, Mapping, Union, Any, Optional, Sequence import ray import ray.util.collective as col import numpy as np import distml.util as util -from distml.strategy.base_strategy import BaseStrategy +from distml.strategy.base_strategy import BaseStrategy, BaseDataParallelGroup from distml.util import ThroughputCollection import
logging @@ -13,7 +14,7 @@ class ParameterServerStrategy(BaseStrategy): - """Strategy that trains a model via collective AllReduce. + """Strategy that trains a model via parameter server. Args: training_operator_cls (TrainingOperator): @@ -22,7 +23,7 @@ class ParameterServerStrategy(BaseStrategy): initialization_hook (function): A function to call on all training workers when they are first initialized. This could be useful to set environment variables for all the worker processes. - num_workers (int): The number of workers. + num_worker (int): The number of workers. num_ps (int): The number of parameter servers. num_cpus_per_worker (int): number of CPUs allocated per worker. num_gpus_per_worker (int): number of GPUs allocated per worker. @@ -33,27 +34,35 @@ class ParameterServerStrategy(BaseStrategy): def __init__(self, *, training_operator_cls, - operator_config=None, - initialization_hook=None, - num_workers=1, - num_ps=1, - num_cpus_per_worker=1, - num_gpus_per_worker=1, - num_cpus_per_server=1, - num_gpus_per_server=1, + operator_config: Optional[Mapping[str, Any]] = None, + initialization_hook: Optional[Callable] = None, + world_size: int = 2, + num_worker: int = 1, + num_ps: int = 1, + backend="nccl", + group_name="default", + num_cpus_per_worker: int = 1, + num_gpus_per_worker: int = 1, + num_cpus_per_server: int = 1, + num_gpus_per_server: int = 1, **kwargs): - self.assignments = None - assert num_ps + assert world_size == num_ps + num_worker, \ + "'world_size' should be equal to 'num_ps' plus 'num_worker'" + + self.assignments = None self.num_ps = num_ps - self.num_workers = num_workers + self.num_worker = num_worker self.num_cpus_per_server = num_cpus_per_server self.num_gpus_per_server = num_gpus_per_server - super(ParameterServerStrategy, self).\ + super(ParameterServerStrategy, self). \ __init__(training_operator_cls=training_operator_cls, operator_config=operator_config, initialization_hook=initialization_hook, + world_size=world_size, + backend=backend, + group_name=group_name, num_cpus_per_worker=num_cpus_per_worker, num_gpus_per_worker=num_gpus_per_worker, **kwargs) @@ -94,17 +103,13 @@ def _init_strategy(self): ray.get([server.set_params.remote(this_shard_ref)]) def _start_workers(self): - """Create worker(actor), maybe need worker group to manager these workers. - Or, send these workers to strategy to manager? - - set workers or worker group - set worker info, record rank, backend, use_num_gpus? + """Start worker group and server group. """ # TODO (Hao): infer the per-replica batch size here... # so here we get two set of params that will be passed around: # (1) Those for setting up training logic in training_operator, - # including: batchsize, use_tqdm, user defined operator_config. + # including: batch size, user defined operator_config. 
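+ # (each actor's training operator consumes these in its setup() call)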
 operator_config = self._operator_config.copy()
         params = dict(
             training_operator_cls=self.training_operator_cls,
@@ -116,62 +121,66 @@ def _start_workers(self):
         dist_params_worker = dict(
             strategy="ps",
             is_server=False,
-            group_name="default",
+            backend=self.backend,
+            group_name=self.group_name,
             num_ps=self.num_ps,
-            num_workers=self.num_workers,
+            num_worker=self.num_worker,
         )

         dist_params_server = dict(
             strategy="ps",
             is_server=True,
-            group_name="default",
+            backend=self.backend,
+            group_name=self.group_name,
             num_ps=self.num_ps,
-            num_workers=self.num_workers,
+            num_worker=self.num_worker,
         )

         # (3) other arguments used to init the DataParallelGroup
-        workergroup_init_args = {
-            "params": params,
-            "dist_params": dist_params_worker,
-            "num_cpus_per_actor": self.num_cpus_per_worker,
-            "num_gpus_per_actor": self.num_gpus_per_worker,
-        }
+        worker_group_init_args = dict(
+            actor_params=params,
+            dist_params=dist_params_worker,
+            num_cpus_per_actor=self.num_cpus_per_worker,
+            num_gpus_per_actor=self.num_gpus_per_worker,
+            initialization_hook=self.initialization_hook,
+        )

-        servergroup_init_args = {
-            "params": params,
-            "dist_params": dist_params_server,
-            "num_cpus_per_actor": self.num_cpus_per_server,
-            "num_gpus_per_actor": self.num_gpus_per_server,
-        }
+        server_group_init_args = dict(
+            actor_params=params,
+            dist_params=dist_params_server,
+            num_cpus_per_actor=self.num_cpus_per_server,
+            num_gpus_per_actor=self.num_gpus_per_server,
+            initialization_hook=self.initialization_hook,
+        )

         # Should we make two groups for worker and server?
-        self.worker_group = DataParallelGroup(**workergroup_init_args)
-        self.server_group = DataParallelGroup(**servergroup_init_args)
+        self.worker_group = DataParallelGroup(**worker_group_init_args)
+        self.server_group = DataParallelGroup(**server_group_init_args)

         # Once the group is created, we start it.
-        self.worker_group.start_actors(self.num_workers)
-        self.server_group.start_actors(
-            self.num_ps)  # server at the last num_ps processes.
+        self.worker_group._start_actors(self.num_worker)
+        # server at the last num_ps processes.
+        self.server_group._start_actors(self.num_ps)

-        worker_rets = self.worker_group.test_connection()
-        server_rets = self.server_group.test_connection()
-        ray.get(worker_rets + server_rets)
+        # worker_rets = self.worker_group.test_connection()
+        # server_rets = self.server_group.test_connection()
+        # ray.get(worker_rets + server_rets)

         ray.get(self.worker_group.setup_operator())
         ray.get(self.server_group.setup_operator())

         self.server_group.clean_redundancy()

-    def shutdown(self, force=False):
+    def shutdown(self, force: bool = False):
         self.worker_group.shutdown(force=force)
         self.server_group.shutdown(force=force)

-    def save_parameters(self, checkpoint):
+    def save_parameters(self, checkpoint: str):
         # TODO(HUI): ps save parameters.
         # First, worker rank 0 should pull the latest parameter from servers
         # Then, worker rank 0 save parameters
         self.worker_group.save_parameters(checkpoint)

-    def load_parameters(self, checkpoint):
+    def load_parameters(self, checkpoint: str):
         # TODO(HUI): ps load parameters.
         # shard parameters and send to all servers.
         self.server_group.load_parameters(checkpoint)
@@ -189,7 +198,7 @@ def _round_robin_sharding(self):
         print("Load of each ps {}".format(loads))
         self.assignments = assignments

-    def train(self, num_steps=None):
+    def train(self, num_steps: Optional[int] = None) -> dict:
         # TODO (Hao): add fault tolerance using `max_retries`.
steps = num_steps if num_steps \ else self.worker_group.get_data_loader_len() @@ -201,10 +210,10 @@ def train(self, num_steps=None): for idx in range(steps): with self._collector.record("train"): metrics = self.train_batch() - logger.info("Step: {}/{}".format(idx, steps)) + print("Step: {}/{}".format(idx, steps)) return metrics - def validate(self, num_steps=None): + def validate(self, num_steps: Optional[int] = None): steps = num_steps if num_steps \ else self.worker_group.get_data_loader_len(training=False) self.worker_group.make_iterator(training=False) @@ -215,7 +224,7 @@ def validate(self, num_steps=None): # Validate results should be the same in all workers return batch_metrics - def train_batch(self): + def train_batch(self) -> dict: loss_vals = [] rets = [] metrics = {} @@ -241,7 +250,7 @@ def train_batch(self): class PS(object): - def __init__(self, training_operator_cls, operator_config): + def __init__(self, training_operator_cls, operator_config: Optional[Mapping[str, Any]]): self.training_operator_cls = training_operator_cls self.operator_config = operator_config @@ -254,34 +263,34 @@ def setup_operator(self): self.operator_config) def setup_collective_group(self, - rank, - num_ps, - num_workers, - backend="nccl", - group_name="default"): + rank: int, + num_ps: int, + num_worker: int, + backend: str = "nccl", + group_name: str = "default"): # rank should be true rank. means, rank has already plus num_worker. self.rank = rank self.num_ps = num_ps - self.num_workers = num_workers + self.num_worker = num_worker self.group_name = group_name - self.group_size = num_ps + num_workers + self.group_size = num_ps + num_worker self._init_grad_counts() # the last num_ps processes are servers. col.init_collective_group( - num_ps + num_workers, rank, backend=backend, group_name=group_name) + num_ps + num_worker, rank, backend=backend, group_name=group_name) def test_connection(self): - for i in range(self.num_workers): - recv = util.zeros((1, ), cpu=False) + for i in range(self.num_worker): + recv = util.zeros((1,), cpu=False) col.recv(recv, i, self.group_name) assert recv == 1 - for i in range(self.num_workers): - send = util.ones((1, ), cpu=False) + for i in range(self.num_worker): + send = util.ones((1,), cpu=False) col.send(send, i, self.group_name) return def _init_grad_counts(self): - self.grad_counts = [0] * self.num_workers + self.grad_counts = [0] * self.num_worker def _init_grad_buffer(self): self.grad_buffer = { @@ -289,7 +298,7 @@ def _init_grad_buffer(self): for k, v in self.params.items() } - def get_params(self): + def get_params(self) -> dict: return self.params def set_params(self, params): @@ -302,7 +311,7 @@ def set_params(self, params): self._init_grad_buffer() def apply_updates(self, grad_buffer): - # TODO(HUI): gradient divide by num_workers + # TODO(HUI): gradient divide by num_worker self.training_operator.apply_updates(grad_buffer) self.params = self.training_operator.get_named_parameters(cpu=False) @@ -311,13 +320,13 @@ def _inc_gradients(self, gradients): if gradients[name] is not None: self.grad_buffer[name] += gradients[name] - def send_params(self, dst_rank): + def send_params(self, dst_rank: int): """ Send this param shard to the destination worker """ for name, v in self.params.items(): cv = self.training_operator.to_cupy(v) col.send(cv, dst_rank, self.group_name) - def update(self, src_rank): + def update(self, src_rank: int): """Receive gradients and update""" keys = list(self.params.keys()) grads = dict() @@ -340,7 +349,7 @@ def update(self, src_rank): 
 self.grad_counts[src_rank] = 1
         else:
             raise RuntimeError(f"This worker {src_rank} sent gradients again.")
-        if sum(self.grad_counts) == self.num_workers:
+        if sum(self.grad_counts) == self.num_worker:
             self.apply_updates(self.grad_buffer)
             self._init_grad_buffer()
@@ -359,7 +368,7 @@ class Worker(object):
-    def __init__(self, training_operator_cls, operator_config):
+    def __init__(self, training_operator_cls, operator_config: Optional[Mapping[str, Any]]):
         self.training_operator_cls = training_operator_cls
         self.operator_config = operator_config
@@ -375,29 +384,29 @@ def setup_operator(self):
             self.operator_config)

     def setup_collective_group(self,
-                               rank,
-                               num_ps,
-                               num_workers,
-                               backend="nccl",
-                               group_name="default"):
+                               rank: int,
+                               num_ps: int,
+                               num_worker: int,
+                               backend: str = "nccl",
+                               group_name: str = "default"):
         self.rank = rank
         self.num_ps = num_ps
-        self.num_workers = num_workers
+        self.num_worker = num_worker
         self.group_name = group_name
-        self.group_size = num_ps + num_workers
+        self.group_size = num_ps + num_worker
         self.name_list = [[] for i in range(num_ps)]

         # the last num_ps processes are servers.
         col.init_collective_group(
-            num_ps + num_workers, rank, backend=backend, group_name=group_name)
+            num_ps + num_worker, rank, backend=backend, group_name=group_name)

     def test_connection(self):
         for i in range(self.num_ps):
-            send = util.ones((1, ), cpu=False)
-            col.send(send, self.num_workers + i, self.group_name)
+            send = util.ones((1,), cpu=False)
+            col.send(send, self.num_worker + i, self.group_name)
         for i in range(self.num_ps):
-            recv = util.zeros((1, ), cpu=False)
-            col.recv(recv, self.num_workers + i, self.group_name)
+            recv = util.zeros((1,), cpu=False)
+            col.recv(recv, self.num_worker + i, self.group_name)
             assert recv == 1
         return
@@ -408,7 +417,7 @@ def params_distribution(self):
             distribution.append(self.training_operator.numel(v))
         return distribution

-    def make_iterator(self, training=True):
+    def make_iterator(self, training: bool = True):
         """Convert loader to be an iterator at the start of an epoch."""
         # TODO(Hao): need to check whether reaching the boundary of iterator
         # instead of making a new one every time.
@@ -419,7 +428,7 @@
             self.validation_iterator = iter(
                 self.training_operator._get_validation_loader())

-    def get_data_loader_len(self, training=True):
+    def get_data_loader_len(self, training: bool = True):
         """Return the number of batches in the data loader."""
         loader = self.training_operator._get_train_loader() if training \
             else self.training_operator._get_validation_loader()
@@ -430,9 +439,8 @@
                 "Data loader has no attribute `__len__`. "
                 "Please set `num_steps` in `train()` or `validate()`.")

-    def derive_updates(self, batch):
+    def derive_updates(self, batch: Sequence[Any]):
         # TODO (Hao): handling data loader next.
-        # TODO (Hao): change it to derive_update and apply_update.
 return self.training_operator.derive_updates(batch)

     def compute_gradients(self, params):
@@ -472,16 +480,16 @@ def split_parameters(self, assignments):
             shards[assignments[i]][k] = v
         return shards

-    def index_shard(self, shards, index):
+    def index_shard(self, shards, index: int):
         return shards[index]

     def set_parameters(self, params):
         return self.training_operator.set_parameters(params)

-    def get_parameters(self, cpu):
+    def get_parameters(self, cpu: bool):
         return self.training_operator.get_parameters(cpu)

-    def get_named_parameters(self, cpu):
+    def get_named_parameters(self, cpu: bool):
         return self.training_operator.get_named_parameters(cpu)

     def get_gradients(self):
@@ -516,7 +524,7 @@ def compute(self):
         for i in range(self.num_ps):
             for j in range(len(self.name_list[i])):
                 v = self.training_operator.to_cupy(recv_list[i][j])
-                col.recv(v, self.num_workers + i, self.group_name)
+                col.recv(v, self.num_worker + i, self.group_name)

         # 3. Set params in workers and compute gradients.
         for i in range(self.num_ps):
@@ -533,7 +541,7 @@
             this_shard = self.index_shard(split_grad, i)
             for _, v in this_shard.items():
                 cv = self.training_operator.to_cupy(v)
-                col.send(cv, self.num_workers + i, self.group_name)
+                col.send(cv, self.num_worker + i, self.group_name)
         return metrics

     def validate_batch(self):
@@ -553,33 +561,42 @@ def shutdown(self):
         return 1

-class DataParallelGroup:
-    """Spawn a group a replicas for data-parallel training."""
-
-    def __init__(self, params, dist_params, num_cpus_per_actor,
-                 num_gpus_per_actor):
-        self._params = params
-        self._dist_params = dist_params
-        self._num_cpus_per_actor = num_cpus_per_actor
-        self._num_gpus_per_actor = num_gpus_per_actor
+class DataParallelGroup(BaseDataParallelGroup):
+    """Spawn an actor group for data-parallel training."""
+    def __init__(self,
+                 actor_params: Mapping[str, Any],
+                 dist_params: Mapping[str, Any],
+                 num_cpus_per_actor: int,
+                 num_gpus_per_actor: int,
+                 initialization_hook: Optional[Callable]):
+        super(DataParallelGroup, self).__init__(actor_params=actor_params,
+                                                dist_params=dist_params,
+                                                num_cpus_per_actor=num_cpus_per_actor,
+                                                num_gpus_per_actor=num_gpus_per_actor,
+                                                initialization_hook=initialization_hook)

         self.is_server = self._dist_params["is_server"]
         self.num_ps = self._dist_params["num_ps"]
-        self.num_workers = self._dist_params["num_workers"]
+        self.num_worker = self._dist_params["num_worker"]

         self._distributed_actors = None

-    def _setup_collective_group(self, num_replicas):
+    def _setup_collective_group(self,
+                                num_ps: int,
+                                num_worker: int,
+                                backend: str,
+                                group_name: str = "default"):
         if self._dist_params["strategy"] == "ps":
-            num_ps = self._dist_params["num_ps"]
-            num_workers = self._dist_params["num_workers"]
             is_server = self.is_server
+
             rets = [
                 actor.setup_collective_group.remote(
-                    rank=i + is_server * num_workers,
-                    num_workers=num_workers,
+                    rank=i + is_server * num_worker,
+                    num_worker=num_worker,
                     num_ps=num_ps,
-                    backend="nccl")
+                    backend=backend,
+                    group_name=group_name
+                )
                 for i, actor in enumerate(self._distributed_actors)
             ]
         else:  # this can be extended for allreduce.
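The rank arithmetic in `rank=i + is_server * num_worker` above is the core of the PS layout: workers occupy ranks [0, num_worker) and servers take the trailing num_ps ranks, so every actor gets a unique rank inside one collective group. A minimal sketch of the implied layout (the sizes here are illustrative, not taken from the patch):

    # Sketch: rank layout implied by `rank=i + is_server * num_worker`.
    num_worker, num_ps = 3, 2

    worker_ranks = [i + 0 * num_worker for i in range(num_worker)]  # [0, 1, 2]
    server_ranks = [i + 1 * num_worker for i in range(num_ps)]      # [3, 4]

    # One collective group holds everyone; ranks are unique and contiguous.
    world_size = num_worker + num_ps
    assert sorted(worker_ranks + server_ranks) == list(range(world_size))

This is why the comments elsewhere in the patch say "the last num_ps processes are servers": a worker reaches server i at rank `num_worker + i`.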
@@ -593,7 +610,7 @@ def setup_operator(self): ] return setups - def start_actors(self, num_actors): + def _start_actors(self, num_actors: int): if self.is_server: RemoteActor = ray.remote( num_cpus=self._num_cpus_per_actor, @@ -604,11 +621,12 @@ def start_actors(self, num_actors): num_gpus=self._num_gpus_per_actor)(Worker) self._distributed_actors = [ - RemoteActor.remote(**self._params) for _ in range(num_actors) + RemoteActor.remote(**self._actor_params) for _ in range(num_actors) ] # setup the rank and group in each replica - ray.get(self._setup_collective_group(len(self._distributed_actors))) + ray.get(self._setup_collective_group( + self.num_ps, self.num_worker, self.backend, self.group_name)) def test_connection(self): rets = [ @@ -624,13 +642,13 @@ def set_assignments(self, assignments): ] return rets - def _make_iterator(self, training): + def _make_iterator(self, training: bool): return [actor.make_iterator.remote(training) for actor in self.actors] - def make_iterator(self, training=True): + def make_iterator(self, training: bool = True): ray.get(self._make_iterator(training)) - def get_data_loader_len(self, training=True): + def get_data_loader_len(self, training: bool = True): """Return the number of batches in the data loader.""" lens = ray.get([ actor.get_data_loader_len.remote(training=training) @@ -651,7 +669,7 @@ def validate_batch(self): stats = ray.get(rets) return stats - def shutdown(self, force=False): + def shutdown(self, force: bool = False): rets = [actor.shutdown.remote() for _, actor in enumerate(self.actors)] stats = ray.get(rets) return stats @@ -663,11 +681,11 @@ def reset(self): def actors(self): return self._distributed_actors - def save_parameters(self, checkpoint): + def save_parameters(self, checkpoint: str): rets = [self.actors[0].save_parameters.remote(checkpoint)] ray.get(rets) - def load_parameters(self, checkpoint): + def load_parameters(self, checkpoint: str): rets = [ actor.load_parameters.remote(checkpoint) for _, actor in enumerate(self.actors) @@ -681,11 +699,11 @@ def set_parameters(self, params): ] ray.get(rets) - def get_parameters(self, cpu=False): + def get_parameters(self, cpu: bool = False): ret = self.actors[0].get_parameters.remote(cpu) return ray.get([ret])[0] - def get_named_parameters(self, cpu=False): + def get_named_parameters(self, cpu: bool = False): ret = self.actors[0].get_named_parameters.remote(cpu) return ray.get([ret])[0] diff --git a/examples/jax/mnist_jax_example.py b/examples/jax/mnist_jax_example.py index 494c8fa..db7abe2 100644 --- a/examples/jax/mnist_jax_example.py +++ b/examples/jax/mnist_jax_example.py @@ -81,11 +81,11 @@ def criterion(logits, targets): def make_ar_strategy(args): strategy = AllReduceStrategy( training_operator_cls=MnistTrainingOperator, - world_size=args.num_workers, + world_size=args.num_worker, operator_config={ "lr": 0.01, "batch_size": 128, - "num_workers": args.num_workers, + "num_worker": args.num_worker, "num_classes": 10, "model_name": args.model_name }, @@ -96,8 +96,8 @@ def make_ar_strategy(args): def make_ps_strategy(args): strategy = ParameterServerStrategy( training_operator_cls=MnistTrainingOperator, - world_size=args.num_workers, - num_workers=args.num_workers - args.num_ps, + world_size=args.num_worker, + num_worker=args.num_worker - args.num_ps, num_ps=args.num_ps, operator_config={ "lr": 0.01, @@ -116,7 +116,7 @@ def make_ps_strategy(args): type=str, help="the address to use for connecting to the Ray cluster") parser.add_argument( - "--num-workers", + "--num-worker", "-n", 
type=int, default=2, @@ -150,8 +150,8 @@ def make_ps_strategy(args): ray.init(args.address) else: ray.init( - num_gpus=args.num_workers, - num_cpus=args.num_workers * 2, + num_gpus=args.num_worker, + num_cpus=args.num_worker * 2, log_to_driver=True) if args.strategy == "ar": From 21d9a3503ac9e263ba6e0a033403b683c8e42592 Mon Sep 17 00:00:00 2001 From: Ezra-H Date: Tue, 18 May 2021 00:39:41 +0800 Subject: [PATCH 10/13] lint --- distml/operator/jax_operator.py | 28 +++++---- distml/strategy/allreduce_strategy.py | 32 +++++----- distml/strategy/base_strategy.py | 11 ++-- distml/strategy/ps_strategy.py | 88 ++++++++++++++++----------- examples/jax/mnist_jax_example.py | 1 - 5 files changed, 86 insertions(+), 74 deletions(-) diff --git a/distml/operator/jax_operator.py b/distml/operator/jax_operator.py index 640e6f0..2ae070b 100644 --- a/distml/operator/jax_operator.py +++ b/distml/operator/jax_operator.py @@ -1,3 +1,5 @@ +from typing import Any, Mapping, Optional + import numpy as np import cupy as cp @@ -14,7 +16,7 @@ class JAXTrainingOperator(TrainingOperator): - def __init__(self, operator_config): + def __init__(self, operator_config: Optional[Mapping[str, Any]]): super(JAXTrainingOperator, self).__init__(operator_config) # Should be set by users in the `register` function. # model methods @@ -64,7 +66,7 @@ def setup(self, *args, **kwargs): raise NotImplementedError("Please override this function to register " "your model, optimizer, and criterion.") - def register(self, *, model, optimizer, criterion, jit_mode=False): + def register(self, *, model, optimizer, criterion, jit_mode: bool = False): """Register a few critical information about the model to operator. Args: @@ -273,7 +275,7 @@ def validate_batch(self, batch): "samples_num": samples_num } - def get_parameters(self, cpu): + def get_parameters(self, cpu: bool): """get the flatten parameters.""" params = self.get_params(self.opt_state) flatten_params, tree = tree_flatten(params) @@ -284,7 +286,7 @@ def get_parameters(self, cpu): flatten_params = list(map(np.asarray, flatten_params)) return flatten_params - def get_named_parameters(self, cpu): + def get_named_parameters(self, cpu: bool): """Get the named parameters. In jax, we need to construct a dict to contain the parameters. @@ -335,7 +337,7 @@ def update(param, state): zip(subtrees, new_subtrees)): if new_subtree != subtree: msg = ( - "input structur did not match the save params struture. " + "input structure did not match the save params structure. " "input {} and output {}.") raise TypeError(msg.format(subtree, new_subtree)) @@ -350,25 +352,25 @@ def reset_optimizer_for_params(self, params): self.tree = tree_structure(params) self.opt_state = self.opt_init(params) - def ones(self, shape, cpu=True): + def ones(self, shape, cpu: bool = True): if cpu: return np.ones(shape) else: return jnp.ones(shape) - def zeros(self, shape, cpu=True): + def zeros(self, shape, cpu: bool = True): if cpu: return np.zeros(shape) else: return jnp.zeros(shape) - def ones_like(self, x, cpu=True): + def ones_like(self, x, cpu: bool = True): if cpu: return np.ones_like(x) else: return jnp.ones_like(x) - def zeros_like(self, x, cpu=True): + def zeros_like(self, x, cpu: bool = True): if cpu: return np.zeros_like(x) else: @@ -385,21 +387,21 @@ def clean_redundancy(self): del self._validation_loader # TODO(HUI): use pickle to serialize parameters or states and save it. 
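The TODO above points at pickle-based checkpointing, which a later patch in this series implements. As a rough stand-alone illustration of the idea, assuming the flattened parameters are JAX or numpy arrays (the function names here are hypothetical, not part of the operator API):

    import pickle

    import numpy as np

    def save_flat_params(flat_params, checkpoint):
        # np.asarray pulls device arrays back to the host before pickling.
        with open(checkpoint, "wb") as f:
            pickle.dump([np.asarray(p) for p in flat_params], f)

    def load_flat_params(checkpoint):
        with open(checkpoint, "rb") as f:
            return pickle.load(f)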
- def save_parameters(self, checkpoint): + def save_parameters(self, checkpoint: str): raise NotImplementedError( "save_parameters is not support in jax operator.") - def load_parameters(self, checkpoint): + def load_parameters(self, checkpoint: str): raise NotImplementedError( "load_parameters is not support in jax operator.") - def save_states(self, checkpoint): + def save_states(self, checkpoint: str): raise NotImplementedError( "save_states is not support in jax operator.") def get_states(self): raise NotImplementedError("get_states is not support in jax operator.") - def load_states(self, checkpoint): + def load_states(self, checkpoint: str): raise NotImplementedError( "load_states is not support in jax operator.") diff --git a/distml/strategy/allreduce_strategy.py b/distml/strategy/allreduce_strategy.py index 3016625..dc557a8 100644 --- a/distml/strategy/allreduce_strategy.py +++ b/distml/strategy/allreduce_strategy.py @@ -1,5 +1,5 @@ import logging -from typing import Callable, Mapping, Any, Optional +from typing import Callable, Mapping, Any, Optional, Dict import ray import ray.util.collective as col @@ -57,7 +57,7 @@ def __init__(self, else: self._collector = ThroughputCollection() - def train(self, num_steps: Optional[int] = None): + def train(self, num_steps: Optional[int] = None) -> Dict: """Run the training on parallel workers. Args: @@ -79,7 +79,7 @@ def train(self, num_steps: Optional[int] = None): print("Step: {}/{}".format(idx, steps)) return metrics - def validate(self, num_steps: Optional[int] = None): + def validate(self, num_steps: Optional[int] = None) -> Dict: """Evaluates the model on the validation data. Args: @@ -149,7 +149,8 @@ class Replica: and Ray collective group setup. """ - def __init__(self, training_operator_cls, operator_config: Optional[Mapping[str, Any]]): + def __init__(self, training_operator_cls, + operator_config: Optional[Mapping[str, Any]]): self.training_operator_cls = training_operator_cls self.operator_config = operator_config # Training operator @@ -189,7 +190,7 @@ def make_iterator(self, training: bool = True): else: self.validation_iterator = iter(self.validation_loader) - def get_data_loader_len(self, training: bool = True): + def get_data_loader_len(self, training: bool = True) -> int: """Return the number of batches in the data loader.""" loader = self.train_loader if training \ else self.validation_loader @@ -200,7 +201,7 @@ def get_data_loader_len(self, training: bool = True): "Data loader has no attribute `__len__`. 
" "Please set `num_steps` in `train()` or `validate()`.") - def train_batch(self) -> dict: + def train_batch(self) -> Dict: metrics = {} try: batch = next(self.train_iterator) @@ -218,7 +219,7 @@ def train_batch(self) -> dict: self.apply_updates(updates) return metrics - def derive_updates(self, batch) -> dict: + def derive_updates(self, batch) -> Dict: return self.training_operator.derive_updates(batch) def apply_updates(self, updates): @@ -277,17 +278,16 @@ def group_name(self): class DataParallelGroup(BaseDataParallelGroup): """Spawn a replica group for data-parallel training.""" - def __init__(self, - actor_params: Mapping[str, Any], - dist_params: Mapping[str, Any], - num_cpus_per_actor: int, + def __init__(self, actor_params: Mapping[str, Any], + dist_params: Mapping[str, Any], num_cpus_per_actor: int, num_gpus_per_actor: int, initialization_hook: Optional[Callable]): - super(DataParallelGroup, self).__init__(actor_params=actor_params, - dist_params=dist_params, - num_cpus_per_actor=num_cpus_per_actor, - num_gpus_per_actor=num_gpus_per_actor, - initialization_hook=initialization_hook) + super(DataParallelGroup, self).__init__( + actor_params=actor_params, + dist_params=dist_params, + num_cpus_per_actor=num_cpus_per_actor, + num_gpus_per_actor=num_gpus_per_actor, + initialization_hook=initialization_hook) self._replicas = None @property diff --git a/distml/strategy/base_strategy.py b/distml/strategy/base_strategy.py index 27a92d0..4ad1e80 100644 --- a/distml/strategy/base_strategy.py +++ b/distml/strategy/base_strategy.py @@ -1,7 +1,7 @@ from abc import ABCMeta from abc import abstractmethod import logging -from typing import AbstractSet, Callable, Any, Mapping, Optional +from typing import Callable, Any, Mapping, Optional import ray @@ -88,13 +88,10 @@ def shutdown(self, force: bool = False): class BaseDataParallelGroup: """Spawn a actor group for data-parallel training.""" - def __init__(self, - actor_params: Mapping[str, Any], - dist_params: Mapping[str, Any], - num_cpus_per_actor: int, + def __init__(self, actor_params: Mapping[str, Any], + dist_params: Mapping[str, Any], num_cpus_per_actor: int, num_gpus_per_actor: int, - initialization_hook: Optional[Callable], - **kwargs): + initialization_hook: Optional[Callable], **kwargs): self._actor_params = actor_params self._dist_params = dist_params self._backend = self._dist_params["backend"] diff --git a/distml/strategy/ps_strategy.py b/distml/strategy/ps_strategy.py index 0143365..a3486b8 100644 --- a/distml/strategy/ps_strategy.py +++ b/distml/strategy/ps_strategy.py @@ -1,4 +1,4 @@ -from typing import Tuple, List, Callable, Mapping, Union, Any, Optional, Sequence +from typing import List, Callable, Mapping, Any, Optional, Sequence, Dict import ray import ray.util.collective as col @@ -39,8 +39,8 @@ def __init__(self, world_size: int = 2, num_worker: int = 1, num_ps: int = 1, - backend="nccl", - group_name="default", + backend: str = "nccl", + group_name: str = "default", num_cpus_per_worker: int = 1, num_gpus_per_worker: int = 1, num_cpus_per_server: int = 1, @@ -103,10 +103,7 @@ def _init_strategy(self): ray.get([server.set_params.remote(this_shard_ref)]) def _start_workers(self): - """Start worker group and server group. - """ - # TODO (Hao): infer the per-replica batch size here... - + """Start worker group and server group.""" # so here we get two set of params that will be passed around: # (1) Those for setting up training logic in training_operator, # including: batch size, user defined operator_config. 
@@ -198,7 +195,16 @@ def _round_robin_sharding(self): print("Load of each ps {}".format(loads)) self.assignments = assignments - def train(self, num_steps: Optional[int] = None) -> dict: + def train(self, num_steps: Optional[int] = None) -> Dict: + """Run the training on parallel workers. + + Args: + num_steps (int): number of steps to train. If none, the + function will simply train for one epoch. + + Returns: + None + """ # TODO (Hao): add fault tolerance using `max_retries`. steps = num_steps if num_steps \ else self.worker_group.get_data_loader_len() @@ -213,7 +219,14 @@ def train(self, num_steps: Optional[int] = None) -> dict: print("Step: {}/{}".format(idx, steps)) return metrics - def validate(self, num_steps: Optional[int] = None): + def validate(self, num_steps: Optional[int] = None) -> Dict: + """Evaluates the model on the validation data. + + Args: + num_steps (int): number of batches to evaluate. If None, the + function will simply validate across the entire validation + dataset. + """ steps = num_steps if num_steps \ else self.worker_group.get_data_loader_len(training=False) self.worker_group.make_iterator(training=False) @@ -224,7 +237,7 @@ def validate(self, num_steps: Optional[int] = None): # Validate results should be the same in all workers return batch_metrics - def train_batch(self) -> dict: + def train_batch(self) -> Dict: loss_vals = [] rets = [] metrics = {} @@ -250,7 +263,8 @@ def train_batch(self) -> dict: class PS(object): - def __init__(self, training_operator_cls, operator_config: Optional[Mapping[str, Any]]): + def __init__(self, training_operator_cls, + operator_config: Optional[Mapping[str, Any]]): self.training_operator_cls = training_operator_cls self.operator_config = operator_config @@ -281,11 +295,11 @@ def setup_collective_group(self, def test_connection(self): for i in range(self.num_worker): - recv = util.zeros((1,), cpu=False) + recv = util.zeros((1, ), cpu=False) col.recv(recv, i, self.group_name) assert recv == 1 for i in range(self.num_worker): - send = util.ones((1,), cpu=False) + send = util.ones((1, ), cpu=False) col.send(send, i, self.group_name) return @@ -368,7 +382,8 @@ def shutdown(self): class Worker(object): - def __init__(self, training_operator_cls, operator_config: Optional[Mapping[str, Any]]): + def __init__(self, training_operator_cls, + operator_config: Optional[Mapping[str, Any]]): self.training_operator_cls = training_operator_cls self.operator_config = operator_config @@ -402,15 +417,15 @@ def setup_collective_group(self, def test_connection(self): for i in range(self.num_ps): - send = util.ones((1,), cpu=False) + send = util.ones((1, ), cpu=False) col.send(send, self.num_worker + i, self.group_name) for i in range(self.num_ps): - recv = util.zeros((1,), cpu=False) + recv = util.zeros((1, ), cpu=False) col.recv(recv, self.num_worker + i, self.group_name) assert recv == 1 return - def params_distribution(self): + def params_distribution(self) -> List: distribution = [] weights = self.get_named_parameters(cpu=True) for k, v in weights.items(): @@ -428,7 +443,7 @@ def make_iterator(self, training: bool = True): self.validation_iterator = iter( self.training_operator._get_validation_loader()) - def get_data_loader_len(self, training: bool = True): + def get_data_loader_len(self, training: bool = True) -> int: """Return the number of batches in the data loader.""" loader = self.training_operator._get_train_loader() if training \ else self.training_operator._get_validation_loader() @@ -439,7 +454,7 @@ def get_data_loader_len(self, 
training: bool = True):
                 "Data loader has no attribute `__len__`. "
                 "Please set `num_steps` in `train()` or `validate()`.")

-    def derive_updates(self, batch: Sequence[Any]):
+    def derive_updates(self, batch: Sequence[Any]) -> Dict:
         # TODO (Hao): handling data loader next.
         return self.training_operator.derive_updates(batch)
@@ -463,7 +478,7 @@ def compute_gradients(self, params):

         return loss_val, grads

-    def split_gradients(self, grad, assignments):
+    def split_gradients(self, grad, assignments) -> List:
         # assuming messages are gradients or parameters
         # this grad is ready to be called by apply_gradients in ParameterServer
         num_shards = np.unique(np.array(assignments)).size
@@ -472,7 +487,7 @@
             shards[assignments[i]][k] = v
         return shards

-    def split_parameters(self, assignments):
+    def split_parameters(self, assignments) -> List:
         params = self.get_named_parameters(cpu=False)
         num_shards = np.unique(np.array(assignments)).size
         shards = [dict() for i in range(num_shards)]
@@ -484,12 +499,12 @@ def index_shard(self, shards, index: int):
         return shards[index]

     def set_parameters(self, params):
-        return self.training_operator.set_parameters(params)
+        self.training_operator.set_parameters(params)

-    def get_parameters(self, cpu: bool):
+    def get_parameters(self, cpu: bool) -> List:
         return self.training_operator.get_parameters(cpu)

-    def get_named_parameters(self, cpu: bool):
+    def get_named_parameters(self, cpu: bool) -> Dict:
         return self.training_operator.get_named_parameters(cpu)

     def get_gradients(self):
@@ -564,17 +579,16 @@ def shutdown(self):

 class DataParallelGroup(BaseDataParallelGroup):
     """Spawn an actor group for data-parallel training."""

-    def __init__(self,
-                 actor_params: Mapping[str, Any],
-                 dist_params: Mapping[str, Any],
-                 num_cpus_per_actor: int,
+    def __init__(self, actor_params: Mapping[str, Any],
+                 dist_params: Mapping[str, Any], num_cpus_per_actor: int,
                  num_gpus_per_actor: int,
                  initialization_hook: Optional[Callable]):
-        super(DataParallelGroup, self).__init__(actor_params=actor_params,
-                                                dist_params=dist_params,
-                                                num_cpus_per_actor=num_cpus_per_actor,
-                                                num_gpus_per_actor=num_gpus_per_actor,
-                                                initialization_hook=initialization_hook)
+        super(DataParallelGroup, self).__init__(
+            actor_params=actor_params,
+            dist_params=dist_params,
+            num_cpus_per_actor=num_cpus_per_actor,
+            num_gpus_per_actor=num_gpus_per_actor,
+            initialization_hook=initialization_hook)
         self.is_server = self._dist_params["is_server"]
         self.num_ps = self._dist_params["num_ps"]
         self.num_worker = self._dist_params["num_worker"]
@@ -595,8 +609,7 @@ def _setup_collective_group(self,
                     num_worker=num_worker,
                     num_ps=num_ps,
                     backend=backend,
-                    group_name=group_name
-                )
+                    group_name=group_name)
                 for i, actor in enumerate(self._distributed_actors)
             ]
         else:  # this can be extended for allreduce.
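`split_gradients` and `split_parameters` above share one contract: the i-th entry of `assignments` names the destination server shard for the i-th key of the named dict, relying on dict insertion order. A small self-contained sketch of that contract (values and names are illustrative):

    def split_by_assignments(named_values, assignments):
        # One dict shard per server; assignments[i] picks the shard
        # for the i-th key, in insertion order.
        shards = [dict() for _ in range(max(assignments) + 1)]
        for i, (k, v) in enumerate(named_values.items()):
            shards[assignments[i]][k] = v
        return shards

    grads = {"w1": 1.0, "b1": 2.0, "w2": 3.0}
    assert split_by_assignments(grads, [0, 1, 0]) == [
        {"w1": 1.0, "w2": 3.0},
        {"b1": 2.0},
    ]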
@@ -625,8 +638,9 @@ def _start_actors(self, num_actors: int):
         ]

         # setup the rank and group in each replica
-        ray.get(self._setup_collective_group(
-            self.num_ps, self.num_worker, self.backend, self.group_name))
+        ray.get(
+            self._setup_collective_group(self.num_ps, self.num_worker,
+                                         self.backend, self.group_name))

     def test_connection(self):
         rets = [
diff --git a/examples/jax/mnist_jax_example.py b/examples/jax/mnist_jax_example.py
index db7abe2..1a189b3 100644
--- a/examples/jax/mnist_jax_example.py
+++ b/examples/jax/mnist_jax_example.py
@@ -8,7 +8,6 @@
 from distml.strategy.allreduce_strategy import AllReduceStrategy
 from distml.strategy.ps_strategy import ParameterServerStrategy

-from ray.util.sgd.utils import override

 from jax import random

From 43498f8af7a901fc98ac3891095110efb1f8fabd Mon Sep 17 00:00:00 2001
From: Ezra-H
Date: Tue, 8 Jun 2021 01:00:23 +0800
Subject: [PATCH 11/13] strategy load/save states

---
 distml/operator/base_operator.py      |   8 +-
 distml/operator/jax_operator.py       | 153 ++++++++++++++++---
 distml/operator/torch_operator.py     | 106 ++++++++++++-
 distml/strategy/allreduce_strategy.py | 106 ++++++++-----
 distml/strategy/base_strategy.py      |  36 ++++-
 distml/strategy/ps_strategy.py        | 206 +++++++++++++++++---------
 examples/jax/mnist_jax_example.py     |   7 +
 7 files changed, 485 insertions(+), 137 deletions(-)

diff --git a/distml/operator/base_operator.py b/distml/operator/base_operator.py
index 892134b..9af342d 100644
--- a/distml/operator/base_operator.py
+++ b/distml/operator/base_operator.py
@@ -1,6 +1,7 @@
 """Abstract class for framework-specific training operators."""
 from abc import ABCMeta
 from abc import abstractmethod
+from typing import Optional, Sequence

 class TrainingOperator(metaclass=ABCMeta):
@@ -90,7 +91,7 @@ def load_custom_states(self, states, *args, **kwargs):
         pass

     @abstractmethod
-    def save_states(self, checkpoint):
+    def save_states(self, checkpoint: str):
         """Save the states to a file path.

         This function shall be instantiated in framework-specific operator
@@ -104,7 +105,10 @@ def get_states(self):
         raise NotImplementedError()

     @abstractmethod
-    def load_states(self, checkpoint):
+    def load_states(self,
+                    states=None,
+                    checkpoint: Optional[str] = None,
+                    keys: Optional[Sequence[str]] = None):
         """Load the states from a file path.

         This function shall be instantiated in framework-specific operators
diff --git a/distml/operator/jax_operator.py b/distml/operator/jax_operator.py
index 2ae070b..6feff06 100644
--- a/distml/operator/jax_operator.py
+++ b/distml/operator/jax_operator.py
@@ -1,4 +1,8 @@
-from typing import Any, Mapping, Optional
+import os
+import pickle
+import warnings
+
+from typing import Any, Mapping, Optional, List, Dict, Sequence

 import numpy as np
 import cupy as cp
@@ -16,7 +20,7 @@
 class JAXTrainingOperator(TrainingOperator):

-    def __init__(self, operator_config: Optional[Mapping[str, Any]]):
+    def __init__(self, *, operator_config: Optional[Mapping[str, Any]]):
         super(JAXTrainingOperator, self).__init__(operator_config)
         # Should be set by users in the `register` function.
         # model methods
@@ -29,11 +33,14 @@
         self.get_params = None

         self.criterion = None
+        self.lr_scheduler = None

         # Data loaders for training and validation, registered by users.
         self._train_loader = None
         self._validation_loader = None

+        self._custom_states = None
+
         self.setup(operator_config)

         if hasattr(operator_config, "jit_mode"):
@@ -267,15 +274,15 @@ def validate_batch(self, batch):
         targets_class = jnp.argmax(targets, axis=1)

         acc = jnp.mean(prediction_class == targets_class)
-        samples_num = targets.shape[0]
+        num_sample = targets.shape[0]

         return {
             "val_loss": loss.item(),
             "val_accuracy": acc.item(),
-            "samples_num": samples_num
+            "num_sample": num_sample
         }

-    def get_parameters(self, cpu: bool):
+    def get_parameters(self, cpu: bool) -> List:
         """Get the flattened parameters."""
         params = self.get_params(self.opt_state)
         flatten_params, tree = tree_flatten(params)
@@ -284,9 +291,11 @@

         if cpu:
             flatten_params = list(map(np.asarray, flatten_params))
+        else:
+            flatten_params = list(map(jnp.asarray, flatten_params))
         return flatten_params

-    def get_named_parameters(self, cpu: bool):
+    def get_named_parameters(self, cpu: bool) -> Dict:
         """Get the named parameters.

         In jax, we need to construct a dict to contain the parameters.
@@ -299,6 +308,7 @@
             }
         else:
             dict_params = {f"{idx}": p for idx, p in enumerate(params)}
+
         return dict_params

     # TODO(HUI): used in load states or load parameters
@@ -312,6 +322,9 @@ def set_parameters(self, new_params):
         """
         assert isinstance(new_params, dict)

+        # Make sure all params are on GPU; this should be controlled
+        # by a use_gpu option.
+        new_params = {k: jax.device_put(v) for k, v in new_params.items()}
+
         keys, new_params = unzip2(
             sorted(new_params.items(), key=lambda d: int(d[0])))
         self.preset_keys = keys
@@ -349,6 +362,8 @@
         keys, params = unzip2(sorted(params.items(), key=lambda d: int(d[0])))
+
+        self.preset_keys = keys  # The keys to index the params.
         self.tree = tree_structure(params)
         self.opt_state = self.opt_init(params)
@@ -383,25 +398,117 @@ def asarray(self, v):
         return jnp.asarray(v)

     def clean_redundancy(self):
-        del self._train_loader
-        del self._validation_loader
+        if self._train_loader:
+            del self._train_loader
+            self._train_loader = None
+        if self._validation_loader:
+            del self._validation_loader
+            self._validation_loader = None
-    def save_parameters(self, checkpoint: str):
-        raise NotImplementedError(
-            "save_parameters is not support in jax operator.")
+    def register_custom_states(self, custom_states):
+        self._custom_states = custom_states

-    def load_parameters(self, checkpoint: str):
-        raise NotImplementedError(
-            "load_parameters is not support in jax operator.")
+    def get_custom_states(self):
+        return self._custom_states

-    def save_states(self, checkpoint: str):
-        raise NotImplementedError(
-            "save_states is not support in jax operator.")
+    def get_states(self) -> Dict:
+        """Return the states of this training operator."""
+
+        states_flat, tree, subtrees = self.opt_state

-    def get_states(self):
-        raise NotImplementedError("get_states is not support in jax operator.")
+        states_unflat = map(tree_unflatten, subtrees, states_flat)

-    def load_states(self, checkpoint: str):
-        raise NotImplementedError(
-            "load_states is not support in jax operator.")
+        states_unflat_dict = {
+            str(idx): value
+            for idx, value in enumerate(states_unflat)
+        }
+
+        states = {
+            "opt_state": states_unflat_dict,
+        }
+
+        if self._custom_states:
+            states.update({"custom": self.get_custom_states()})
+
+        if self.lr_scheduler and hasattr(self.lr_scheduler,
+                                         "get_state_dict"):
+            states.update({"lr_scheduler": self.lr_scheduler.get_state_dict()})
+
+        return states
+
+    def save_states(self, checkpoint: str):
+        states = self.get_states()
+        with open(checkpoint, "wb") as f:
+            pickle.dump(states, f)
+
+    def load_states(self,
+                    states=None,
+                    checkpoint: Optional[str] = None,
+                    keys: Optional[Sequence[str]] = None):
+        if checkpoint:
+            assert ".pkl" in checkpoint, \
+                "checkpoint should be a .pkl file. Got {}".format(checkpoint)
+            if not os.path.exists(checkpoint):
+                raise RuntimeError("Checkpoint file doesn't exist.")
+            with open(checkpoint, "rb") as f:
+                states = pickle.load(f)
+
+        if states:
+            new_opt_states = states.get("opt_state", None)
+            custom_states = states.get("custom", None)
+            lr_scheduler_states = states.get("lr_scheduler", None)
+
+            if not new_opt_states:
+                raise RuntimeError("opt_state in the checkpoint is empty.")
+
+            assert isinstance(new_opt_states, dict)
+
+            if not keys:
+                keys = tuple([
+                    str(idx)
+                    for idx in range(len(self.get_parameters(cpu=False)))
+                ])
+            else:
+                # construct_opt_states_dict = OrderedDict()
+                construct_opt_states_dict = dict()
+                for key in keys:
+                    construct_opt_states_dict[key] = new_opt_states[key]
+                new_opt_states = construct_opt_states_dict
+
+            new_keys, new_opt_states = unzip2(
+                sorted(new_opt_states.items(), key=lambda d: int(d[0])))
+
+            keys = tuple(keys)
+            new_keys = tuple(new_keys)
+            assert keys == new_keys, \
+                "checkpoint key doesn't match the model params."
+
+            states_flat, tree, subtrees = self.opt_state
+            states_flat_2, subtrees_2 = unzip2(
+                map(tree_flatten, new_opt_states))
+
+            if not subtrees_2:
+                raise RuntimeError("subtrees of new params is empty.")
+            for idx, (subtree, subtree_2) in enumerate(
+                    zip(subtrees, subtrees_2)):
+                if subtree_2 != subtree:
+                    msg = ("input structure did not match the save params "
+                           "structure.
input {} and output {}.") + raise TypeError(msg.format(subtree, subtree_2)) + + self.opt_state = OptimizerState(states_flat_2, tree, subtrees_2) + + if custom_states: + self._custom_states.update(custom_states) + + if lr_scheduler_states: + if hasattr(self.lr_scheduler, "set_states_dict"): + self.lr_scheduler.set_states_dict(lr_scheduler_states) + else: + warnings.warn( + "lr scheduler must have `set_states_dict` method" + " to support loading lr scheduler states.") + else: + raise RuntimeError("This checkpoint is empty." + "Got checkpoint {}, states {}".format( + checkpoint, states)) diff --git a/distml/operator/torch_operator.py b/distml/operator/torch_operator.py index 3ddd667..d0a98f8 100644 --- a/distml/operator/torch_operator.py +++ b/distml/operator/torch_operator.py @@ -168,9 +168,55 @@ def validate_batch(self, batch): loss = criterion(output, target) # Todo(Hao): report accuracy instead loss here. - batch_metric = {"val_loss": loss.item()} + batch_metric = {"val_loss": loss.item(), "num_sample": target.size(0)} return batch_metric + def get_named_parameters(self, cpu): + named_params = self._model.named_parameters() + is_cuda = next(self._model.parameters()).is_cuda + output_params = {} + + if cpu: + if is_cuda: + for key, p in named_params: + output_params[key] = p.cpu() + else: + for key, p in named_params: + output_params[key] = p + else: + if not is_cuda: + for key, p in named_params: + # TODO(HUI): should put in specific device. + named_params[key] = p.cuda() + else: + for key, p in named_params: + output_params[key] = p + + return output_params + + def get_parameters(self, cpu): + params = self._model.parameters() + is_cuda = next(self._model.parameters()).is_cuda + output_params = [] + + if cpu: + if is_cuda: + for p in params: + output_params.append(p.cpu()) + else: + for p in params: + output_params.append(p) + else: + if not is_cuda: + for idx, p in enumerate(params): + # TODO(HUI): should put in specific device. + output_params(p.cuda()) + else: + for p in params: + output_params.append(p) + + return output_params + def get_states(self): """Return the states of this training operator.""" states = { @@ -196,12 +242,47 @@ def load_states(self, states=None, checkpoint=None): self._lr_scheduler.load_state_dict(states["lr_scheduler"]) self.load_custom_states(states["custom"]) + def _load_from_checkpoint(self, checkpoint): + return torch.load(checkpoint) + def save_states(self, checkpoint): """Save the states to a file path.""" states = self.get_states() # TODO(Hao): test this. torch.save(states, checkpoint) + def clean_redundancy(self): + del self._train_loader + del self._validation_loader + + def set_parameters(self, params): + if isinstance(params, dict): + self._model.load_state_dict(params) + else: + raise RuntimeError("params is not dict." + "Got {}".format(type(params))) + + def reset_optimizer_for_params(self, params): + if isinstance(params, dict): + params_list = [] + + for k, v in params.items(): + params_list.append(v) + params = params_list + + _optimizer = self._optimizer + + _optimizer.param_groups = [] + + param_groups = list(params) + if len(param_groups) == 0: + raise ValueError("optimizer got an empty parameter list") + if not isinstance(param_groups[0], dict): + param_groups = [{'params': param_groups}] + + for param_group in param_groups: + _optimizer.add_param_group(param_group) + @staticmethod def _get_gradients(model): """Return the gradient updates of the model as a Python dict. 
@@ -241,3 +322,26 @@ def _set_gradients(model, grads): # to(p.grad.device) # else: # p.grad = torch.from_numpy(gradients[name]) + + def ones(self, shape, cpu: bool = True): + tensor = torch.ones(shape) + return tensor if cpu else tensor.cuda() + + def zeros(self, shape, cpu: bool = True): + tensor = torch.zeros(shape) + return tensor if cpu else tensor.cuda() + + def ones_like(self, x, cpu: bool = True): + tensor = torch.ones_like(x) + return tensor if cpu else tensor.cuda() + + def zeros_like(self, x, cpu: bool = True): + tensor = torch.zeros_like(x) + return tensor if cpu else tensor.cuda() + + @staticmethod + def numel(tensor): + return tensor.numel() + + def asarray(self, v): + return torch.as_tensor(v) diff --git a/distml/strategy/allreduce_strategy.py b/distml/strategy/allreduce_strategy.py index dc557a8..1cc3d82 100644 --- a/distml/strategy/allreduce_strategy.py +++ b/distml/strategy/allreduce_strategy.py @@ -1,13 +1,14 @@ import logging -from typing import Callable, Mapping, Any, Optional, Dict +from typing import List, Callable, Mapping, Any, Optional, Dict import ray import ray.util.collective as col -from distml.strategy.base_strategy import BaseStrategy, BaseDataParallelGroup -from distml.util import ThroughputCollection +from ray.util.sgd.utils import AverageMeterCollection import numpy as np +from distml.strategy.base_strategy import BaseStrategy, BaseDataParallelGroup + logger = logging.getLogger(__name__) logger.setLevel(logging.INFO) @@ -49,13 +50,11 @@ def __init__(self, num_gpus_per_worker=num_gpus_per_worker, **kwargs) self._global_batch_size = None + if operator_config and operator_config.get("batch_size"): self._global_batch_size = operator_config.get("batch_size") - if self._global_batch_size: - self._collector = ThroughputCollection( - batch_size=self._global_batch_size) - else: - self._collector = ThroughputCollection() + + self._init_strategy() def train(self, num_steps: Optional[int] = None) -> Dict: """Run the training on parallel workers. @@ -65,8 +64,10 @@ def train(self, num_steps: Optional[int] = None) -> Dict: function will simply train for one epoch. Returns: - None + metric (dict): metric of the training set. """ + # TODO(HUI): metric use hook to control. + # TODO (Hao): add fault tolerance using `max_retries`. steps = num_steps if num_steps \ else self.data_parallel_group.get_data_loader_len() @@ -74,10 +75,9 @@ def train(self, num_steps: Optional[int] = None) -> Dict: # TODO(Hao): this call should be hidden inside Replica. self.data_parallel_group.make_iterator() for idx in range(steps): - with self._collector.record("train"): - metrics = self.data_parallel_group.train_batch() + metric = self.data_parallel_group.train_batch() print("Step: {}/{}".format(idx, steps)) - return metrics + return metric def validate(self, num_steps: Optional[int] = None) -> Dict: """Evaluates the model on the validation data. @@ -86,18 +86,35 @@ def validate(self, num_steps: Optional[int] = None) -> Dict: num_steps (int): number of batches to evaluate. If None, the function will simply validate across the entire validation dataset. + + Returns: + metric (dict): metric of the validate set. 
""" steps = num_steps if num_steps \ else self.data_parallel_group.get_data_loader_len(training=False) + + metrics = [ + AverageMeterCollection() + for _ in range(len(self.data_parallel_group.replicas)) + ] + self.data_parallel_group.make_iterator(training=False) for idx in range(steps): - with self._collector.record("validate"): - batch_metrics = self.data_parallel_group.validate_batch() - self._collector.update( - "validate", val_acc=batch_metrics[0]["val_loss"]) - self._collector.save("validate") + batch_metrics = self.data_parallel_group.validate_batch() + + for metric_idx, metric in enumerate(batch_metrics): + num_sample = metric.pop("num_sample") + metrics[metric_idx].update(metric, n=num_sample) + # TODO: validate result should be the same in all workers - return batch_metrics + return metrics[0].summary() + + def _init_strategy(self): + """Do initialization for the distributed strategy.""" + # All sync with replica 0 + init_weights = self.data_parallel_group.get_named_parameters(cpu=True) + # all replicas get synced + self.data_parallel_group.set_parameters(init_weights) def _start_workers(self): """Create distributed workers on the Ray cluster for distributed training. @@ -132,14 +149,14 @@ def _start_workers(self): def shutdown(self, force: bool = False): self.data_parallel_group.shutdown(force=force) - def save_parameters(self, checkpoint: str): - self.data_parallel_group.save_parameters(checkpoint) + def get_states(self): + return self.data_parallel_group.get_states() - def load_parameters(self, checkpoint: str): - self.data_parallel_group.load_parameters(checkpoint) + def save_states(self, checkpoint: str): + self.data_parallel_group.save_states(checkpoint) - def _init_strategy(self): - pass + def load_states(self, states=None, checkpoint: Optional[str] = None): + self.data_parallel_group.load_states(states, checkpoint) class Replica: @@ -205,7 +222,7 @@ def train_batch(self) -> Dict: metrics = {} try: batch = next(self.train_iterator) - except StopIteration and NameError: + except StopIteration or NameError: self.make_iterator() batch = next(self.train_iterator) loss_val, updates = self.derive_updates(batch) @@ -214,7 +231,7 @@ def train_batch(self) -> Dict: metrics["train_loss"] = loss_val for _, g in updates.items(): cg = self.training_operator.to_cupy(g) - col.allreduce(cg) + col.allreduce(cg, self.group_name) cg = cg / float(self.world_size) self.apply_updates(updates) return metrics @@ -231,7 +248,7 @@ def updates_transform(self, updates): def validate_batch(self): try: batch = next(self.validation_iterator) - except StopIteration and NameError: + except StopIteration or NameError: self.make_iterator(training=False) batch = next(self.validation_iterator) batch_metric = self.training_operator.validate_batch(batch) @@ -244,11 +261,23 @@ def shutdown(self): del self.training_operator return 1 - def save_parameters(self, checkpoint: str): - self.training_operator.save_parameters(checkpoint) + def get_states(self): + return self.training_operator.get_states() + + def save_states(self, checkpoint: str): + self.training_operator.save_states(checkpoint) + + def load_states(self, states=None, checkpoint: Optional[str] = None): + self.training_operator.load_states(states, checkpoint) - def load_parameters(self, checkpoint: str): - self.training_operator.load_parameters(checkpoint) + def get_parameters(self, cpu: bool) -> List: + return self.training_operator.get_parameters(cpu) + + def get_named_parameters(self, cpu: bool) -> Dict: + return 
self.training_operator.get_named_parameters(cpu)
+
+    def set_parameters(self, params):
+        self.training_operator.set_parameters(params)

     def apply(self, fn: Callable):
         """Apply a function in the replica process."""
@@ -288,7 +317,6 @@ def __init__(self, actor_params: Mapping[str, Any],
             num_cpus_per_actor=num_cpus_per_actor,
             num_gpus_per_actor=num_gpus_per_actor,
             initialization_hook=initialization_hook)
-        self._replicas = None

     @property
     def _replica_params(self):
@@ -370,14 +398,18 @@ def shutdown(self, force: bool = False):
     def reset(self):
         pass

-    def save_parameters(self, checkpoint: str):
-        rets = [self.replicas[0].save_parameters.remote(checkpoint)]
+    def get_states(self):
+        rets = [self.replicas[0].get_states.remote()]
+        return ray.get(rets)[0]
+
+    def save_states(self, checkpoint: str):
+        rets = [self.replicas[0].save_states.remote(checkpoint)]
         ray.get(rets)

-    def load_parameters(self, checkpoint: str):
+    def load_states(self, states=None, checkpoint: Optional[str] = None):
         rets = [
-            replica.load_parameters.remote(checkpoint)
-            for _, replica in enumerate(self.replicas)
+            replica.load_states.remote(states, checkpoint)
+            for replica in self.replicas
         ]
         ray.get(rets)

diff --git a/distml/strategy/base_strategy.py b/distml/strategy/base_strategy.py
index 4ad1e80..3f31237 100644
--- a/distml/strategy/base_strategy.py
+++ b/distml/strategy/base_strategy.py
@@ -1,7 +1,7 @@
 from abc import ABCMeta
 from abc import abstractmethod
 import logging
-from typing import Callable, Any, Mapping, Optional
+from typing import Callable, Any, Mapping, Optional, Sequence

 import ray

@@ -52,7 +52,7 @@ def validate(self):
         raise NotImplementedError()

     @abstractmethod
-    def save_parameters(self, checkpoint: str):
+    def save_states(self, checkpoint: str):
         """Saves the Trainer state to the provided checkpoint path.

         Args:
@@ -61,11 +61,17 @@
         raise NotImplementedError()

     @abstractmethod
-    def load_parameters(self, checkpoint: str):
-        """Loads the Trainer state to the provided checkpoint path.
+    def load_states(self,
+                    states=None,
+                    checkpoint: Optional[str] = None,
+                    keys: Optional[Sequence[str]] = None):
+        """Loads the Trainer state from states or a checkpoint path.

         Args:
+            states: States to load.
             checkpoint (str): Path to target checkpoint file.
+            keys (Sequence[str]): Keys of the params to load.
+                If None, all states are loaded.
         """
         raise NotImplementedError()

@@ -157,13 +163,27 @@ def reset(self):
         raise NotImplementedError()

     @abstractmethod
-    def save_parameters(self, checkpoint: str):
-        """Let the first actor save parameters."""
+    def save_states(self, checkpoint: str):
+        """Saves the Trainer state to the provided checkpoint path.
+
+        Args:
+            checkpoint (str): Path to target checkpoint file.
+        """
         raise NotImplementedError()

     @abstractmethod
-    def load_parameters(self, checkpoint: str):
-        """All actors load parameters from checkpoint."""
+    def load_states(self,
+                    states=None,
+                    checkpoint: Optional[str] = None,
+                    keys: Optional[Sequence[str]] = None):
+        """Loads the Trainer state from states or a checkpoint path.
+
+        Args:
+            states: States to load.
+            checkpoint (str): Path to target checkpoint file.
+            keys (Sequence[str]): Keys of the params to load.
+                If None, all states are loaded.
+ """ raise NotImplementedError() @abstractmethod diff --git a/distml/strategy/ps_strategy.py b/distml/strategy/ps_strategy.py index a3486b8..6709069 100644 --- a/distml/strategy/ps_strategy.py +++ b/distml/strategy/ps_strategy.py @@ -1,14 +1,14 @@ +import logging from typing import List, Callable, Mapping, Any, Optional, Sequence, Dict + import ray import ray.util.collective as col +from ray.util.sgd.utils import AverageMeterCollection import numpy as np import distml.util as util from distml.strategy.base_strategy import BaseStrategy, BaseDataParallelGroup -from distml.util import ThroughputCollection - -import logging logger = logging.getLogger(__name__) @@ -72,11 +72,6 @@ def __init__(self, if operator_config and operator_config.get("batch_size"): self._global_batch_size = operator_config.get("batch_size") - if self._global_batch_size: - self._collector = ThroughputCollection( - batch_size=self._global_batch_size) - else: - self._collector = ThroughputCollection() def _init_strategy(self): """Do initialization for the distributed strategy.""" @@ -171,16 +166,28 @@ def shutdown(self, force: bool = False): self.worker_group.shutdown(force=force) self.server_group.shutdown(force=force) - def save_parameters(self, checkpoint: str): - # TODO(HUI): ps save parameters. - # First, worker rank 0 should pull the latest parameter from servers - # Then, worker rank 0 save parameters - self.worker_group.save_parameters(checkpoint) + def get_states(self): + # worker0 pull latest params and return states. + for server_idx, server in enumerate(self.server_group.actors): + # every server sends its shard to the worker0 + server.send_params.remote(0) + # the worker0 receives shards from ps. + ret = self.worker_group.actors[0].recv_params.remote() + ray.get([ret]) - def load_parameters(self, checkpoint: str): - # TODO(HUI): ps load parameters. - # shard parameters and send to all servers. - self.server_group.load_parameters(checkpoint) + return self.worker_group.get_states() + + def save_states(self, checkpoint: str): + # worker0 pull latest params. + for server_idx, server in enumerate(self.server_group.actors): + server.send_params.remote(0) + ret = self.worker_group.actors[0].recv_params.remote() + ray.get([ret]) + # Then, worker0 save parameters. + self.worker_group.save_states(checkpoint) + + def load_states(self, states=None, checkpoint: Optional[str] = None): + self.server_group.load_states(states=states, checkpoint=checkpoint) def _round_robin_sharding(self): """Generate the assignment of variable to servers.""" @@ -203,7 +210,7 @@ def train(self, num_steps: Optional[int] = None) -> Dict: function will simply train for one epoch. Returns: - None + metrics (dict): metrics of training result. """ # TODO (Hao): add fault tolerance using `max_retries`. steps = num_steps if num_steps \ @@ -214,8 +221,7 @@ def train(self, num_steps: Optional[int] = None) -> Dict: # train one epoch self.worker_group.make_iterator() for idx in range(steps): - with self._collector.record("train"): - metrics = self.train_batch() + metrics = self.train_batch() print("Step: {}/{}".format(idx, steps)) return metrics @@ -231,11 +237,31 @@ def validate(self, num_steps: Optional[int] = None) -> Dict: else self.worker_group.get_data_loader_len(training=False) self.worker_group.make_iterator(training=False) + # Worker group pull latest params. 
+        rets = []
+        for worker_idx, worker in enumerate(self.worker_group.actors):
+            for server in self.server_group.actors:
+                # Every server sends its param shard to this worker.
+                server.send_params.remote(worker_idx)
+            # The worker receives the shards from the parameter servers
+            # so that validation runs on the latest params.
+            ret = worker.recv_params.remote()
+            rets.append(ret)
+        ray.get(rets)
+
+        metrics = [
+            AverageMeterCollection()
+            for _ in range(len(self.worker_group.actors))
+        ]
+
         # TODO(HUI): Construct a better tool to save validate results.
         for idx in range(steps):
             batch_metrics = self.worker_group.validate_batch()
+            for metric_idx, metric in enumerate(batch_metrics):
+                num_sample = metric.pop("num_sample")
+                metrics[metric_idx].update(metric, n=num_sample)
 
         # Validate results should be the same in all workers
-        return batch_metrics
+        return metrics[0].summary()
 
     def train_batch(self) -> Dict:
         loss_vals = []
@@ -272,9 +298,9 @@ def __init__(self, training_operator_cls,
         self.params = dict()
 
     def setup_operator(self):
-        # figure out the signature of training_operator_cls later.
+        """Instantiate the training operator."""
         self.training_operator = self.training_operator_cls(
-            self.operator_config)
+            operator_config=self.operator_config)
 
     def setup_collective_group(self,
                                rank: int,
@@ -282,7 +308,7 @@ def setup_collective_group(self,
                                num_worker: int,
                                backend: str = "nccl",
                                group_name: str = "default"):
-        # rank should be true rank. means, rank has already plus num_worker.
+        # rank is the global rank, i.e. already offset by num_worker.
         self.rank = rank
         self.num_ps = num_ps
         self.num_worker = num_worker
@@ -293,15 +319,18 @@ def setup_collective_group(self,
         col.init_collective_group(
             num_ps + num_worker, rank, backend=backend, group_name=group_name)
 
+    def apply(self, fn: Callable):
+        """Apply a function in the replica process."""
+        return fn()
+
     def test_connection(self):
         for i in range(self.num_worker):
             recv = util.zeros((1, ), cpu=False)
-            col.recv(recv, i, self.group_name)
+            col.recv(recv, i, group_name=self.group_name)
             assert recv == 1
         for i in range(self.num_worker):
             send = util.ones((1, ), cpu=False)
-            col.send(send, i, self.group_name)
-        return
+            col.send(send, i, group_name=self.group_name)
 
     def _init_grad_counts(self):
         self.grad_counts = [0] * self.num_worker
@@ -324,8 +353,21 @@ def set_params(self, params):
         self.training_operator.reset_optimizer_for_params(self.params)
         self._init_grad_buffer()
 
+    def load_states(self, states=None, checkpoint: Optional[str] = None):
+        self.training_operator.load_states(
+            states=states,
+            checkpoint=checkpoint,
+            keys=tuple(self.params.keys()))
+
+        # Update the copy of the params held by this actor.
+        latest_params = self.training_operator.get_named_parameters(cpu=False)
+
+        assert self.params.keys() == latest_params.keys()
+
+        for key in latest_params.keys():
+            self.params[key] = latest_params[key]
+
     def apply_updates(self, grad_buffer):
-        # TODO(HUI): gradient divide by num_worker
         self.training_operator.apply_updates(grad_buffer)
         self.params = self.training_operator.get_named_parameters(cpu=False)
 
@@ -338,7 +380,7 @@ def send_params(self, dst_rank: int):
         """ Send this param shard to the destination worker """
         for name, v in self.params.items():
             cv = self.training_operator.to_cupy(v)
-            col.send(cv, dst_rank, self.group_name)
+            col.send(cv, dst_rank, group_name=self.group_name)
 
     def update(self, src_rank: int):
         """Receive gradients and update"""
@@ -396,7 +438,7 @@ def __init__(self, training_operator_cls,
 
     def setup_operator(self):
         # figure out the signature of training_operator_cls later.
         self.training_operator = self.training_operator_cls(
-            self.operator_config)
+            operator_config=self.operator_config)
 
     def setup_collective_group(self,
                                rank: int,
@@ -415,13 +457,17 @@ def setup_collective_group(self,
         col.init_collective_group(
             num_ps + num_worker, rank, backend=backend, group_name=group_name)
 
+    def apply(self, fn: Callable):
+        """Apply a function in the replica process."""
+        return fn()
+
     def test_connection(self):
         for i in range(self.num_ps):
             send = util.ones((1, ), cpu=False)
-            col.send(send, self.num_worker + i, self.group_name)
+            col.send(send, self.num_worker + i, group_name=self.group_name)
         for i in range(self.num_ps):
             recv = util.zeros((1, ), cpu=False)
-            col.recv(recv, self.num_worker + i, self.group_name)
+            col.recv(recv, self.num_worker + i, group_name=self.group_name)
             assert recv == 1
         return
 
@@ -458,16 +504,15 @@ def derive_updates(self, batch: Sequence[Any]) -> Dict:
         # TODO (Hao): handling data loader next.
         return self.training_operator.derive_updates(batch)
 
-    def compute_gradients(self, params):
+    def compute_gradients(self):
         """ Update worker parameters that received from server.
 
         Compute gradients and return named gradients.
         """
-        self.set_parameters(params)
         try:
             batch = next(self.training_iterator)
-        except StopIteration and NameError:
+        except (StopIteration, NameError):
             self.make_iterator()
             batch = next(self.training_iterator)
 
@@ -479,6 +524,7 @@ def compute_gradients(self, params):
         return loss_val, grads
 
     def split_gradients(self, grad, assignments) -> List:
+        """Split gradients into shards according to assignments."""
         # assuming messages are gradients or parameters
         # this grad is ready to be called by apply_gradients in ParameterServer
         num_shards = np.unique(np.array(assignments)).size
@@ -488,6 +534,7 @@ def split_gradients(self, grad, assignments) -> List:
         return shards
 
     def split_parameters(self, assignments) -> List:
+        """Split parameters into shards according to assignments."""
         params = self.get_named_parameters(cpu=False)
         num_shards = np.unique(np.array(assignments)).size
         shards = [dict() for i in range(num_shards)]
@@ -498,6 +545,34 @@ def split_parameters(self, assignments) -> List:
     def index_shard(self, shards, index: int):
         return shards[index]
 
+    def recv_params(self):
+        weights = self.get_named_parameters(cpu=False)
+        params = dict()
+
+        # 1. Create the receive lists to group collective calls
+        recv_list = []
+        for i in range(self.num_ps):
+            recv_list.append([])
+            param_shard_keys = self.name_list[i]
+            for key in param_shard_keys:
+                to_recv = weights[key]
+                recv_list[-1].append(
+                    self.training_operator.ones(to_recv.shape, cpu=False))
+
+        # 2.
Receive params from servers + for i in range(self.num_ps): + for j in range(len(self.name_list[i])): + v = self.training_operator.to_cupy(recv_list[i][j]) + col.recv(v, self.num_worker + i, group_name=self.group_name) + + # 3. Set params in workers. + for i in range(self.num_ps): + param_shard_keys = self.name_list[i] + for j in range(len(param_shard_keys)): + params[param_shard_keys[j]] = recv_list[i][j] + + self.set_parameters(params) + def set_parameters(self, params): self.training_operator.set_parameters(params) @@ -512,6 +587,12 @@ def get_gradients(self): # when derive_updates. return self.training_operator.get_gradients() + def get_states(self): + return self.training_operator.get_states() + + def save_states(self, checkpoint: str): + self.training_operator.save_states(checkpoint) + def set_assignments(self, assignments): self.assignments = assignments keys = list(self.get_named_parameters(cpu=False).keys()) @@ -522,41 +603,18 @@ def compute(self): """Returns the loss, and send gradients to servers""" metrics = {} - weights = self.get_named_parameters(cpu=False) - params = dict() + self.recv_params() - # 1. Create the receive lists to group collective calls - recv_list = [] - for i in range(self.num_ps): - recv_list.append([]) - param_shard_keys = self.name_list[i] - for key in param_shard_keys: - to_recv = weights[key] - recv_list[-1].append( - self.training_operator.ones(to_recv.shape, cpu=False)) - - # 2. Receive params from servers - for i in range(self.num_ps): - for j in range(len(self.name_list[i])): - v = self.training_operator.to_cupy(recv_list[i][j]) - col.recv(v, self.num_worker + i, self.group_name) - - # 3. Set params in workers and compute gradients. - for i in range(self.num_ps): - param_shard_keys = self.name_list[i] - for j in range(len(param_shard_keys)): - params[param_shard_keys[j]] = recv_list[i][j] - - loss_val, grad = self.compute_gradients(params) + loss_val, grad = self.compute_gradients() metrics["train_loss"] = loss_val - # 4. Shard gradients and send to servers. + # Shard gradients and send to servers. 
split_grad = self.split_gradients(grad, self.assignments)
         for i in range(self.num_ps):
             this_shard = self.index_shard(split_grad, i)
             for _, v in this_shard.items():
                 cv = self.training_operator.to_cupy(v)
-                col.send(cv, self.num_worker + i, self.group_name)
+                col.send(cv, self.num_worker + i, group_name=self.group_name)
         return metrics
 
     def validate_batch(self):
@@ -637,6 +695,10 @@ def _start_actors(self, num_actors: int):
             RemoteActor.remote(**self._actor_params) for _ in range(num_actors)
         ]
 
+        # apply init_hook
+        if self._initialization_hook:
+            self.apply_all_replicas(self._initialization_hook)
+
         # setup the rank and group in each replica
         ray.get(
             self._setup_collective_group(self.num_ps, self.num_worker,
@@ -656,6 +718,14 @@ def set_assignments(self, assignments):
         ]
         return rets
 
+    def apply_all_replicas(self, fn: Callable):
+        """Apply fn in all replica processes and wait until completion."""
+        return ray.get(self._apply_all_replicas(fn))
+
+    def _apply_all_replicas(self, fn):
+        """Apply a function fn in all replica processes."""
+        return [actor.apply.remote(fn) for actor in self.actors]
+
     def _make_iterator(self, training: bool):
         return [actor.make_iterator.remote(training) for actor in self.actors]
 
@@ -695,13 +765,17 @@ def reset(self):
     def actors(self):
         return self._distributed_actors
 
-    def save_parameters(self, checkpoint: str):
-        rets = [self.actors[0].save_parameters.remote(checkpoint)]
+    def get_states(self):
+        ret = self.actors[0].get_states.remote()
+        return ray.get([ret])[0]
+
+    def save_states(self, checkpoint: str):
+        rets = [self.actors[0].save_states.remote(checkpoint)]
         ray.get(rets)
 
-    def load_parameters(self, checkpoint: str):
+    def load_states(self, states=None, checkpoint: Optional[str] = None):
         rets = [
-            actor.load_parameters.remote(checkpoint)
+            actor.load_states.remote(states=states, checkpoint=checkpoint)
             for _, actor in enumerate(self.actors)
         ]
         ray.get(rets)
diff --git a/examples/jax/mnist_jax_example.py b/examples/jax/mnist_jax_example.py
index 1a189b3..324a99a 100644
--- a/examples/jax/mnist_jax_example.py
+++ b/examples/jax/mnist_jax_example.py
@@ -16,6 +16,7 @@
 from jax_util.resnet import ResNet18, ResNet50, ResNet101
 from jax_util.datasets import mnist, Dataloader
 
+import numpy as np
 
 def initialization_hook():
     # Need this for avoiding a connection restart issue on AWS.
@@ -55,6 +56,12 @@ def setup(self, config):
         with FileLock(".ray.lock"):
             train_images, train_labels, test_images, test_labels = mnist()
 
+        if config.get("test_mode", False):
+            idx = np.random.choice(train_images.shape[0], 1000)
+            train_images, train_labels = train_images[idx], train_labels[idx]
+            idx = np.random.choice(test_images.shape[0], 1000)
+            test_images, test_labels = test_images[idx], test_labels[idx]
+
         train_images = train_images.reshape(train_images.shape[0], 1, 28,
                                             28).transpose(2, 3, 1, 0)
         test_images = test_images.reshape(test_images.shape[0], 1, 28,
From 9a4acb1bc718eeb36be7a49cfede2b657daee7be Mon Sep 17 00:00:00 2001
From: Ezra-H
Date: Sat, 19 Jun 2021 22:44:46 +0800
Subject: [PATCH 12/13] callable check

---
 distml/operator/jax_operator.py | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/distml/operator/jax_operator.py b/distml/operator/jax_operator.py
index 6feff06..a738640 100644
--- a/distml/operator/jax_operator.py
+++ b/distml/operator/jax_operator.py
@@ -103,7 +103,7 @@ def register(self, *, model, optimizer, criterion, jit_mode: bool = False):
                 "'opt_init', 'opt_update' and 'get_params'."
"Got: {} {}".format(type(optimizer), len(optimizer))) - if not hasattr(criterion, "__call__"): + if not callable(criterion): raise RuntimeError( "The `criterion` must be callable function that " "feed logits and target, return the loss value. " @@ -123,12 +123,12 @@ def _register_model(self, model): "`opt_states` return from optimizer `opt_init`. " "Got: {}".format(type(model[0]))) - if not hasattr(model[1], "__call__"): + if not callable(model[1]): raise RuntimeError("The second elemente of `model` must be the " "`init_fun` return from model. " "Got: {}".format(type(model[1]))) - if not hasattr(model[2], "__call__"): + if not callable(model[2]): raise RuntimeError("The third elemente of `model` must be the " "`predict_fun` return from model. " "Got: {}".format(type(model[2]))) @@ -139,18 +139,18 @@ def _register_model(self, model): def _register_optimizer(self, optimizer): """register optimizer components.""" - if not hasattr(optimizer[0], "__call__"): + if not callable(optimizer[0]): raise RuntimeError("The fist elemente of `optimizer` must be the " "`opt_init` return from optimizer. " "Got: {}".format(type(optimizer[1]))) - if not hasattr(optimizer[1], "__call__"): + if not callable(optimizer[1]): raise RuntimeError( "The second elemente of `optimizer` must be the " "`opt_update` return from optimizer. " "Got: {}".format(type(optimizer[1]))) - if not hasattr(optimizer[2], "__call__"): + if not callable(optimizer[2]): raise RuntimeError("The third elemente of `optimizer` must be the " "`get_params` return from optimizer. " "Got: {}".format(type(optimizer[2]))) From 686d8636342f77e48260f078efa67e8428e1159c Mon Sep 17 00:00:00 2001 From: Hao Zhang Date: Tue, 6 Jul 2021 01:34:05 -0400 Subject: [PATCH 13/13] add some benchmark --- examples/jax/Bert-base-1node.png | Bin 0 -> 23819 bytes examples/jax/benchmark.md | 67 +++++++++++++++++++ examples/jax/bert-base-distributed-1gbps.png | Bin 0 -> 27447 bytes examples/jax/resnet101-1node.png | Bin 0 -> 25769 bytes examples/jax/resnet101-distributed-1gbps.png | Bin 0 -> 24515 bytes examples/jax/resnet18-1node.png | Bin 0 -> 40504 bytes examples/jax/resnet18-distributed-1gbps.png | Bin 0 -> 19608 bytes 7 files changed, 67 insertions(+) create mode 100644 examples/jax/Bert-base-1node.png create mode 100644 examples/jax/benchmark.md create mode 100644 examples/jax/bert-base-distributed-1gbps.png create mode 100644 examples/jax/resnet101-1node.png create mode 100644 examples/jax/resnet101-distributed-1gbps.png create mode 100644 examples/jax/resnet18-1node.png create mode 100644 examples/jax/resnet18-distributed-1gbps.png diff --git a/examples/jax/Bert-base-1node.png b/examples/jax/Bert-base-1node.png new file mode 100644 index 0000000000000000000000000000000000000000..be7b717e971373f2f2eba2ef6865c76f0dcf114e GIT binary patch literal 23819 zcmeFZWmr{R7dC3qtq20rr63?Ju_-~iyQMp%8<7U-l9ul7Mgi&0O?P+qK5JtpIu@^Zoh!gRs|{bIq~l9OEAMxX1RFk`P2e#6`S!?;eV<5Wmd5dk>WE-Mg;^{{Z+4 zY3y6;d-v3ag!y^p95l8Quec*GAdn{aiN>l}UXwBtjtMgZzj= zk7ZI5!uk2EsMwyeMNly6tBtqY>le~97LF%;&gockbG|MlZ^`MHVbSp49fA^*-4vnP7bTpUc3Y* zForbQEs`;7k~{YFCBe&kRw1BT@)K6I#|YDX~nyn(e8(Gq?de7rJL7o z9-{k+#5=s-^wNX;Nj%CR;yYRbPPNYn z&&57?ReCBdEG)3}QAS53+v5`vvF5V_9s|Ux0raMrz3q5#g<-vMeOV?9aBDG19`=Iv zUco(s#`o@0+oQ+z-<{bG+NWXoLKrh#Wdx*W;}(hM>ssYR%kg!4h2b7OIP=10Sdw2# ztPlro|9GNQ0l(27(qFv~<13?))BirDiHXvg`ueHdfZ)pp^2hr#i<{WxAYF0AQ>63V z7+kE+ib3{cEU`gKuabcK?;X>#v2}K6kCLQw&7xo(dQ%2)8I959@Q#MBmP|d2tS40` z78svUSvi*K<#U}TSS3MT8YNHDGm8NC^!H0X1V*_05*w?fTVjkclt!+FxRvO}Q~(Oi 
zVy}{&4;!M&q9ouiMm-uVTal)+4j%ZCC;G0tn+E#e%*qw;F<5a$1eov-k1orzKFc)J z-0%CIb`e04OGhX5;!VPA%0}r#ymc!W%v$FBp_6}94hOigm_3u_JJCCcu zo*7%!IaP@N_)^c7nl+7U7RmOh4;E@(-m#ctWZu5vtaDV?XGQTS=(aY|k@GrkZGlhW zvB?mwEOp%Ota`m1Vj=2+4Bk2nJ3fbJq3x-PtPbaZDda*eute<0u_26+?ew0NqyNxWZFc z>ctQk%4t`%BtmVaV$OHfkl}b|DXPdp7y(UNhO?-F!>%97=~sOl13m4_EW3H?hE)CBUuHymOOkF-OMSHX;1u!vLH`H~TU~w6qVEvA zy~n)eJKsKvp)>LYGQUUWYbH#?Z+`U%He&&yer8RM#Oc>Gn!2GOGu-5u4p~*AF9R}Tmhm@cA2gbn z7Rc3M8_jq_z68GhIX(0QjpIVNH<)nkW ztNv$?Ln|cU^7rBj)O;81nSy9GFKGOZtg*I3sy{#$(pI&M{hf;Hj|V8zWQX-iD5v_t zV7k1&4|(2GV!SbWKJOkM#J6f*T5ZWAW$aF++vS1#hFsQYEKBmTdcgS2lh3eIHt&61 z7m4_+KFH%|X(T*tH`7m3{ho)ss141V^7Ofc8(UTVq*47}2N;Kh}^?6YA~G z4)78KuWSZLzh{tdG^bUkeM9gL={{QGlOzKX46t3b{d}{{`pctNIU|Sm6tXw^ku$@X zumaTzy2B55dTC}zwHt1MHwuke#o&%2ya~6=N!>-!+6j>OLGRrGRzmyllk1|nTGwTv zuUFbZYnIGxb?Rf2esJo&t~#0bEheR@o?4RQ$?}q82kWtm-OkrbmgKy9&}h65O35O8 zZa-hVu>099b584cWNXMo%Y3T(dD8I3yDT^r$%fzgSpq7-+ta&cW z?9d09=im#$@^SbvZL*E?oIsjU4r8O7x>lkIP9E(#Wkvm^Ig1y3&TYci=Hpd_QKm3l zi)yO@k8bd0EThndxgO%(BF;Q7<)o$9Dz#DM#^UC;ZGL=(lOI^JcQFH#q`%uUn1!U1 zg23@)*aYpn4zWs8`ahjk2>@AIl>5n z?q=1fja6_tZ4pgz?i;f~AT@ID?gr@>lCU$5ljU%|=qdS>-Tl-#7f2syQmZe*18ID>98z5} zNSJxNfW7L5F4tetpzk;k3BM8Rl-lPk9|6JyS(fCGpL5HUcyqzN* zWU9u1i`%KQXJ4MAo_WIC1}aFj?r|5H91ix5L6sF9>SG$T=o;3)^j9Xmn@y0Xp&vbr zApO!B6>Pl&D!o$ZnaK|0q{A!Jk(&|05OLZgm}{7Ve6L_)9uKEWMzJ|+oK4#nmjneJ z_~#~QKYzI+1lDPpS&aqvI#F@_Tma9>9Qw{1`OYLXnKV602>8@~;^7WL($QY-p-hX~ z4%_otyP}Dn#Z~D{e%BWWlizga$|GA!K%t|OG}5kU{nZ@%l~-h=FVRqjtns}m)wePA z@F?fb!%-6>enva)d&bAM;z#Jt2*-kVuVG>Yu<=Ln2cSWRwadel^z!m;+?J3RU)%Pm zhWe6_kxZbrhVITWi+a}X4FrhGeMkUVa=xHFTnqXmZ;C&&GK10eV|>whGmLpcd> zo!I}Bam*%_p1o*Oy*JNEr z29Y_=?>udh0kcg!>X?jh{Z8GZ+K<-AFru#@!_9f_ zlk2p%OXGVBL$`57GxbT;XA3xy;L})*aMb8`jxYvQTWW~fcb@rNIfXKB(e)I`ev;7_ zen(wmZjX`dZ7y|NP&?S^`1W(9S>>DNV4X5baoeC%k>wr#@A#X}dLq6_Yi)jV{Y}=0 zFSjrFdh0t6?aMzLifVI1Q1j^;o8V)z+uF!;=^)vmG<=(H2QR+xoxu4dIY!{E($3dC znp1CNPPkUzl^%bHfU?<5x1t{F#Z%zdjV<>ua zSk<8NX2d;}GJbLO6{Y>1OT*T&$V^zn4Gi*0KpI2h z<^kXb_)w@4e#1Z4d9*fgFL3YZfwxOc*(M&EA0Dkg075DV*&KgnC2FJX4p2N%to)TC zS^Zp+{+qSv#z>RK%dL9a(zSg%*yVDGt``Tst91sa6V`8M!SlEyqA< zZFEcOrZuRI%H#))l!Tk^)AyAWuZb{ddqKf=TK(_{Sh$O4h^IeB^URSb&BQPf5^gMv z(Wr-p^|HKOs;r~B+9cBC(^ginfQ*oL$f`5ez2wu3SsZMrMq?Oj+Ab(l1K_ zfQJHq&91*&{^bklhcLW+jjtCy`YC!=N2m!M=WNtw^c;t5nU^QkND-dxnR#?ZHo7+N8nJ9Cc^#XcT--E{PsN_tI+_Id ze%yO3TTnJlYa+o;gkDWqABt0iyO(eF;GQXJ_UzVG<(B+2Jg53stziaxu`{AB%F-aC z-6=_haK#nsWOwXTr(66pw5A6wRYmmv>3iR5T^1w;$~M=n*>moSOqafRF1`!jURub$mK0z-Uk zY%K4+=64xQFGq)&54A!nSIksp^ZL5=F5^pYs-~`2B5toibxaeAcx#siJQ|dB4Oxrd z7j$C9Z#*rM)hwsFZqy^?1npSF-37sIH}0m^O;WSq{=$tR$f3TNnlN6OxTdv4vmLF0 zDv!pQ74zsg=+)A<;UBAt#&L(Asvu8Y6dS(5&L1zmka5;6)tuHnV~UTwDo8)=;9qG3 zerfhaBLq;XxEGSEy+m@?#=z}2iant-tbKJI6Wb)x3Ms-W-x~N9hfxOuAbspuv3hy$ zz2|<$(QZw|TIm+-T4Yh9n@jZb_2gr1ZMT}+3d-Rn)?EuY#@a?>j+eD43Ys?d%W!K= zv4ds`M-@ktjZJ(668WZygpQAQ@TFP4Z#!3#8->t}YeY`6*I6+}xa>6GRVS`Flv}=U z3rP>D0V^k(=_h+ZI;XHySPGm@Z0EH!E(CjdqIw#to98eNymI0@TVmc4DB_7x)a1v{ z4C5xa<4@@2z2?+Mo#zq}$N} z@hTnQbR%IRqGs6}*1kGjeg47jE*Fu9BG>C_q4rw5_k>yH$yK@3@KP|WGMb9VQ} z5DxnyAr8+Aml{LY&rOuUomtcP)hrwj4?k;skre$g)g6AFSb0m%t~jE%o~ewb0YSrH zD1mxZUkpAEAO|?Lb3;sjs7F6L8)oEqVSH%C5YZ2t)U&5^`|R>xS-nYLCjDKK{qf`; zsGvRtZ*T9*nz80!a&qES@6yDM=(u$tEAxn@>+Q*T1c5|Xlv3eR*!LAYtAggCEeJ6m z#TaIaP4R-8x!Om(WV$z&3@5uwtUVqam%|ZXJRfj*yiVUwd#5<`E#O?|8-hJy%vyUQ zyr>gHvG)DZpNnX+M`dadc+=Wk&DM$@@oV(i(LE~%JH!z)%O zh#sdyms3qcp-_Lzuo2vL`a?=g^r?MD5R{!uB^}zzJ=)LLJ(C*O zEKGRpei-AS|6{oKOFWWX$QKa?Tl;$+^VRihgk;;b8v20`caS@S^9RFY59pbfhP91_ zX_I7_UuY=VkbQ^}WRjP9OCCnxp`7S67aF%w*dzSeUs$NO5J#zm?ysR+cqA3tuC}pv 
z%MDO5ChHO!=L}SQ(gAXpd1K(H{#n?=n3e@LZ_HDd6#ZG<3y#S*7vxd4Tbw8}raj!L zIlQyUYg^4LH?AqM=V9coXni|VGyyL+5=PTx|DJ<5em7SLVcM*HVPcz0mQjyS5}0AV zXz+ia#7ytJ5PAGT-O!WO=is$N)(~{!o0^l_m^4qp$t^nAcgPsw3PcYv!k=L;d{>(C zEKd)&ciOcrem$uciR*r#ttKO})2w*9W<>*KMm|0;J_LJ1w%@Rrzr0jLXI_uN_xu)j zu2&yOGyaV=k8JjAxdlV^l%aVu#IVcB{G71UDQwN8`!VrJ3Yj5XKmGbGWefN3J)vZ&PX#*^?V9?#MP=GMbnKM;2dm z6g<0@a11aFPC;*##)q;cO9=8yM&=c!R$x{U_92&$m9uV&-BNy`kSGHCsNFrmg3rma64X$oG zcT1){tn7ZkadxaxiLdaU-|n5sh*Tf3Hv;~{K(1#qO~eYW{MNU&0Tvw%#K!*mfl{+k zi~21Wy-k)5)=e7}b%Av`?%pm;iJ&-AFx^;D zz!>)?Rk)w|#}&WaXZmPepk||R9r=8Aj=oz|;CRho&F<(;{hUnyHLdt;rr382?Azsu z$*Byu2?9ItWOBN(+zO=eGkwD(oIwYjIKzTtQS43u5`V`sB?d9G5<1y3QVXG&A*A>p z@>5N8^t_On=ANm-aszn@2DTNRpuF2x3f$pfnDlZ(iIl&n2(h{K!3Tt@02P6+Y6oj_ zJTm+&Ia}mpT6{9LbUfO`{dGfx)b{3U7ygJOSFEikC%R%32BAS%F9e=Vy$F-VEO@5$ zE5@p)W;hL>fi2%Sqh6B)DH^aJk3$?(tL>Vi6GmC64&ANO#0a5IRH)P#qf1p51mhC( z#W-=vuJlK%>w|BN-E_JpIkOj6N7iQxclKl_iLRvBH`ZiuM0aQoU z?N6%1F)Dm^vwox)f|*!7)zD{v#TsHvuKV@f35auPw|UC#O^!r`*vs8li1sD2^?5-N z{cVe<{c*Q>?eiGo2Q5S^9P2H+4q#4Zm#t8g_OY|VzT^%dzSmVJBtmW^x9Y1k8SKp$ zEGwTMT&z&@8@@p&?#vZDk^tdD%WuVcV}XtjF5EB;dJy+9^q+2ac^xe}v>P&CS`br#tI1a%p5$y**-A z7}TACJxLz|3b9|Yn&wwouyww{wb^%%xs{PXw3alP%7SH&rL`^Uy~IW7bjoqWrnWjT z;snB)vN#w>`mD=J1D~VD4%ZY-fC_kRMwPAJ=pL$?mpR9YzjLHyd(NpFM7J9QF4jB)n}RF1 z+|{v1Gd8}_W_FjI@*avth#8J`udtR-e9#Zr*JHT@{dN&8#7|l>xR0hvz(lGiKbg7%z%9H5xEc-t=% z@l-i2puku3Q{_~xhPgZ;D@IfOi|Z+~tM)e;o2&h|bWY<%dfmRTs;3quE2bFu=XzM8 zX_uceq{HO{Euma_^#J8t=iPB_$;#xiGEIfE#Yt!#>0?j};7pB)1gg{Pzp3~@%z5W4 z;F#qFVnQ5~YmeBYiuR|+_aTY5CP)@r(cSHGzOXuOFD++NITt%-7I#jYR-gHW;`@c_ zo{*@e{$bh)jejxiZpk7FCOJ^B$?|xpWcNoFvu%)t4_8EG*`t3@5$2U@bqZ!mCy!S? z0C0W?ys?8sN)go#wX*N2{3hcwvVN2CITtz>KSNZTP=-w0t)p8k5g!&h@e`8(DTV<{ zZq{ADJ3M_jIn!=o)2F6z{SJjQfCD!{>b#nhoA#K0Xz709k?NQN)afb-DJ#dWZhuU^ z=NAgY-_aUJ!W3lc^tGWT!=gvcbgrCESHgt zaLhix@WhI^$kAm*&-^$hsdS`+r^u7Q*RxMw?qq8k){PX1*gxk6Mpu&TxH@vdug=Gd z$mbbzryEhne2)JFztr6q#aCx7`dsA7QuTvLmm8e#%vD3ZfD4>J)RyJzg5L8o3s%@I zWiMC88x%r~FFgZm#Nq(F1Xc`yOP;ZKYu)7TInEZKgd3HS-Xz6s7J}=FSsB>=0X|Po z@+$i_zTLA_Lz1zdEY4U0wA{Qh;m>d~{dhP|>g^e6!zYWAX9=x+_EB>^DhGbBFyM@x zvTYQUJ!4Sd>w*jRiwxma1?R#{z@a~Y!s<`G!TD=Gkg$Ag$JPQW+c04=c7=$sckGAH zzbCy0_M!$eNgd{S$p3<3TZ3s7?9>LHAzFMvXz4*00poU8{Hhia<7T-8qAtCF2kA{Urj%iuUfQGxT#Cc zupZuQ51nn&yfo5Q;o{CS{*pARPINm6U>G7kQXrvu=9y&j`OH6CN;0nJF)_1RvODND z-8W(rY#S_y=zDC$I~GOp_#JnmMvm}7w*$6np{Ip{aQ$AVFV%(04;R%^3&R7rHZnU~ zph&6t0sQnrZrszyjvU`1`bmQ%v%h$4($!{5tIZXPreM5!V+>7ltJ^~WzoDmK0EY+p zZ9iAwiN6yAMFeBVJETaG&t{PwFS|zU8`7KJjQF=NAUsy!vX}KQU-Ky%AH@RsqOGYG zRf9iNyZc1u^mA1_`f#M@Ehs1-F(L0SbxYA{tEgKL3>UN)kW?6SJMI(ju%po4TAmI9 zchluf9SU!=-iPj1-1{VtnqM#a@^%>|a%~64#Ma{`B~=qHHi5wnPOPt%JOP{1b+AOY8{Ag zL%3bgb4o&lc;4k$?|}3D`m@W|+gWw>(iU|fgt|UP+{FSBlC3&E8W|8rQ*F;?SGVr5 zU-3%pVYJzooYh3f4gUZMxKh|9bc6BC8%G4G5^gzHI`J|3{Ame2I^=(LX*3@i3wfX;mKu5BVhA0)r@SwPc<%-hlv1b z4ZN_)Ae+~_ep{ACWjA6YNe(QH+%Hrj9K-xgAMjDvPGc)~L_UjazonW`E&8}wQ>obQ zxw;oX)h9f&b9|=Bdp2$bh$!>pwks7uAlt-bd)MBoM zaTm3I2iW{_WmF#wq^kq1xb&BntxKwB4$EGj;7<&5_(K8X9()ml)zK1IukxW%M~((hbLTb zb*DX5F!by4^w7_`BOsOhkFFVtoY?SA^YHAuEAb&Ff%1*%lvfkZ=NNy^?e$0~$#WWx zoY+?5ua_2nb_uymtGt!)bhk3wrFwYwfoF}7llGSx6ISPn2D4bG2Hmt-ntMLHUfVxi z>b*v!k@c;(RbuT1IXa}OXalczX&4KN8;e^WI1wAqNRsWfVE&xb=1rvncxNB(h`E=# zYD-B4E`(cmj0W`m-&25th8$g(JL=+`?QY{kgFY4@v${SNmgpKCIX*V&>;#9f8z?D~ z3k${}$}R!_9+y!&w`~s}j>*8x$4f~`@eFg1W}!geVd(>ma3vDHOY5VGkE95;X}NVq zuYyN^T%A)e8(K`I*VFhKNa;@*I0>^W;jTPQm?=E^9A6=K|6F0Z##)d{y)kY->)xMn zi1>_)i8@XUlTHsa+xqahXxouS&T>PWxd)h#tp8ACuC$G^#d_W)b>sMSS`syD5m{ z_)qoZxp@k)8{HhBlQQ{0BQO5s-rT3l%S#WB`*vHk3%wKZ=W8)ynYm3W!!UnrMZyOY zn4Cm$S#x;tU}5&_w!{uS=b!qA^&XP4_0WBfX3&DSDUG~Y{7<%ib#P!{01WOlS7+ti 
zd`a0P+mX(qK73kJ<6HhI>9Y-eeASs`D0JT}ZkAhUfx_`*I~PJbjXTFqJ12Sx0%ygL z8RA)+@JM~%uIfqzp3>yEudMkmTXrm0tUz-CUUfns7Z`*o0)q;^U1XI2pJ3WzLO+qP zr2{^}-fhr2V?riHLK3Lv?=RO+MmT)Jc63qbhA>JvnkwTb`b+)78pW&*rkk(RG*(g) zm7mgQVPs_Vy!WMjI7@PCb916#%V$a)X4xJwR+Jebj#kqg9xO5t6SA`@JF@XTO3^HN{|T{j9q&Xa1TS?Y{C-{On*`PHk(#2|2}%PXcV z6OGFfc3w7mOeadtcsW|bE{>rF7#Bcumv}UQy}5L{50jK*c32@-VfOxr2q#;F`^ma( za|`7%pT<%3fx-IVqWx1N14$omSbUT^n=0GziMS-gdTaF)FW#qM=32?~6lg+5p z6ZW*yqJV1&;)q+Sx5F@^X=24pU5L@|gkPZ(gOqW~G%+>%6Q+bEIVBID90FoO)r>Ah zG!muu3!rTILSWcG6~l>UbcLeWDD-3w$rc+%u$|v`drqrmVWNh-k>Pv-#;KBCnhcrN zg|%q?{7=oi{=4^3WjEhW1Z)iuLmSuSt}ZWeadB;M$FKp=?>7Js)C%@U_?$91yW|~m zZPb=Bz3r=l5xwtMIbp{{XE)aKq|947`RXH9{gDC&$-k!|^JK;uvNcSxazme@=}vdj0y?!6;jCyTu0^If!nqAc4W{`n;sFQlUyu{dXi}xR4;p zm-2gEZm7f|th(KIXpPc%B@O0tt;f!NflV8OV?ogCTx11S$!*5%fD7Q8K85uX^oFZu zqgoYIJ<~87W#i~1I=tm1grnWU*hEY(vdvPiSC^Iqewgr5gsp!OQNi2ju!eU|)%04V z)IzOyFPBVLJa%)lgB#b^bSiQpEIrjt-bdWo34#C7-cHEv)&rc$-|<4MF%bT#?8i-| zu&K_+y|qt37jGsRQ{`58VKk>0uV+4)pXJObxLEBiEK~G)e>BPYRFV2cSzSB_Pj>F1 zVfFI49qn0&A8rIiJ-sS_KO;6%-oOWvPE}ubv`6hLAjnq^jXStLe{*#P6ytx9wVx#e#|OiV zpp1**BY@L7!$>)F5L0za*PQqMw3!*bnkDdZkRD8DYjI10pYO+wm0)@H!O?Bnzc`)p zA+`BJbu^U{>En{!s=9QlX^$;*4Lv@u-&mZQFs1NSy)ZpjZz&sne zFuVDeu7GMwL(u&b?=L(p)))gX-=Xiz9*?KVWRKjlURTM>!Zn=VSFbAGvE&PqQGso`o#Ne1rZD)*C`jfv7poEeS0ZSTOBh;mdlVJFDU}eUna6g z=m6P9%=WfvQ~sjETm%|30P~bAQoN};Da3Jyv>R2D8dUN(E%yFb$mQx$mEccYL*_gDgJUuF4Wh{g=?(Bs! z?8tq%`Hg}AEbPISpP+wZE627l{17z$Y!B~*zhTl)%8M#GOWAqY$>x-K+(Fu~<)#Iq zZ}iFC0ZiGjr%ij@dP4*kJX0dGgj9q4N6_%Foo>Y}2F7kTPF4gb+nLQsV#@vvEY=3( z>^$Yqt0Lkn#wFVed<#(!EAtYz=rF?iohkriC(4@R)V^Iiv*?R}p)BlCrtVYA+W@vN zfAnY5AH#cmaceiUww}pnDYjC7ZI6PZ+X0ouQl_qZ{Nk)Q@#LiyJ#qHYS3?jXa}QMw;y~5DFWU9rnQetlC5xspe{=(@NsQMcV7pE0Zzz zmlA5b3Rm0w_L=-A<7IUa_`i065;?S9YA0v|V(Se*8J}q=c zL&f72CX@CJT=gufOyz%sa_`sqv2QxK8(uXif@@rs@Py}PmUdGz;siv0R~qXq+9X!e z`e}FUF}<)Fho#f&tH@Xu`OKHHjUz|+btX$bMn-qKpq<$3P18xi>t zo-ElP(lQpFjHS2~cbpV!`X-EvXIwon$Lq+D`%mtoGh-r>11JR`rdJ%>_pO(uSwJH< zhtgwg5YJoe0Cs&v+Lo@#qM9jc{nDnL$*m2?`T4Lf-}~5{j20?Iys+J0+JQ@h>e@`^ zAeT##bMXaSO^A-FZxU$g>~5Ua9`Sde-jQHW{Lrs0bz4x{yxE8I&tQ1<#meN01zxyo zIuS4O&5G_YQNP$%$ve%)1D!Dapv;wVh0QD{)eAwfk2lb-x?mLw4L+;S&nwI^F?406 zpm*~Mi(Ajn%W)rNWS6|1r%KnE36eC~#GONS)tKd@;gj`%SN4FXc?gA~p9oP*;%qsI zj=KU>&t7CIQ`%-KQ@;I)zjA(928Wd%1V*gwuhYGP+<>6HuM zeJL8lo2+ziK@gjjD6-)dV8(;ctJyDiD)u9sr_vle*VG2v_{EeRaJ|Ni*%Y}Z8T-d zcTNK+XH!Ef+SOWNXqevBJ1FoWh-jA9%OBnV=x5aR84;fF8I308Szte9cE-i7X>t=< zW~Fh^yME3P>nA=@sM&Tzcutm|=r|Sg+`Ph3VPIewVpHHA-wOS;5ZwZjbnd1ix;_!{ zhA`*7Ho=|ee1D4_yOW&ra}b|p4+<*EZvObz=`(WFu$$7P;XN*QA zHjZ8vc;@{6pn8)~QNj8WZMKimHtwKHQxj_f@yV3pb*oIyabF@wEzQ>54ikdC%g{@J zR+pH*DN2D|%ELOaHOocabXpPYm6=JS;;|-y_nYltNAvmp#ATypO|_)g8$s_(bKAGAwmk`Ys5GT-l;KR- zRA=Z|`*UwCM5ZF9-R5G(GR{4WHl~j;TLyeX+Q`pVj>>XPDYn$~3t6ZkZvuP8SK#;jW)LSuoPnyRk3j2aaW%5751NXX}t=jXZPq|yGAwm zeOUak{fS_6M6V*u-Kxetp>U818H9{I@zTX$tr#NG9gsr~QpdX~LQ%A#D$1{Y_Y3sA zM>!R|i$}j)0+yd;2`ej42ZS!y#U}MkZznB0Jm80ip7j4Kk1%ZiR>iiO;ob|*AU#yKXE%g@3WvCd`hVBS+DRQb^l|> zm6ZhV9cpsjJA8Y`sQh$L;iG|;C1(mB88wT!Uz{2{X)H7(oa^j9NNJB9;##T7uV-$1 z+idTjGgji8&$jWAXFh44uQEEfa)*N#O6VI&P!4uqZ(+`bItMt?7f5;m648`3M|zS` zl{8r)t$u9dRq^O1AfRXt;f~>wL*dry2ZiUbBP7mGt5oSpksZ^I=%{nxZ^RFtBv|Nb z$fU^|oDql_KNY>eK&;ETmrOUeSO~fWMw$|HgXOSv;5iOK*QT#oqCd5 zKRfjvVL?0f+WtqU-k8j2Wo+;E_BLD~j%m}zSvxld+FU6TZY`kzA##T~f!y`ld(}8! 
z6nlf#7OtaQPm5je8=aO4Y;w>ZWWRE8+AX9tPD5NV@R@1ve&yQ4#p?Qn2b^XGLGi-G zb1^gN+c#nnhX#ui)tW|f4aS{^H%t9GRdsR|9*P&=AQP6=5*T{5+#~ZXTVaCNH<06A zxgcX#=^?A+>nKv|;{|&07&@@Nqjvo+MVYJ4mGhxua1S>9N>Sp1I7uI#qBl90hCn0)Wagd!QxK$p`1V*0T$h z+)0+Quuk2hXoe{(+hn4{gW@mhQltcVpgq^a+k3LuXe$JjD{P6*r{IL#fCGb)zDhOU z79)FYG+(R+NUCh!vyuIj7Zfx#TDjtL(Vxf`^Mt`7%gb!#W+CRxZilIGw1e&4EXytmu8+L61B|OjUTY=$R8$gwBhm2fC*zE6P>1Wr)~+) zQE_6b*?U!$!j`dL0&JCTW7m*PnC>=(?^}Jh{A6%rjzJ`FYjFY=^ct_rZYT*?xmp4+ zRkyNbw+ex@4!uCZTRSO<60rX)-U(eK%yd4(mPN(HcRM@DheBkV@}+={31|>xmG+KF z6JZ!eI};y-L@Ka>4v0S4CWIu>gvi8NdufQ;)@;-f?bIx6+1jq)kk$^A^=rGPCVlQ) zMlKzEu(D`in#M07U4D?OPkB_JIKBO5n)_{62S>Hi$NU|jTZljEbc6s<7%m^GycmtK zZSEUmG`K(~0ok>W_vu@xglwWAJLRtXB?c+(7M~6z=QGcCj*+fzF6OD}CADV|*fgnz zGipS9p(px))hJCFUlN7dvr&BiNK1cw9trZS0#Di7OUu5 z(BpHn`D3QktQ|6`9Yu$&IVXE|@iS@siN88G1**x6Bu+7_ePV)*R_PBbO?He;QTD@|={XqRmejPC5I8RlM~1IunPNfFiPv|vnQ}@85htXU z(*Tmv0HopXoSc1_XPd!cN#Sz-jUH9_>YH`?tC01U2WPCEd|x(=@YJ zx8sCak=or0nu$Mimjt&4JXnmmn2Y5!R8c4oE<0%<-*LVc9{D(_(tvSoPxugCOGp#i%>os(cT7#vk3Pg7Y&u z_SvJ-!rIKjl#`h+tv&7};MTg;OP|vewuad#@g|Fcq5{XUAr5lY@EHz1w=6)y`6tK0 z)pGT>6n6Bi_;-WwXah}_O)<%>lI8cA_LYkdl;6V@#N@{mZlZ;p_8#rb?zk_g#KoC( z#`F`|9*Jy8t9UAzj0hxemZQr%Z5n-+l*_mr_*%}WS41;@`~LEf5NiuihyBTPB9eu@ zsM4P_n2C$i%(QIR1?7hJBA2sP*@Udq6y{utf8~mNv@>6Vw(;RjC0yyH{A${}Zi$jH z+zRwu)o)!?qi6e{>mFKMEu`0CtQ58;3M|le=>)JyaA5~dHO(E5Kl&>Pq2Nx0|0ap@ ztAtXH41EfCmdAfzflfRjbcdUOOr!fQ>v&Ik2TuN&=~EDpkKPrR{`UY2l6_m_MORl> z;DRkrIwZ|nN@uZ5cYU5eH4Q+q0o;GxX=H5dbahHoXY0Cel^%P|AI_#ikiK!3r2U!f zcA3YE`|sH70blTg0()W25nNM;zsi4vrEphb{Dt+2`0`3R1>PJei@1@Ty!-DD?_-1~ z3pES^1%bct%45F)V?aJaDPzH8p(XHF=JJT792cGV!hZx0@V*#-0R-S7VEKjq#}sb* zz*iqQb;*!EY(RB-j>;Y{J<8ob`;njFnuLypD$CPBr5@+%6MUy+`YbeV{smAw`>FZ< zC&KTU6?q7n(TKA%?q{L(U9)KnJNc+yb_(;qRuY~p?0A+&lbj^Le?d>~9QYh4{7oybI zFSn7HddDX@cT}YpI56!B2s8fMP|K80iT%qrBOltUPCP-VEB*vt<29YqD;x3y^^L0DWm6z8?JKUw%#sh)K-vt#6RH*Udc)Y`4|7b9yc6ReKeV3hEEU zV6y~T5I0&~yb%ok5%;>Y+-l4Lne}hL2JA!2o43C;8Ed-zPrs1zX=_5MRS%T2W$%PF zb&IvB@cQdg=g*RyO&oXo9VjH;F%1i~g1^)hENlXdZ@rw($lY-EtVoVWVz)VVS$*AkQW}=QT4I7Z)vkU8!QvX}?M?*3Up%v4T{mjz%G1 zXsLNruwfwSKyZlmGQRYCh^g5>yKI1gr3@aMJ37e@8y+>IVk-fMwqfcuaJ_WXQ~0T+Xtq8x<+Fw0-D}__s(tg z{_35JE;1=nX?9VRnCRp@3Yoxp0u0xAh4lDI*>b5acgl$t9{!1QI%NnyXRnRu;Pg*`hpb^-(64yRv>{PGPjz{rFbdLbz=zh^8fs9 zkKU>{9iQXB>#?KqBpVG+%AKnq-X=!Ih2)xC&Lh3l1->=BV!y+8z*NgX8VpyZT6=bF z6v;?ck>TEqFGN~~9nwbJ-xe)Vok>bAH0WiAIG2#Dpm^Z3V8Y?uHl&}^Bu;@|0S z`RCpje8Oi1!zC-Ek$m#h{6Z+0j}R`t`@ykWS9|-MH@Fzg^%S;M$T5qr^4xXIE}>5l zbZi>53IY-Kr}}}52Iv)5O^m@kO$NlNGrP82Wz~*y4H+J;2e(2Y=K@X#G1@USQ`M^V zr#D615*MoDn@4$}Ro*S&Qj_r_dx`7PiG;LS2-;nZ`1i{>^4~WRRNz+|{W7HZ3M!1{ zd~_#_WfuI80`b%(j{+{=Z5L1h@BFt1+7u89oEjp1yh#yj1ZcStKm2NNGlNqej=Fi! 
zc$MMf4XtkKGjelt-}Itm!$LW;)g%p_vug~wvysqbAqGJLSmRok2G=hDYOV2!?>XIr z75we(0 za0BEaJv~=g`d68SXaMwur{fw0e7A6Z6CY5_b<*+4?7lv6hXQJxP5nyuG4p6jzB0|b znlLB`K*SVR8JE~y53F*XfNT7FWFdrAw1cB-z%Mm3xwqb>AMu{51H&dz8|ndFaXmfA zq=BOWp-Co_QlL1iZXJ| z81YJ!I1hTytyn(-DHo$^MAz;GY4&z5;9zyH2_G5O`W-a@_4U|-e*Mb5T|$aqe)lkY z2?JOvZ%D97t)KO2SPL=Z5eWeKcM6n~r2M3y#>g&!+L5|HoJqY=za|{G;$)Y+>?San z7cd?s$p~PJn(J3#z%}LA_knBpN=G0>V=LMP%1<(TGG9Tn zGdJyMN{O{b?Xz-NgWNt~%m~OPR6(20y-5V*5ZgOgK;9rGA;C}z=u4Uu+N%#wmaH0q z2*Bz09)B&(pgRpyoHF1Ypqh<|A45{Yl}AGtO{4bAknV0Cdhz5{5U$qEs~}o8Q(E`* z?gr7@zE7?gtOQ)S4$Sq{{3k0gh%mmx75xsH+r4ay+rT&MG*p*oZn8flkwZf#QTwUi zgbLBdLg2y0!5CDgrw1C@d9TYYl|a5}23=`t-hxN7d1{r>o0kv%eGf}0d`xs>rNMa} z%*?KAUBUVCY)hwj9Mb)98Ffh>ug&{>Uey0!QJ=>PL*f5IbxFwU3Ad${o0s?hJ@!}; zJPOJ0Z+(HB`&Gz|!=a}*eMDdAy3_730K%){`vTP|=f^7%i+HWNi0r$Dwf~;<0spz# z#IooEm_z*MB|>3$XFti$KKbvv{Zybq zMhmN!X`mBgzV7Yj|I^NOzcrO~e^x}oiV{G|D#hSRQObgJ0%$1G5kd!HMGQg$k=|j| zC@e@(K$aE^te_|XK_EZ~3Q7@36r>r-5_)85fzZkKX7{VF^1Oe-`^$ZvJ7;Fj+?l!O zoH=toryTvogZze`^%W>TV7nW-Z{=eJ_fk!QbpQOY_tO` z#poaFpB~>Oge&fZk({O6=A&Ps^feWw+hEgUjB2*S%zEn#Ajs?X&Rp)jHs0?J2-qhb z>s`zd$!;MW;45pdo>m3Q{2R%A!Onzbb|}>9jCB{YoVyrk#zXPzWeaRiEad(Eei&20$6oMGFFw1?6}|os zV8oZ&h4A#Gl|?`!*AjLlja!S_fh0ZJ1Mz?wVmy_{4v`)=ChbTTYoN6o)D}Gz5_8P} z)we9S+#8%-7m)_2jr(mKT}&kxosJi?$?Olv7YSpJRQAZn;Oy>TAV>;9P{|)6F|ToJ z39GR3p0Q6(b!*TpplMQOK#pS292wA`s~V0fR7Yr>0WhgSsDHeMs3|UBCIpc9>-Ev5 zBSwH$+OCO2Y{!TE4b&HF+;aoe+vNO25b*Zr$6|Km6@Ax{t%}=GzXDS-KE)5bctIOp z1K5AZ#2jEyzY{iZjr4LfHa6aq@0V`AL((a+o(jHh=Op2pc_m z`9rxseocFp-K_8xobBei=DPn6$u|5KSKVYVgH>V!tEpOVkzZUo-vX=aPzrr$(7yd% z!jB7nB;QKaQBhFxuynUVtPH(M-;C)J*%)B*#??Ne#`x9>6%0pdItQtD=ai29bDaP8 zmgZj(0fBu3a%RGWRc;#jy;4m~%rg(@3& zw{|r5@pq3{Tn*#C#21LfoCp05f+)ab`5_tHFcYs+m~8~|+9$+MH5Ar1<)YYRKj=5G z06)wHb4dy&%ZEu5y$XTap2Qep(u5%y3E+2}ke{V6@3`2{mmr)W=8^;m!Vj@iWIz8j z0-ct^tgu<&0xx{W$Pkoy7-nJ!^5KU8L?{?01$*MgewrkK%?v?PH`z?5Zhgl(0enUT zla>JPk-`AP2=*W@)%|ZM-~~Ynv98Red?4$g+NxH+(Oma^j(zJf%qMZtH8!VswEm2L z6hb>#-}eLxc^?hgw*&-@(T-wy`^47-5`*$0vO3LOm${qL-EG*Mmcc z%~~l0w^VHE!o`7k)NHw!@B}#^Qu`&v7<*@dR;gQ5VxXgQ<zTgk(8Fd?(A{_J!+)ni)8S+r&@mK#Z?oB77qe<>?ZQZj=>g9{In@g z%??2usOs@%sR7Y0)qM>Oeq$&;-H=v9hz z0`vsuW5=5PnyNORzhc?x2)OKRiY0r~XjXmHQ&D9=0>|G5jZ7iK%!b98Cz!mNy8N{Q zLeFD3k8?MC_9Q?B;hunjpo~GfmCW@8&8CNx#XSa$W}P_=ieqO!J1<{U0nnom^?>cv z(ul=B>=`DmjLubEiQkx9CM1_lafy&;CbNcTD`J++(cU0dG}_t!0`=!84~AmwOFd$*0FC66 z($Qw)mVbPU;SjRcUA!$_yV_dxuJuh(|46)ld_VraJ&^G(iJ<*~>SS+o+0_;9JoH|x zUSP^w1{Qu^-56b9ZegY$wp^%w%JeTyJ>`;XC$2020b{r1AbXJB=*d)Enw^~c&EkZ9>_(B)Mrh$>G(2w$WBX7*15#9 zF?W8)#Vf9Yv9cOfE4p2@(#O(_dsl9Zf9@pLYCfW9+WYT_c60MXpkM%1ODj~@*^J>0 zn6iYs3sBx77wZa%`hv=D);VikJ<8Hb{*3c^q?$SNz$|{yb?wj+`Q>NT66;oFQP{G< zSYMJF;CKQ#P*=$Y?|5mSX7dksnARzT_LFc<(X0y1^7@(XuI8SVqe%t>y|(4K&JjXe z-Y#mT&k^3Umc1I0=T7apAyvnW_?vH^RMf2>YdXekW>~5f2oW|sw*D9zVMao)FsRY) zp?TI`Di~LCgZ5?WvlR0KQIO%|=q?Rjm7O32tzon|gD_c0TJP$%iDs?hyaFc0pLUuk zM3K9aZhYQE>mL*$;CMD$>DD~-yUViJN9RF&S(Zh5%1%Ez9Ca}n6x~siYkb#iF>Meo z?V>0YN@$eS89Iz3^mwdrD*e10Od`}wOzAMOIkc4`+C(&w>93jvX6X~JuF*fL2%Zxk z7M}SOMANc&$j_XA7BPRsJ0%|(V#qK`085;qmH6k=oi0$gr$a4nZw$v>?o&&x2%Yzd z+D_VKK7N3YVAZt$3VJJa_41xpi0=U{qd#tJ)#{1Bl~=WLRJa!O>+yF^w9%LePvB0^|cd^hOz%tmYxf*RpMK4Y|D0s@}ONBo~8+D9DYOAf)z}J zBE;Gx2QutFs@IO7KYGh7!|0huW#H9(Xd}}W zkZ0yM8-_OJHWbDzy&nu7Z47=9fN5lSH-sai@Yu;Z!oA$9C}MIV%gJ}8oKGPt=HOz_ z*lTdVldfJ$Lg zy%$FlJ7bL6($@?ob&cF29q_^jkUMlo8z>x6IOJ{+>(iX(Ui?mOTv%vR-qovp=HmsVA|7IHW#B`fUfY0MBILU+kLf zR=PABE8iR&dXT@OAlh{jA3SH*{5EAIm4vbyB@1A>s5a2(RItP!-Pu+Krk(Ad7LGn8 z==tP>PP`U)9kljz09$5STlE8D2E>_=6!%#*TErqMAofY8JOJL#X`e^@vn(F4?xhyE2?AhH%3PXw67hU-UL&_dK-jdAS|F|3(&bo*+ 
zAJ3Go(K~QTkttjZ@}^%5OSu+Tvy0!eVX2mSd=TQ=!8`bw&B6P%MWL#nJRaoT7{4$} zMZcZG-g>!{A{z>ZKtX`qC=?{FP71W~%uowAMUBA$FW_120Xr}56_}qr!)xS-AC07s zW~6v>A2OH~N0Y5UP&rn5PG(k^-bKd|wGKP3CMkUKXk#AM=KU~QC3HZ<{iA+xMc||c zCXR_$pBlKp)D}Iz;}UiHCCLL%S& zD62;8hS#7Qwlc>O@%hcp@n`0nC$XpF`%n@!loujfC-+V_bf~wH`TY{htnoITLFw1L kB2RB^fBG-#FNbx&WzbhnxHp>wXg&^8BP&BZ?AKfW2UWhOBme*a literal 0 HcmV?d00001 diff --git a/examples/jax/benchmark.md b/examples/jax/benchmark.md new file mode 100644 index 0000000..6a73724 --- /dev/null +++ b/examples/jax/benchmark.md @@ -0,0 +1,67 @@ +## TestBed + +| Name | GPU | Bandwidth | +|------|---------|-----------| +| lm1 | 2080 Ti | 1 Gbps | +| lm6 | 2080 Ti | 1 Gbps | +| room | V100 | 100 Gbps | +| AWS | V100 | 100 Gbps | + + +| | CUDA | cuDNN | jaxlib | cupy | +|---------|------|-------|--------|-------| +| Version | 10.1 | 7.6.5 | 0.1.56 | 8.3.0 | + + +- ResNet18 batch size (per replica): 128 +- ResNet101 batch size (per replica): 128 +- Bert-base batch size (per relica): 8, sentence length 128. + +## Baseline + +| | ResNet18 (images/s) | ResNet101 (images/s) | Bert-base (words/s) | +|------|---------------------|----------------------|---------------------| +| lm1 | 74.31 | 19.70 | 1798.73 | +| lm6 | 72.07 | 21.06 | 1906.47 | +| room | 90.94 | 22.97 | 1710.08 | +| AWS | 90.20 | 23.94 | 2269.57 | + +## Results + +### Setting: 1x node, up to 8x GPUs, on lm1 and lm6 + +- Green: AllReduce +- Orange: PS +- Blue: Ideal + +#### Bert-base +![img.png](Bert-base-1node.png) + +#### ResNet-18 +![img.png](resnet18-1node.png) + +#### ResNet-101 +![img_1.png](resnet101-1node.png) + +### Setting: up to 3x nodes, up to 12x GPUs + +- Blue: AllReduce +- Red: ideal + +Note: we observe bandwidth bottleneck. + +#### 1 Gbps bandwidth, ResNet-18 +![img.png](resnet18-distributed-1gbps.png) + +#### 1 Gbps bandwidth, ResNet-101 +![img.png](resnet101-distributed-1gbps.png) + +#### 1 Gbps bandwidth, Bert-base +![img.png](bert-base-distributed-1gbps.png) + +#### 100 Gbps bandwidth +| | Throughput (12 nodes) | Scalability (x) | Throughput (16 nodes) | Scalability (x) | +|-----------|-----------------------|-----------------|-----------------------|-----------------| +| ResNet18 | 915.43 | 10.15x | 1191.73 | 13.21x | +| ResNet101 | 248.76 | 10.43x | 326.49 | 13.69x | +| Bert-base | 20479.66 | 9.08x | 26798.02 | 11.88x | diff --git a/examples/jax/bert-base-distributed-1gbps.png b/examples/jax/bert-base-distributed-1gbps.png new file mode 100644 index 0000000000000000000000000000000000000000..638a47335969d7da3efaf18415c6760a6c8858ac GIT binary patch literal 27447 zcmb4rbwJbm_qTDch3Hifq^}7hMM_#lL_unFD@dm_444JzD3y{N3`TcKZ7KpvO7|up z-8sg__I$RXSAXC8{XNg~58&io=Y8UJUgx}dsiCGse~k6mzJ2@X@7}ou-nZ|d#lC$9 zjvYAwd~y!E&wJlKJJQ`-H?+M?7e)zYp+4mItIP1d*PlTPLvcais6=I6-Iq4~c7bg6 z#b0dm-P$ibuZY%Cd##n86?H;N!zw;x*;piLD&BT3Cr(bJdmg_{EOD!2v)HOI#d#3X z%fKp+!`{4kL)Jna0)F-*2P|V`l}`abdTPLcuSF5yKY(BHk2(+hV1N`U{Owt`oYpliYQ#>o3k4VdU$y^j+%SZI~oz zR%zaCD~dM&#vZqM4<;J!Vo~LE*7iFy=3sKQ>0m1R!Ug}@5@9KsRzvUBn2+jj%!}|} zX4{UgBThqy@%>Axd}$XZDviB6GtSPXTIsj(12eIgn!&)Mo2`MVtYNy$RaXS%#HH>H zn}iJc*t?l}@HPpcVmS1fh6?!wQ~GfmCd3nLI)iB#vq?!c9o5a`*uldq*t!MYi^HPw1$2r);xH zo=TM)R!Qyu=lhWX!{64aZjarMl%G5)AsJ24Xc#Fj`!C9Eq9EdxPU` zCv7T+=tQuX?nb6`x0#^?-8wo|ys#AmyS9;Ah_=@m$=$biAL;pUcv!FMR^2QYuHkfd z28-lbF##8%V3Emq_+qb_zCCZl*LMdy2$d;4#&0Fj({Gq1n?J_nVZC-$YuZfjD%kd& z_fd;=U&vqE5;GAZhsO#93p+wAFd2I{8+1n%#F}g+)Mo@^6 z@cw;}DJ%&Ii4AI3lOg&Q$+{)=N|fq-$)vXv9m2Ex>57ZnWWP}1B|herEh#^G+Wm{< z@^7OJVU2Ti^XI27a{5Z8C+MpzT9WgQN#G2|U-yq@9%kiS3Ku?XvQcn)IK}?6iS^Oy 
zUTeGWU~OTU4TH(XQ!`hU&7{KM($)I>Od$>C#WG%TzKu4-jN?h13Ve#Q^^^3Pud^i! z9ZxKs$eLfwX2f4{_riZEj9QF!_Q~pO9}Z{|pYc`W?#V}AgxhCXdC!zKhh4Zo^G2n( zd{Kllp+0}iw4hdpyT_n>=?dYivL<Yl{28T%YNTS87P$SZ8wJlw+=%At#wST`9#2 zy{8M!Vw5(68=x=Z;h7Z@O6*VDKdQQkx&_SExrO!kSw8c-Zdl`v)ta2xH+)RI^|ge+ zoDVo9{()F&`%Q{}61qYJi`Js!Sz?O_o)eGd7hi3VdU#E?cWP0ieqaTX(3*!}wMP}~ z!k|gV$xngtzD{Gwhh+(Zy~lIoJWf-5oA;9kG3I)XV-=Q`>xw-6*`s&;kd0*1Mrw;nQ8Y(elf+Fnj!?To&6m6{0wk`(h#QV?uVqfS^c zX549>1sY+J5PH;6&fSq8CF}1^Fl=Ay3A`H&X;!fd@D$^oUQ3gjpDr$w!$WkmyPkB3 zHxxSLwW~z**j*4Q*o4HxHb|{=lB|mec_$-zbwi4-Tc-d{`Z2SH3Ct|F;Gv+fnTpI` zqs%tUCa7a=&zCRi{-~+{-Ur$>-Brd&q&tsX*6D3pbiNZhOA1!k; z*L9TjDeU`T`y@$6&+Fpt_Aphf$I7>4`qfjXR*l-D>C5`nfqf31>T1Dhxv!ma1U#+g zR#jHFwhS%jSrPWM;>t8Cqwjz~c^NC_Se6aA0hEQk8}Vc<}0gdKTcHr4LXd z(#nAXp$6fXln`b580>zOwm;?0OB;t~RSr|al+5t-3FXc|DY5Ds@HLhuoD@crkJ>Oe zi1O*!5J*w)=f_X~b1oEXx+itwcNIb=zRI1m|2f&uQ7;R9Eryw&t?2+hFrsT`FhxQG z)_$9+g4aRSe^7Q*yAL^YI?309kGef$I^@hT+EJnqDl}!+fC=9CG32@N-ZlpffqgRn z%RaS071Uw%{&o_6NXDe_|2)CZ4bN=b5qk>sPypQcrWrrHVYYdDGFO(T;Q@lGGk{or zp5&+&C>~`nnF}0*6Pc}py$h1!___W6-0*53W!dDy*I@TQf8N5+b+@~09h{JqHLUF& zQW;Gi|1W#o?i%dv+0nuC0(zljYlBw?Nl^g@Rc=2Q_FG!817@PkNZ6jWZ_-ol9Q?W5 zze-7*CZ%32Si$ul(*5T`0ME)QrwRC)Zv=^FV@7>WmHN0P9{G{`a|u7h-XKv&G8(e0 z)YlW@1ODTOGX04>a*1+ceGQm~-SW)Nz(}^=nSJ|0WjRe?9;!)ZQZ-S9TPcjeR4rwS zIv+*Z=KUL>n?FCehtyZ6?C7LW!^tQFA9bHk!Ou@q#Lds6$W!h=n?pffkAq{)9WNkf zsH$I-!kBuCGKx<5lrlvuln);A0y9wTVg;OMyFk3RcW3;K4j@l)6pFkYJwjKdM-l*$ z2SA`oJ`XXpzxR4GlE{WLm0*ivJJZve!wT4zA9cR7A;cmo~4!zpDghKwm_1ygL`%9gh$NZdV3 z5QtDTbmrM!+pv1Hg{hr>u|U!BVe{&=S=0m;seZ}es9TvZ+AO;-!R$(($uJ`WD`_38 zJ`|OdVWry(xQo|R2{fFgK(Z?$zD~rL#5eIv^|?u0OB@DVj$af_K4OLVFZD@d_1{Sv z<{mYJ>-FnB1aHRmmWmB2N;?@Wc58{`+5r(tgeqWjhiHVh*Mr@)q{@Udj=ihI71-d^ zE=;(FYeT)Ey=y@~|26}vIHf^qR4^ZIaja!qp@WdoOO?+&BTYU=$N|%LZo^`h+|7pM zw$Pqt$eC_IzlGtE-5kM5_wvn-v$<--Jdr@ljqqXFrrf>3?D=r|@-XTxEr9b#1FKaE zsvysDf4fp?q26Qz0kd(05j0!q;&8~-3zHmSn$pW4+>n1=oqw`!L7DuLVYqp2oQ_zY zRo`8zA_-S0Ke`UKSd#J~k97H4)_K8&qpSq3E?XNvBIXy9i(Uc&`j0Apg;*}ZSoz1a zD@vjoD^nE0KyQ#Vpb;i6POx1LuUYaIRW$TPwe7dhCVezqZ(M1GO82x;G-6(qdg7HC zu-|eX0Hb74H#3i_*JyO>Sx`WmgxP5h6dql;Yt@#njz{Ysb84%Sy4#zVGnU%HD-phL zPmv?rGY5-PtDh@$?7)}$-A44-Hk(W)S)OLZSFh#b?QBOoxN|~ttaRv4`s-FX{sEXp z_tyu2|Jf!Q7&ttQ5tdiS4<4FG1V`Bk7DT*_borLOdFE44aE(RlYo@x~PnWyWr&Q(z zYSwipYh$&6eRJ;QxlSB;tGsGrLQnyeajK4&uQW{jT<+?&=43Eis0e_Yc(z1PPcdEt93M+Qp#lM4Y`AifPc^`bu2q-uu~h- z+#&wfYm;N~0-vJ)WR-`eO66|<@@1H8I%`Xqo9Kf$MRAGL;=}=)(|ELzS?``41*XzE zn)UYYdp|JIISmRgVnoIqhA=P{$m1F_KD44R!z`Al8*M}DyU7Ua2RvH2lIjf45aY3(J!(gvrfWbp)bIHg59V4qijU$ z;lMq$<6rBvraESCkZv!O#R|llLw9xJwCAYPSsn&U3I-ZmldAzRP*p)}a7-sKX?n7y|e z=GhrO zEU=4P%cki(%MYDjweO8?y)@Mlco<>i^Kl2?40imG@m6EB1kLHrY+&7s#3IVa?jGhW z=VefB%gX_f;D!D!LF^?ZN?bV$k*Q&vN0mWYd_kTVY~5#~P3@C5HQ8>+Ox5yq&N0%R zcnJw{|NalpAmX2tZhdl=)DwT7EJKq;_+KMD8@qhqm$NfKcN^yqocl{zmvB!NY_1~luT`(QrIrriWqKq}u%c@)n9%787eEZaNn-mg$%`nMq9e}7*tJ9Tn?KHigZ2S*e^ z$HqF5+v6s~nVxlPy_`|wS13pS*NyQPW&jab=3fmht59eMoS4a!|%*&{{+V~KN3(&1vRB3LiOUnUCro#YS8k=kM5e6lA=@0sR z+mX(_@E)x7niw!P?>fJOQO%Kylu}9y88tDlBU92c1MBUtJqDDF<%boe0g11~tQJYA z`h$uldHm~|&;0t1PP|EydZ~qiSo{k1SYG%Yl}+aUHt}6nAknSaG1UQBpgzzKfuC(b zZtvP_P9=U&*0if8L9`bzU22nu0xJ*#>tz`@Wl0mqUDHO1rZ@LwbHebY={VUQO4@zm zhXFQvf-oajy=Me6#Rg{r>72Tw)nS`PdY4PQp44D;;d?i$ zg{SNchH`s(krd}dF-YMGm16JQ32;_7R62p`c4&^}Fmga;2TWe; z!{=>WdN8Fey89WCAGjNk@n2Xd_WeI(!qbo%WggK!moS+7z59?D>mo7qKoz0YH~iGF zbiztaPa@puX!Z9*+?#3|1iS8NVLh9`B)f0l(kBg9y|@!kW5yf<*Xn(Ww>*cPuYaCv z?$8RT_#orGnVs5TrDt@KISTQz$8@VB$HfhPiPLA)-#2A!Z0W4qSH)%CV*2W!5s#P@ zBma!sf{JIT1R48&Es&gF@5bqU`@FyqP=5vI^StKq1d6{ljgB4+ICxWcVl}P#q{P`% 
zNSm(BEp3?J+3FhNcfIDVmP(l(n`uY11)IBUCc|8x*_#eAZ4nynb!AzLxBUN~BC<`Q#Is2^(Lw*#0Wn@L`UIJ6?IJyjs&^Smu+gn`Tm5*>m3~b*k0Qtu)Y* zY0%in+|U1_CdMYGZrF8^!tBnA0$~E&iJ&BDsn^s2&nKs@sSOH<{fCq)+y+P~&8dXJ z0Uy5lG+DZ~Vcn!M84}GtC`l*H_m-JGcs9d7BB~@Z9gC6fztg5C{`qivxe89Ue8Fm= zTes*&!0Av=P2CR8J7*}=4+VA8Le-;dNS-?Ra%ExojnxFm)3TloAV2nkD@|jzJ8(U{`V4(IO!8i zhUAcCKOehAzx9I#zKv6#@`ilZOcx)c(-U3(SJvqluRH$Lqr6t2Dj9NldZUUsU#W~O z9~zFdQLxMQ)9{`hiKF}TQnbLc-RBe@@xp!Hh9=5Av}wR@db@*S-qkWTT?EXLouEsa z8xENs*9#Njnw%s3L=4{sDzOqgO|+fIwb{=rT*$UnQF%6 zIvdjVOR&2*Wxg~DSlPoXk1Z%bfQkhGZ|V|CK?oF(03a)_7ywpY`I`cKsKL9YgT_<5 z7p8qgV+F2JSN(yLnrJK15Y4x<6evVNLll7U-^$n)Gf zc3g{wME|*f-&X;2>eo%rp+_KUzi(R3HqRdMzl%%@gaH_``}cnei7abNyiP;w{~YM| zr2vKe=Z3R$mRgdmfsc+;kt79Z7H36)-6=uizg_QdTJ^sj>bI2u=m^{(1v+j}RW%S0 zED`spKKM3`h5K)E$|0^y>atMogNn#0!}$GrG*tdS4hbyXKsh4K6ew8w5Dkd_BL@nv z_RrnciA4j~cWU1uYPvuH?B2T%6zFLf^uG-P+|NJefvn_`W0rT+23XUX8x$ixHU=bU z#Tms4c)OFzkKX=2%h1_g9_t6xBYv@^LfLKoZs3yHe$!gulGRu9tMEn`#Y+8hF(8Qc z{lO@)VOryWUqAG0t~d4D`KQRnC?D?}b<^GWQ;F zcr)t!uX8*Ev9h+T;T-juQhVP@0(faM!1E)!<3=M9XVxZkcD}&T@GsUl-|@h<@I9g- zJTaRwgh%l`g9Z!%I9i&oHLW8T`(6XcNYdc?PJ2PnM*+I;`tBMq0LRKOpad%moNxXE zW6RamY59IT;3Sd(cL}6iz*hid(@?BbFffs*w77QKQF8vbNS*3lGh>P24L1sM4HBag zgjyP~$C)XNim6mj`~$wSDR3WdLk@+p`-ixCn|7?M_?A5fs@m)l0$z~Xidc*E5!XZbSW_Os&TUp07KtZFmeIa zP$ff3HS`Xp8v3#fsD@tD0XVQ=x!jR!2Qt95q?Dyg`zrM|vCs;#@R<@j!D#>(>WL6z zoCudfNp_>LNa91&{4H?7V(i}!b(pZ)0oRr{bMjg#xREu2ODEvnhj(R+H5B|QyoWz+ zWiyWe_|q=tANH^|8}IkbANH90J{-ZbpvCyqA7<{oCfCq{o5MogbwJ9LTBtNXr55UY zYGVGv%eC8DA4dCf6Q~reEx=)JzgTD7fAYN#4B?WD_$x6JpeEp%GhJXJi3&2=dIkm^ zyvwXpeTH))m%p?1WYL%|gWvQ(1GdGtP~-REb2R;fHNhGrA(@qNc<7k;XLYYm!De1D zpzuyr-O}GwH-+&qf5S43+uQP}qVF57N#=S%Cnh%ZQTAl@vPl}1S~uYKG3gGNNELCe zIrv`jSG`?d?yDnjE8^`U&P5?6GV%VghR%*5eRiU!;sWz^3;UCd(nNNR0jyatMkB`A zf78Q8caSG30mOw`L_xx28|P{_?&OPKoiIdmr{7ub$V*P^6x^BcR3UTyL(KnfcLSSK z1IO0O>q9){&#s)UD4v>I7EZ0Ugv!ncmbB-L&}$B@q#ukhjDGo}Mj#SB?xB2Y<&P)? 
z&(AO#8)N_j?=D(MR}!nw5BVKrtvYidHos-$QPEctdb>R*SNimY>Bfv@DQ}mKpzRkg zwgsk}4-G_S(#j5}D8Bfop#X67(b?b2k)H)@_naS4mifi`Q9;%(#{mc^ZPU?!#r?=5 z-usj_8Giz|T4|5vIS`f8*;`21Q@K;0yfXfh!6$;X*EIaTL%}<#e~pJc)mY7hZ#6F`G2j5GmSq%nr+WR7#=ceb;5 z6|=LwRkp98UqwrvNt$8~?PY$WKzzz<6(>c?gDCXk zkMp0U1DM}ytZeLO&!*%sA|qn86o`IL&DFJWlgY(vD<7|OK~)=73DQiP&O#Zs2mOf| z-0T5ER$d4oF8;C~j2UcaO(BFk(5V%ZvZ|#C%++zp7*JAzH4LK2UqV?{@>hMfR+EYJ2zh@KbJO!r3!2klUVw{gIJm4ZvlN81T z#-UD%mj}7_PlPWADcLC(FRsnq*W%ex!z1q$4)kaiT^?vEv)ySN4@i(|DVh2+-gGOX z)mQjA4z~bqt=oM9b_aT>eh4083^U_g41LQUZF1Qx0+GYGc_~hQ`^zZv>rLe<=+_nZ zX@~WSDgz7pD75X_l#=lPDt!A!~C4pwuj>j0u$WqL}oAHj&<|Tfw|k9{#5~ zSb~(>6xqtpz0AaJUW$(g_h3ue{VKfT$Gx(Jxq-_dbsokx3!Tl4;1@@KV_>YdTi%3u zuAAPmaGM?b#4`1moeMv$FkdjoKP(Nc#mipiQocv3@N6hIm{7=2(k&>)tKEK}33CEC zq&-yjoEbQ+_XYpH(C;DTyIzyoybZ%FQ;qBN_{9uIP+!uE@+kAxG2Zo!Htfw-iOpm@ zfg{&88-IJ}o*UnxM)sT{Iqd3u_rlqA;e~IuZp*hj-m+{ce8^#(WWFjD0(NKk#}(BU z0QRQW*Pd9l1siWZErzry8qsWO^6J{TnE+mpRTutyN&6Y`CZQpE%Hj)2lYrq7gbK%4 zE5%#yPV-gxcUPieZ?9@%rr5s8Kd@g=PFvhtp%7DPRtJ==IUiFY*2flQ&47~TjeV+X z$)L5_R;W(1k$Pv#v)hR&PZ67qheu&4emO)lksw`aT@YbAhs|M{h`{B?nt8(PJ#$ZiTn* z#85{+Pvg5{&d2Xnf9)fCtnV^H1&=1(7X-tr3&^Q4diJH)9J#cP-ZXhyf99x^GxJE;s8XZts?DK3Q9FYr)_0VMbt2#^LxA zLJLb;gWo!aUuoIEU7al0NJ=CX_OkhRdx`UtG}%PgH3rJQOu2`Jh4v2zaHkdhZkjDX zao4V0D@w@d%gfmMtAB6ZN*ec5YEe@`$FWrUI;Etc(H=3iv5(A~EpvB+rW|8rUp*=2 zmea3O*K3%CH>nTQx+5PsevF&lKgIDj^5mt9eCS9`{U}6&Hy*XC45Y?CV^L>%+LDKl zhY^|MX;-Gb?b?~P4_|!XJ9Q4IOW|nZ;~3eF9Zcg2Q3!AF6?JYlaB*#kxF3&~$>i^yvVXJ*HiJ!v$ZPj%QY%V(j>o(; z7SZD>qGf$djLZUJwkOm%z{EzGmkuH95H`_6(TaWKqGNUX=Qwe7a3?`$jwaqR+5 zII`XFOlnV5tjrgQE5wejfY{PI`M8R}V17Ko1N-tNNUXCv^KOF@b*9dD923W)0Q+mDz@NG{w3+@!}x7g zGY?eG^lq5PpR@9K^)Pu;%<>JP9av&?_ijcWddPeeVpi%MiPs&GLrwGx-E&{h!~-P7 z>YD#>^cyLF`4~A#b#atDYC zX{SRWcl4!ydd3_7m!*w(^$@(+{J^Z%Vetl%sEE{rxsMr`#a__**1d|65JIpbc^-6g zA}0`X^YZ5i|B4m*w+yF!7pFU`o!ecMi-9uYFJZ+3$r#)*U~K9Knugam36AhPS5&<- zNG(O>t$bXqhjM+X(%&v?pGxhlYxVk4j6d|8U%69t4^gg8fkgvycmTF+Yh$pM?Ifn+ zxGvi>kv{oa@9|rhwA%zor7sIB+?3B3`L6jR>kndy8qrJ*Vc_oFXY%&Xz7MCka~U1) zLUIbZ={rBWq6;?>Zs^B6>5&VHi0E)q?@?Q!*Eff(&+PGNb$+YY=q9pKXAd;;jVXi$Gs@Aeqn(=RVmC#K0fGMYh(LP)R8 zfMvW&E8c+JrS^iXsXmM&!t1u86QC%&D&?hDg;Xs90*3d+N$@B%bPjxb|Nik4EFx{Z zb7JU(!YEQ3AMiK|0YOlEZYb*8Ku|8;#U>@VMt6kT*A-)oUG_i9lsn-XlPchkPi=_2 zDPiUObpRb&$(23pUON&Qg)jmJZ;YE5$pNQ10C<&UK1D(`oK5D@re*mw?=NuViPLtM z+4N-8qbNiTW#cNK>H{=S6jcm{i0r?cncHDMdydNB(PPWttT7Ec|=#^YJeB`#gH0rf#$I6FOyCcmwOkn zo#QQwb)ZWqnbD}i5Y;aTHY}UT$q30MRo!>(*zD9Qoj5(&L|7qe zwsF~~jFZ9KAPVtH5mXJdo0(PvRW?dEX_!~(9N1pDvzGV%+gtLPT1_|Duh1a8ma#28 zM%t+UyVhlYzh}717Zaca5WTR70`ZwwW4`E6fJlKUFe?(p*g@1Y!UNC>AXcf0h%thJ zRScYT8`r+RoZ==mH_rE&_}LHMxONuk^G*}LxVlk;8gT=I;@x8`mA=H*ZU3Qq6jE+g z{;2oq=ET9sDWOW42=6jxB+tWtqB#r~GmhKJ2@Z_yAeXw0jvLgSv_E(??-&U)@{HCEf%;nlYWJkD-?rTqXD6b|MXU?zz|OK$tw0u;|DBkcJ(ci;uyaVY zXGE1{)Qy241WdWMWyBe2L!B@}bHg0N$ha_9Xh zDSD?OIWJx)0l|cvxxAONhQ@)3PJXzK^LXVqk0-4;MzuOuylZObT{K8zkmO<852>-^ zl~I?-2{w|voPP<;U2G`RePSqKx?hTGtU?ituC;U^jEbza+WKya*B_Og@+B;?7K%3nJMsqBUrZ6e=lbpf9osS z>B_N@MUe|d%6ZayMAbZAIgv;%8>Nzw2&< zoQRk&+O;wU8Xfj5F6arB!)l-d?84M+cp!D5teu%){`6@zytQA+t|~5@P82yB$~d1e zK5*6$}avxCS!$nZyn7%S7SI1;A+a&p2su4h}TbR@UI8M$VmFfd~o zfH)rTHjE5Mo$sMCUW%&~K$-MdR|WyUeaJS(S(=cAafLhX215?(5zn$GYu;yg7SzQd z)$%9*2ip^cFF6fCtfmhYo0&xkyRS4ImixT+LSs7i-FgbynQXtHF8@~kIrQtvSiSHi z4maRjD*m7;BH-BeAzwTIDCCtT@In^UG!eS>shGif-8sg8#>HhU-)$y=Pr1P4vt+=+ zr?^@~*PN#kI;Apt94CbH3QBOcbCgSA6P;|*SZw{GWyQVp8DfbG`5FtqcY;3oiijZL zL4s?-nI@!7Uft_Wd%K$tU*H=h)`U3nLR9Z!A7vw))2Cc-e|qQ^bBJ_B0+~{CM2j_M z=Je%l+z8u9lhFN-uA@(fG;5qs{IK>=-ra6EfZqBQL?=VzyJ`8}cVP<$WB;TfMKlGoBa9?s%uL)q9%&E~ZBg z6+rql_KbkzB2Iq~dg7D`KGpR 
zc3oPksR4}QV@m*jLN<*tl@pj-0?0*NK$b(#2eS{8bLUV{GbNAC+9)7rzRXHTYr4qoua}Y0sYl?<9FIBl!)p$O9EH(b@`EH&@3#KhwcKkbnsklSuHQbqOqSa zMM$_={0=ABwezMGfPBOYI28EM0{~Tb3rRXz79=sO(pc&cN|>W*&_}RCP1(7{rzH%Q z@HMTclP$~#VmJx+t=+h?NyI*rOIs(wY)1C9ILRqds8=gQ4)}=&UOcCffF*Vv_yZB@6J7Z%+`AMpKe=0 zUWL1z#rbKTX)&V8nj=8ng@)rkwP8WQYyNnmsL}FCdB&4MqJiT<B`vsIWdwyR}SR2;3R`;4DcS9v&sOGfKozPw-ro%gd4m{u>L~Z`k z?b`$1DhGB{yJrKA8$t4VFm;S=C!?D+3z9udQhXjoBK-;Bea|ucGY5H(L^}1#_Mc2 zypBYi-+xFNZmE=jyS}4}fxjX;I)Xn$Ok@V$($3pU*nybF^N>D`(HZE-zYa1#3n*Qq zGKe)dyh-)=@!aUpVW`ypQS*4-X$W(v9}x3B zTvG9~ZVZ`$SXjKA1+le7K4M8B7y`fuyWfob(*Td6#pd7{t!EZn@@yaKC(}=IPc+8e zTs0%44w(905q#K%*tO(noC+rRI;}5C3)|tI=|YNayzBnHg{lmXC`4nQw<7t%D$G7p z0L2gCN9k0U0_@{yB+n;(9z}or>w#?f{F=1~@20t%)2kD=R|C4c=oSMJtD{xX=K^ko zoX~S8pH6aa&uG26EV{9pP@bAau5 zC>WJyh&w>=fJp>*7;kli>~CjbKR-J$JL$@UDPn+u6-?I{&EVg@s{n~*4ivIA%`zn$ z@`Ub$?#A%f#o^`orTNSHc2~XQQz99=c@!UoQ2i7@RZ&G>TgxbI${wC@ro1$rz!_YB zSu9cY_*z0WDJZF%9l4XZeVvPotVq`8^E9j&9hMsQ%UymK*~fM-z0o`0lNkDJtUCh$ z`)JXtrp|<<(2uP7;98A7?~Jk=+rE%|eLVdO!#yavXO3;itU?($f_~UL&FP|2Qd@fC z`rU(p<6iaMNu@vZwG>*_K4GUZ40(aOV*&+h8DqBQ<w)$x-owd5Pa7EJr;csoN$yF6VXIKV%)0wFYfR1fl1#5&uruvq zUU#mwpCsx`5%>$DQms)}X)= zVjZ;X*Jbhw+|qV0iQTt5Qm98Ct#UG3(yR9t8f4qQ5QOj}BqnaZl#`o?VfW9xqqx4= zHFa1X2T?_>d)`@i-OmPr6B5B}vi>=EshQ7%D#H$FmRP}mG*SabaX&|d_!|QHIZnQp zzgkn9IeQ!_?H(gNL`lkh;imAlH_tepBVLl)UT$XxptKm&qHr}XjT#V1N8N8Hvwg(; z_h%P=Hjo^3R^AB61P?9k0jT*&OeE??wBOjxvBQ>I}+i6y>(<%m{_dH z`O8Lv!O2xN^Cj9X>q%i>=*aPM56qXF*6H7JAnK}J%a#%pe%~z;Ibh&1301L*S67&4 zx?(Yw5i;rNRB`o4eN&TlCAS^-_91>rLwmQIiT;bu1&b~C1 zY*ho5EL)Y8uJPj*kkc3c&PJS_=Ss%JXVEPMAd1MZlsgCXUH`V# zO>uVl6qLx_cCcNGTD+vVPEAT_0+>$^2A-JgdG)tRGYP@lLgJZi8J?GBRxkc;e*e3f zrGm_fQO(tvM~=a%OJKVfcu(GEu?W&)gQ#Ol1a{+jz4|aaM)?($Wdu+Q1rqPKXdz^J z9iuB?jz!tCeRuxD18A=bjsSnU0st#kdv*SV-ImP=n1(&HNLJ>$nj)G@L?!4gj0xO5 z>y76IdLk`6F!2T?ITQo)5=3g|oEDYj12O|3I6mS@kI)mp9UN>x7cmI7S-+<7?^SQ% zuWI`#eNaj-XvsC)3UzcZ}x*vEK^zkq_lf7a`WC5z?Cvg*`!HKp~-duAN0K(yPtmj+`}N` z;6+b;`sNySKIp?d@Io_tHFA`XHPrd!?Jp9Kj^4XAoL9g|2j)Hwh9y}VtbcE@CSE8t z9oTJ%rWN(5?zG{?U3xu<_K>@Swe`*I2m8->*+$31D)_(Jee9p?+uYqervty6l-Aj> zBv_20y}=Zr*lQtQ!+sX~2tna!1l*G_ajwC*u>Gl6xQgbEi+HEssESKh)y@;LFDb+Q)9Q2A#8^^IHto0-Y?X( zFz*-7!IsncoUD7_h}v}a6cBc362Q-6rCK@|VPr_woM^)b zmCo(+d^a%$v;R_-7>C7>C32VU`nW1f)xT7%H0>6#nltUoJ`qI&{srj5H#oZFwOXjy07QLHLjVt z@C9|{DXygm&JGw&V)s*Dco+Wj!aEPBSDy_k)`T@Srxi0a1(H|aucnPHxEaHC63}tZ zl`|6SIB8vSzq%djwUf_#ll!fy$L8TZ=m93%fNw=mr$G1 zZFv-r*8@1oVl(a&=tIZ}IW7!bq6s+=02sA?3A9IUlDsYg)JK}Ruk37h=s6L#GG@MN zuZx>uQ(nf`fYKmN=Hy>4%t;ScmoBs^fEuK&3x@-a-qdZHcb)Y5ejt-GplJ8>ZK&n< zx{_Uhf%eBJPI`&H?G#jG$gW#}>x@mwsK~5UTQ@!a(T`l>kBaSiRnvajJ-4IhGcwD3 z?{PbCgNWj?*#SF0`~0n$01On6XNC${F?MQT)J(Skd401`d6iDMT>DI-vhJms_%(u_o>My9mgJ?gkOeU@9krTfzZLQ!dCJ;b5S!iEByq7$>}>;70et``TLpw@)G05u^F&i)6KLxnZ_z{n`lLqh-CSC=e-&YZ^$ zK0qo%h(a7r%p6d#i#kaNYFrYU$Ft4OkIExjB}xq-AGRKDC_$LwEAypn@hXbc*Al5T zytI>3E4rAz5=wUoxhossP2LKlJ1oM~o>-Hcm-#m`CLyl+y4KeDn)d;y z(dE4!bQBw{2VIHUgRZj#vEduIqJ$~q$%Qbd@BJ|Z^RqxfSQ?wC8fS^h@cx^$UeW@I z?9ixIutv!Ya$j2vYTLgrZcH%-Zz>TW|B%rLd!r_z$FRgL;1z(ybfzIVbJqD|UC=c+ zE3~#2J3Pnds;h=ecoQ(8qeI3L`Ul_vMSc}2T!Mw{qta6VUk12jN|GBh9fkPuCl>-B zcxuNxoe8z$9k=TtEB_a>-CR{5gmYtL`Zh(b+^LMC$*HTe=HEb<3Xi|eq6@j)g<2qTGTL|M_`fidJ%u6OsN91oIncbNmDA%7h`S=HUDfmW0 zK=k9IEgp1sN{c8-YEno|BqoKCl#Zn4BFjPbye0d~<(MI+eA=_eeA7bd%G=h*>e4Sv z`a}Jx?+nl zk*?_^+5h6S`B=A9O%0C#s;+b>lgITJI8qah=pioHskKikdFVnG()+lfg)n6)jiC%; zZkK*iEN6D_Z7bt>oamX^-MC&i(IG|Xh*NRA9wn*)?tkC>&(e3d+sdFa+HF^;y`nTmf&$$S&iV$p#NiK#ooW$@I1^Jx7#P_`Enp|&R)yuQ zu?3X`ud-hnvI;7HX~=SPRRb=O@??7ytkjU!RL&7pt&%;JdGKT*BeL3!@3OjCk<@5& 
z;jr782nJpGdmtFlVMR;T05)g*v6NY|p){6wJ;rHBQaHCFxmIs}Chz@!Y1JBrlH|F8Vc3@dhqC7$3*st7~?_sJYjI~qr2Ptz26~yO^8k_Ha`?f!e zH3qmDpiPqoIRG^W`jk5J={*Tk`)6A`g9!xnIF5ujtNqzfGcn29@;5mTfY-)At3HjC zq3i>AyPJ$)5g;cQd6^tldJ$zC#iekM%B}t9#1NKY zd$_TDJd3=&pBEWVE}H7PzAT#lAbYaCujsmu{rhxDA%4wwo(+>+2e|JY6S&JV+Voui zr=)&iy(+~&f)_e9eLPg6CZ|GCTDr;PZ!2|&*p~QP!jHYF+!08uxd)HwDw+v4n7FCN z{qplCfW-Xz3+SZQr=8mFB5lr>%Z@l6m7(;29QG-_5wt0^)e+^4nV6T~yt6=zdPFJA zUmYTy<4U5Tv;T1{$wEfeq z((Dm1kG-1*dV1XL)dU)ryE0pHDUEWw@2YMUj#$-?tyI=QQQ?WF`76z@n`_^UU4*Y_ z0<9~+x)jFp2XUFgI2!M!vRb1JJum*gNxf!q>|5nM_iIGQ<>ZQMiw-ScLicy8!KCgA z(<(JU_RCmKOM)4IlCqmC;Qil4vClA^g;sx9N;-3f{Wx1?A?C^hV*HE2sbKn@KhzTl zR#DF3-8Hw7jPpYq%W@va%v6O;`KT2;ik<&B$hd*!d;_q-8dq0Z4eW435_?wyy^KSi zTxP-*rJ9-Up(vc@Ijz=dKEe2!h1^B>x~6^_7zTmaJCGq#JM(^+gOko?dm2vt1~glh zyrR5702Ff4Hh@j;;Y2a>v*0r zY|Qn0V`~BD-cwtx%J}JUE2C#A|JnMER<%qSW=%DYOLT+|=U(oiv%G{OoPu z(ZFyDlILIBJ99?5zrW})_T_h6>UmYgC*eSn6$tPwGW!FxMV5(v7T_SBhzWqcs zw7CRDSC)MmB@3Nm%1uorA7&G_SPaWE(_%QU;G5jAz)Y)_-IdG|l8<9&m3d%gyj73s zmrCZ;mucvVU_w0+LG&&0`C6~HPA~HY(YUSz?t@T$_9SEbs^IaVGm35*vpcd^eY-x{ zN;6#w$M{nUfHZ0sDDqM&Q4(YC7?By{0%H@zo4Qll`X2!<^_=7@*6 z3LW2A?SDAJ?$?Cq(SIf0-Lc#hYMxu*fR4;wU07VYDVpx;B>d4T8j|M(3` z=tJ&T`L*2@KY$H?Dz?S|U0mQ_P|u0vI%JKiZ;`g7_^KGI0GpLhQ7thtqMn||S4~fe{gVnMh@uwDP^EH1$A-#uumZzdaIAma+)Q1DbW!lE-@eIYQ!~>n|EZL z!}O!)FlWY6m1GEX+w279rd6I4JHuCbpmCYj2}|xbBTNS^!r1~80+v=fEQKFTL8JOp z-wucmlLxDS`o*6A`_<@8Ywc^?E7kCGUfHCFiTIbO{i~8;&3t;H%O`|Hu}qdX=X8cS z{AEL<52v+h-T30;*c=w5G(73{w?8r9Ge66HGeqF(sC>wQT`Px$N~P77;OT};5$1a_ zT*uEhO?9nYDqo|AYsE990qy&uM_*98V$asGm8WaM#($5Jl+wz&nd9W)8ZRyYaMW** z7n5eq7^X47WU+>=tY9Uh{OLq)1^quRmhhc$kOh$mg7s4UFkJ`-KsK2V2FTW-+>1I=G)T|_A<*~rqsi=!lz^SW_iv1 zu}pSC69tnR(9VQm?YBFE)U0y22}N~zb9BMiQvj!9>Ed;}a=ncgn^3dyJzL==W6GT! z=WeN178vmrwNn_D2faHihsx>(;f_N^@#cc^@M`N}ZU4fU}@~NMsqw?hb^Nt|xE?GvD}Hx@0V{ zI~4k@%sMS2=R_V$zK#GaQS)bK7mU#7@<3vT^@Q2#0m&!XG*nOOWbPo$QMkmW*l7dt zUZfCP8Clhf#;z;1vQZUVKSC<}!vcm7U=b#sa;CJon76EhjaSAN=NX%mIW{sjDM2c) zQLWV0r#@F9TtgIzo8K&{CD*ur+w|~0{MpHToyo7dWXox_0bs@Hc|3X2>0Q9QQWJ6a zR1tIXJ}GrmcoR+bgF})ejfEvKRi%%!(c&Tp6cCAJv_$z+SgnrK?wTGP{6D3X!tpxw)!ma zm2v^?>~(xWbqg+;Ti|^eF(KzJU2)|>Vo3U2QAg+lq$+sA%-zg#W$AY9BA(7mWr4mE zPt-S(Ao-fTwZmS*mohZWjoA8i(W-1aG6gB;EHb*>W7ZS0Qxop_*@q6_?hgO`aQ962>`($@A7#eO@2dm6+~FO%eBS(Dpk>8QNjb zm)v+hw@?sV+vSK`Fj?G-}abYB(@sSkN@3yR(13S)bJ$0?(c@>rz#yHO5G zW-DIBmrAA?K!+lU2ic-RAD#Z~uHU9Sl4u^ekeo*+X#xbKq1juo9x`m^QJeCmx{Z75 zcK1VaN-<~s*9N$1w81>bJEcRUjw@5Q`@mQpzK&JMgh6Z1j9>v}0T(E($;F-Szo9(O z`7MGr!gT}a>Y}djy2#ARqgK(zX63f+6HkCJ0UQW~Dezhzzy#avM!3y2L0xfQ`I>Ub zij&{`+-$>*9MzFSR&a0$YPCHrcl}L++tW>X%K+0Co5aPe$qTIv5{5QkHKZi2?jFgl zUfPVHI5N48Db9<(2KkYyyvb7s3cgA+p4870%?3|K2`rEU=)-E?`N4u&r5_rh5b@q6Q7d=OcM7O(!lud~|sDDR7 z0?YmB+INa}UAU!~MnIu?d+yGNb4~z>+V%axr8YNzqCD+Ci7JXgH}R4x-pn+8ZHW?F z*ZwA&L7x+KC(FH4S9eR5e+zo^DS0hMrEkadbdj3`_zV(bjvp1!F7rwJMLho*QO`ls z9I1Kvw|{cdf17&pRV@gRcX>{eFBHEIaXbMv|Hj=OUH{3TQEwVD9{xB%EjBy&8})xu zPSq@2wD9yS@aWD6GvfRk5%R(C8fwC5{@>U9h5UgJ`+tX@5R^W*9;hkLuMYd$eE>o& zFAFRL_DU%2e;rUgvY#)*{V$sbm6c*cY(q z;`pKkkjnlelr@t9vcf}(gCXsx0Df-yl`v$HBuFWP? z0BiCj8!(uE;V5T^^)()$X0Mpvv->BN$lIfv1Kx!_6@r%ks~i3=9^6)xw8ks60LB=i zk^h_(BdYf^o_{+lHyAt3!qtSmt$&jrkfZm!SiK1sEz%awq)EAiSy*qkm6eR*wLkDzXhe$6AeQR;md zmMHO0P^ZerGsF`BDwi8w`y7?0dFC;`4JXkBpYu7uSnI{L?M2v^^$KYNNzi8|P5llg z_`|#HtbyZ}>48)D4Ory6vYY$HSBOQHTynKd@h2jWD+91n)}S7R@MHA5mqs43B~QND zC*aUba`xzA!JIWQV<69?^KRq^pD#A=gTMSC`SNilA$e$Rhi;kC&v-h=gbtB99;=a;m#TyqzIS-$l z@g4&)-rmd#OBLf3Fb=LH8BFk|k74exHmUO(4Ur3clQrcR6AtYCNLcP}I>C$35Jv(vfbrV1j+Y-0MBmd|_sj#O&5Vt#ei{`JRPjlNcvWwJ zn{}~btCth*2eth?sqCibQ_xWKB)hBk=+ag9UOpc}l}bxA;Y%1M5l6Ro8yfaTIT#{? 
zOAH-2ah{R?nAU*71@YIl@K0Xy#bM+%@m#0scq9EN=Sfy5*Jq}Q31zN1L za(stL9b*TteC#o?@4&i45sr9lxGajF znQ5(4t%KmV*^xKdGfT;uTG^ggJC}Ois(9>17ulZeiP^eb|AjC4rg5shgDaSB`8X^C zmyZ!z`Oanoy3bcayUqMrwqp9I8nvz_mK$DAv2 zB+lp0q*Z0E|-Bkk{3$wRDJt-L3?YGy<@u4Qv2wZ$6U>$Dv& z-Lq8Z=zOU|ANiqy?c2f$Q0ScgazAs1wHk6%kVLhfCbSSg$76(=xeh3soSB*CO_YSo zOWi016`T?}9t-t#(RvVpUKf>cQ(25psc1Tq}KUdAGe z3Cr(lEMoA`vg%(`>B&MU=k(vLfl66$dWgf~$U(N7AbBg%|6y)(`Hk7!hsCX2BTdo} zFUhk1haR=J2}@Cp~VmR(!g#c#a}~+?01xEAU2m{vG2f zO)9c7d+>-q9_DIhv)0h zm0+`Grzunzd-Q7Gl>R7O!E967YQ=HtB<0LvvfU`_5)gK2N*+`@;fqe#ZXJ%yYUB=AyE9{DLuS_HK z@5Z>?yjiP~Z9U7Ux|gD`wfy`RD}j>Pn7NnBCPGpztwgy5pjMjTig0&=`6QRsTjq{n zI9p?yEgD>`OkO4EV}X6F(ARaVz;id_SBPuWbx7AGX(h=|*PM#PR6j*<+}Oo3cyxPK zBnf0jo|{TERJ_A#@Sk9 z&x#K(Q&TLirUbNw!@q7!n;!}md4ILxLZe0bMIbkd7tnW&OB}JVbKenDd$ihk8pw?f z{ILmG^_M?_W8fo=qH;v#MjcM1@gs75PgH04hU}7sMPPxq*D)CW_{+z(NsRvA)S zZh|RPF`>7Y2a<%FYrGJiBu!W)2)mZB3UuPfzn~-T41Ioi-?^({0IgK6X>>J~jb3@3 z56&^)Q^?m?WBA{R_AzQyRI8OJX%9$qy=RUX3j>kRLZkRXJ$+wWBO6EO}RF6 zHsh+Vl?U>#`wiyWEG*M4@P0>4BLcc#2CnwEr@BgI4_n;;uZ7B-ufB3r*(_-HI7Ul< z>-_d(i{VJ7(<}~E7G?(EIqT=uo%$f?cNqDKnFNY|kml)>_fZkZXdWm?Xtts5sCmH? zTD|I>7i|uL?k5d`?Zj*;h}-asiu3`OrRSx3M5cFY2}Wh@nVR!>KilW%P)MbH-hZ3N46*5;P$<)U)uJaDI^DxN7h3dgf)euI&1^dZ0cEa55s+2=XZ zcn3QZrtD>4qZ=JB8z~ri8xy4Q$Kn-182(FVid_#Fs(a?gvqx!>D0{?NRWV@4isKtu z<>l(F%>u6rffZzn6C3ZGljn=;Lvp!yFDb zM%Ek;0hpa+yqddW8FkOcss9^h{;LDMFO&9C@((+`?rE~9>}};h>532bId0Sp^Uu<4 zyS8Pj&(RXIFsbD`4(gL*y6m=P$zz~G+G+E6nJ!1gdJ!(5xV~=kl{NjiFCU%3A17Fz zHaG1pDB`hu3km~sOabk8@y%Kz{tr532eevk@9S~yWflfAOO*U4Wu?;I5ZtK6Azd-I z@L|3o8aKlb<=vphbwg8kA2z{6EgUGDY-0M}Ltd$+-F1WTR?blngveY}_eP+LWJ|k( zPPMr6pw1&y(t0u14h?d*#JEsD>dhld8a~D>8rM}3G;$nRTb@gS7v{=Sro>IA@L3^KK5gD43XUxUZ zyPVWY zwdlZpeqh?}oOO}mX8F-vAtjQIdXzYA=pT{TrFbkaQFcKVb!bq!CFerHO~~^XrT#$& zd~I=O9MujDnOc|6L;V>;+AjU z7}m%4f`_+l^l=d+LrVR9KWBDKhkyrnZ`1nM$Nmo`N98@DI)WSJ%O-xMeJ8oT4~ecq`Y%R3zD zs3OjG{7oayK{vpL3 ztLr0IR#LjVr36H}ySp29{MSak z&wak<_q=>w@WCx>%{As&bB=M1Yg}`Oy;PFMzDsr&1qB6LRz^Yv1qDqV1qIdkHX86h zFOTc&P*6-AWhKPaJoR@{Lv--F5nW*LgkL{_v-8rXm$UaB)K?q%`D1qp_8l}d(ZW|m@|~uX6=M0rcPdhNP)+o zPzb}Lw2=Qm!9qEp27Wm{L`9$?KjMlY7PkZ4NK0|~0|M-isLjA6vjgh650!b=>7=A3 zr8~0JaS9fv^DvM%%fHxeKHVdRumAGWy?5u%f$Lh?WQq$a>Fl;(L>QK{IGesaVZj1d zx2M;;>(NhVgsL{kXS95Od5uHfw>;mst158MVAG#)>8FtktUQEMz0vj2a$Z}|M0W93 zJgmR5*8Y%sbXIulHS%F4&(z>;>(<9M>!+?k*yehxWwq7$YErrm(D*K=2F?1S-1xgO zGXfUh zb0|_m^T9Oy7x^wqjsk;KM01M~Bz!K92stDxt_ni-oi=LxaxxG1vsJ!jr-=vA`|aKi zmlp@ZA-g1%>bvJ0+}zyQ$cyV+zzA55GjmR%At_p}x|a1+CdH zRIqoghR#;~TBM+;s0eva3!C8x_(D!c&QoFclRH~@x@{5Nr{1->f%>)(cFlLa;b_j) zf8JC30GP%|OigW(xG`FRe!_5#kgW@r`#g_nl0xRZbXiw!bFS>S(GNuse&kG1wUEie zW@|m3iP7Qs6U zF7Ayk-2a*f&wtc0JqWUS`LrQmp`5Dj4wudda*4mm`g%ews@~_ZSb|$KUM4+?^E{T^ zIg@LO{gGf!8I6$Ovu91#6NaBY)%`Z;-V&79nohngVC<|(wsGBfTTJiwn`k?OVj$!O?-=4`}}X{xRS}ndgv`A1qiB?g@JV4e1DRRpm4ptTsz)}iHTb$*WhQt zy(NP?HoEQLi31ie7R2@IkpEr2Aa=jqN(>dFYzR7gwv(P};$p8`gqaE9Y?@1Yg!f$_UADx7gpYr%bYKI(|NFTJ#Xgg&|i3VWHZiF zIh01AEEMwbJJZ8QfH4PtJCz92e0AKjP{ctP)X;}-{MsqsTJ3mx{e;B=#wE%!e^N*f zM*}vTHKfWTv{)_3W%A5uHT=R`lKY76gVSFG23UO4i_QvtKQ2NpY*54YbJUyK zq+HNyZ0%(RKPb8Pz-hX6Wf{4`F@d{K6k1p8G>)nL9gviwktW8#PK{Bk*jNxhd8M1l-3L!y5^%( zJ-yQuXv zaT62Irh&D8zP9J3e}Xmokg@6FFf0l^d6{g!?5n3bRi~MY!^S*9r!ca_!liiigMWX_ zedMf5J{&6h<}SsF@MmgO&*^bpY>|!sVyV>3)}lyQ?OLs2yi*aYSJL{EMK`i*m@NU0 ze9gYppcZg7A>4-h#K5cuo&Sl$i}AK;Xv*HQBd?8?wn!Ry_APmWP*G85d_L_jN58c5 zKsa#Qn|;$_SV(_-u=Au6r(*CjY93ux-rC{_6F)f`pf%G75s~gguKh-Wfgt6=8qwL^ZJYaXyU7u3K)rEc$+V0Q9z{Y$+gvUFu*j)dF*n!I`~#9^RLjr`7X6}TGCL3D;9?%h$#P4c zZEjpb@eBu=Uyx=Uf$WiF&x*&f{pDYWE$>gajKlILoR#bLow5tZ+sc{QU_-$qJaH~%_C(>h 
zC36y8_>Ud#pE9zi*@T`tEm4^aShCh9cZg&9^b}Qh2x<-52}1}wbssP#@*LsbBoaf9 z*5I_m1fFd9sZb5cEt;I17qspwfv1eAvD^J$LKxe!ZD@YgGPo9nOZV72fingZa_luo z74j!HLP5RhgHf1qWWO>8v2JqJ4%>u8uu!bI+vaG6l2pvt57qmfvi`AR{Smhre79Ve zq$(Ft)?G^xrOW(zn(F%re!bQ49&C1(*yP?0-=&6USI~sr=^cJ17@p67)5D@_4{@Bm zqZ6OKE?Mn&Fkl49$#nm)qZlR(?;1 z>03HR230UA^__`$(>X}%Em+^WcX@#@S55^sJxunO#(jI=__K|nuRCAfSY(AYw7HTO zYl^RYpA<(d%8RILbHbG<_`2x7rp{cXFXq+c@1aJ``*P}1GEx1&uHAqzYqgpUg)ukP z_b*3%c@AtI*mo)P`M6f+{*(dP{HI$y!)#TmPtgazuQY^uYIk8mx=%H;wl=I!fV z6vl}&1fAqCoZJhJnG8ngX~bgY`;0H8Wv^6q3pS-^hJ#Fd06V%J@5g({mKQs9!$OL0 z>thNCVOgAm=|^OO68#@EBuJgY`W20W*~fXSa1(zb(Y6-DgsD@@oqX;?v{N;OS3VVu zc8(n%jszpIv+A{naq2Uz?q}O`e_A`TaLoD&Unxsqx+8498z-9S#VxAp*fF-8wZ+?X z5V`M^FXuO{JQTn3GhOF6%{@c)`S}p|?g5ZPRS-t_gt)>&bB##??bZg##_Wz1mz7gg zB^8Vv!@nM9I^OkS0SqxrT*yzg5=BhtDKh8gGkI?8<3jVK>(l+}SXV}Hr@t5n<|a-2 za2dxR$Axth9Lws&H0)BKL*hpGn~G)D4NjwqHI=}vnDP!nlN^yg?c*#sm2zci3{y?5yPuKIb{IO^X)jR-RD< zR!vZcX8R=1H`-kA)CK78gc%M8)Q#a*%x6oLJ)1aOw41%=TWmvOO3iky z;Mp;eW!%MvoCB%om}kiAcakvhip=7*i$*CxtBeCKk(FP8;Fet39O>dM-D$B@*9 zdANBu`(APZL!f+o1^;@F5XDMg!~G%oKFbh`&_FDWqNF%+X{nQ=u85Y^dqfH6>TQdc zbMM^W;gf@(vX>qAee`(XquRJLapqxjs=?=P^TEm3#q;oKyEYHQTO`nw+91<@f$=;k z@0YCcvY(`S7oE3C<|dUM4If|R7;p9k4uq-U@%eloUSlp~s3>QT;#ZLLdoR&dqoXPE zt8;`#KJCurjEw+Yxf#+#?hR5MDPS~yNOow%Ibj?JoauTjZsB|Bu`Kn#uY*rUZN<=< zBbrrEIkbl2obIiA**(4lub|#T&pe%NWE9=Gy#V47T5F!Zi>M_1p!P^b<|)#@h%WX06ZySA9N^V@=B_rm^Qgj zfcn!%y}?`$X&{4~ug#quy#L`jJfd})LXD>m)kbhyE=BiPHow_O>3P4iu|2-bj2Xce zb_->}kY?}0Dwic02{@>kcZNIU{^rZTG*ec>L1beSWFj z^!f8ua3a-`c=$BgVMEFBa@Sc?7;hPbzk8_U%cvGTPyv(qRz2Go3Va`@!Mppt<7qJg zo>3i6{~{0iA$8Nt*~${N^lS`9u<{b{rCuIsJ-XujMDdsoU*N7y8U$#ARe$Tzpnr{}mAh2m&QhHkgPwsJ(GvqG&);Db4la`{ z+0z<#_w%N~A-gZ`z8iTkbMa||@T1WCZpU;)rpTQK3R6b=Ghv%)S7qa*q?Zm@Bm5{G z8Q6)wY1pY!CKH%Vdj=hGK0|&{+;)vrai=he>C(4hKOlE{;u3&ZVfWMG>70jRo}!;0e#V4@ z?vwQj)EaP8)XfoX8%kCm`f8B42xPV!w$(w<4yehH808wh^wHegZdV_Hw{>?6{K50w z4c<~T>W>q9)i3t_%Vpjh)!Zld?lc)nV6&>r3cShUyJCETOZ{b*CmCZxQgfwpO))BCaBR$Zu7Bi z6Yu2w`m20{gXMXc?lMR==^fwA2QkH7lXONoKnz z;MUG3Q@Paoa0L9GH2r_mpFqre?6^7iG9!xf&|*z0t42mdcHojd&G4CuY14v*Hrnk! 
[binary patch data elided]

diff --git a/examples/jax/resnet18-1node.png b/examples/jax/resnet18-1node.png
new file mode 100644
index 0000000000000000000000000000000000000000..78d8e9c4d902662766ceb9c7bef9323edf8da2f6
GIT binary patch
literal 40504
[binary patch data elided]

diff --git a/examples/jax/resnet18-distributed-1gbps.png b/examples/jax/resnet18-distributed-1gbps.png
new file mode 100644
index 0000000000000000000000000000000000000000..4e2202561a087d4ebb4f609b78cc445844b7f07b
GIT binary patch
literal 19608
[binary patch data elided]
zFOj3-wE_Q6uyf1>t=8TV<6npl2@ZjzTO6>K4sHeD%J`L}CIcBHMhj$D92^qyM)M#1 z0&XBo(&qhZ1{lpJu^))Dlz)THqgYYJi@!R?2>BJ=ISw;roa_V~-piSjdjtlM7=J_P zBgDopFO2H$TE@ol=Uekxji@~R*F#^jEWem>np(kvwhsrF|8q!yPMobyXbm)BRZ$0Y zdjht+9;7IFthYKOs#LiFPkYB$tptq1mkfH-3Q(18Z>-B`h6_pYNWU%JnY85{5eEmVZ|t z2(%d#ycTDqI!1J(d6nn{J^~PBEm)rtMQoF6{7DV*Hrlw5a`6UGUgFGF_6axpy||dN zzXT`yJ2&mDbr$YkmRy^*Z+eLEzed6iqzx&A_DWuj3Px-)w!OF7lr&O+_x`Hjk%0fq zK6JUP^(LMCH|hb&L9Q-}x^Bw+1B!vJLTc;lY-%=@@1K<>;7=qQ~7#oYrM9F9u2dJ-x-4RB7X*l#^EQ{8w7<+Mt;(qp(Q1g$;4xYU;iuN z8MjR8ug+e>p6js5cp&Kj)$TTkINeh)km}MMn**gI{fyX>YyKn(Jxso4XdZ`Kz)oD5#g z7dq5~5JYDO3UWG45#Ytd#ycEfzTW10T`&L?Zwtm`v(}B#VU<6j7q{1fh9+Wu`kedA zr(l}HV-=NGm4e*F`JOO0G~e+gK{0vu2?u}gHX{0Iere@Sy@v!t3m#$p?k)wvDzB9W z>g`${7Ks$a7LE|v_{8F*-I`2&P$hM~eklFk`*P|Q!E85LQa8vaCzuc(2o8r+snLV$ zh_nWonLAqLmzR->_CJK4134m5_(m!ql#&S!IS<^+u_X?gI+y4Mz}Hm$`nd2IdZ%c)zHT~)WkgTdf2Ezbkk0T-*D4S9Vb8Tcu6oS0y6g6rzUa3G zd<~%@Qk&$?RlnJV%DOt&rS?4_#@gb!C27_*k|r$tGZnE)QcxtMRbuUC_x*3Y2>*5o ztu*!7B8vi(K;a#1DZ`RjJB&+UWLd7?WF_%|fsuci1)f3tX&>1mv4x}?n zTVf9$FMNz>bQ6nSEk;-yubo;ly3Z`sC|M*&R#GBRS1qad1x_Fr(kc#Q0{2qX6)Y3t zdrcMMumsKwrWN)NIMXtynFFOiHr=R!K_uapdLo6hoLuR5nBERg0;!=&|I-cInj21r z`hOiG5$GzWe=OczS(=s@50d6Sm4A{E;lbsYQn~}qfL0hTmX~&U(E`=z!ygo1#EWAd z_7f3Pxl-Dv_}p#1lktBM%gyT+3McwY%XCu2Pu<>ATYyJ6)hm^kes9Qf0O-Pk(IHJd z*PYFkM48^tr~$<+xc0i*0jRq&E*b@Y!7TeTGv21v9c{ z%aiw+V(@$;E&>P&4($$?hOV4*(hB|7gvZnWXc13yMV)UeX5sOMRYoS%w}4;@N}t|- zl{ty=gsY6E0E92@9H!moeep(7lO$~3*?!)Gf1liG)zI*P<~*c)NWzqog$#Ki%pr}o zOXe} zO4;k~Dq<*jL~8kdD1oNa6i^<|CUc#6SdN+<}rSYD=Latn)F7TN=MbMlW%Dc~FwI8G^AQ=B^y zH%ytdq5N}Y`o&vg7MUfAZ}ZP-(*OYu>_W%l>D0sFGrtpUGjNVFiHtu}Ja*7h13dBW zyZF&>i*XrNHfwE29v1NS^Vy3K%^#{IEq|R89y2;*G233`w@EeKFOMSp)5`NF@^bl7 z8zrVJusYJ6gY3m;rDQF)dd-_rUyAs9@v%%V{wca06d8B_9Q{4$-#7z^zLB3`c|e)u z@Ve!ENV~QXEI}MOZBixu{N10>pH+s2n2w70K)6OF`jFDIUm0e!t%+yucf%$jcVMmOg5D#>MbfNrm*JVa(+$g0n-y| z?}#M>laWC7|ue({TFy!7-_3~$g;6_;VvHTpm5FU_5uk&%su^aO_*s@ess z>{aCw?D%%fPYPz$ZZ3OUt}wE8t&zOn=^XAFL#hHP9}{)xB(gp8VKxxO)cd>uQOv## zm_D{ST%rT!lI|X;gBN}Jz>6gfj7NB<6^(cLEIa`a)Yz1K7i>Q4NeX10nmIbE+M8;= zCp%GJy)%riuMQwV7n-T2Dt=mgpc1{Nl~CvfA<;+n8TC%k=vE1i-3X;Gt01x5UO6-6 z0sxjb=Rl;j+>3t|j5E#I7J<+tNHQISCQ&)>LBKLAtcr$e^3qf|0E{VFK%J)b(S!+s z5jp?8WlC`9_jhx^hA*hQL7vI0*!n&YnNr(M{|0iPjmPQ942AqFH<~bl9MZrq0+3^c(- z6#k`wJ0)UnsUrEv|ExvJ+WklRfNH@L#o>EPqM{+K0LA_%K1Ul<^pXA3D0$sFNemqE z>h-oKIBMWLpe<+hJ}bFQ9?Bht^Uu8@7ALX`(#M|us&{WW49ELtYZ%9l z-Wu_R1PjW4A7(SJ3uw8VbvF2P>;3-WG2E$si{{u2bz1+ibAAt z+Kc!0MvM5~NeP!)w&y>Z3PqgW=HR+u*KxXQ1RV=60=ahn2u&Q&ZCq0X)97+zhd@73 z3x?A_2xbIyboPLXG%63qjaH$qV8f1)jW+PEbhSZHg&H~20W7`Cp6(AEHl{I(UqxO8 zx9ZVFUJ-?PUu6^Zfzq3CoC%&lDKcR22rzX08hW*<=nVcFE9vjKHN|@98#0`sEgJ{C zv?_H>Kz{q!4YxZwar-n9b=**}AO|Dw6-x(D-hb@O$g!9|?Y>R%V)LyNm-jUAbB3bU zS6+^QUvHu4`t=ry+Zq>v*Q2~Nn{Va-BD)L1wZT_TbsG8mtY~24`ZYP4tuhmX?S-_G zcyoR-p;jAXE`-3lJt=_I+J>cNZ?u*Wv#aiL8j8jblO{={ojhaY`aP!NH0wq z>?LPUx#QOdwVe;;KoNV9v+3w%eJc2roz>qG8jGkmG>v2#--e`o;caoJX7(3cbQM@{ zYVDpzu$S{o#L0$^Z`zKqi_yw@46`6|&u zh{=^l{ql^}%^3Y5!Dh9~EEW!*1MYMQ&G3qnQL^o>4T_Ap#}t+}IV|B{K7NzM+7?H~ zC`kuB9)lrX_S5_XQH|8An$CW+399BM{$cwx(Y{kdCe?&yMg~>7wD@0mx%^Ow%^xxb z{%^fp5OEL5>ulmF?bapD)XKhct2nnc-b%7=8o!?r-Z*(4NGFFOf7a9gdMk|LXa38o zTR4W4UPK&TysJ7raW;W(P|;)ucDcQ7vT|Si%xog08GA9)hb%Gm{SZ%3bJJ%n09x$k>hsTlw7y>#!g@ zF2;8jJ*&8XTD9@n+16)d3w}ecev9+N`1b2#ALirXF?Ydyr>m`*GDt71XZvIcq5B=O;lE*Bxh8D8R)^Q>pDnJ`+zsNNz2oKGRHL>XD3g7*m(h>9}2vbaiH;Ixkiy(QIODnLDapuxJH?g1<5I)`w znVD?buuNCxi*OohMY`fTEY=hT0M^ zK__s6#9x+MvDdtr`VLF*ZciTNAu+n}$MAB!1xcNodIIU)ETuyJsMULUS{pP@y4a&p z1*aBblHdUb)yQpjS$#KcS=_RBl9_|o`wzF zWgu~M#xq*VykGzBp57WwuvMj%fAO1;5;uyeq+J5Ke1o~EIgYdgpQ$mRG{}Js|Iv!7 
zD_dXb)q5w9u+zDJA^#wFUP1i;t6C zPKBDmLB;c#P-90&=f$8B$_~>+4xbZ}$|@gzb>W7d`{j2G^zb6Z5{-+B3C54t^*>ej zT^u7|JrD_ef%QCklKLARV(qJC0#Q$0GlPyG>Fjsel3cl(B(EoBklCBf1+_}>m1ng- zQnq9*z zOi<~YlP$UYgCkJ-#!lv;QRF!TpE~MD8zKAov##_cnU)Q{gcNM(m!g+nU6`nolpc4jJlw{ltT+*M@F*6bU5k74*g2(51U8CT@M zS?PqA{7ZW^pNt*}EJO?&T3^F>HQ@8fMRt3U=GiFy&&Z+y-*ISBo1Bt`NgwE1fdZUk z1-^hEU(I=~x7UDoVqDf7pEs84pom}_Qn@s>?=-h1rqB}?W)(wVgo|S#c&=+z&f-O^ zvrDxcwE}lHgrS6t^3YCvWqWUlm^k4WEGNUTJ7(+`BTHrl8OpT(i63rpITL+zNZmS{ z69VikW<^qB-B_usj9lL^Lp`>iq>OZ_g_*s~!Pm^Gsf&L8YQNs(P+#6ai7u!8U%w~` bMV