From 26ad191f4d303e1994ac58caef44fa0f34c93fc9 Mon Sep 17 00:00:00 2001 From: wangwl Date: Wed, 24 Dec 2025 01:34:47 +0000 Subject: [PATCH 1/3] add Sequencer2D --- .../Classification/Sequencer2D/coverage.txt | 3 + .../Sequencer2D/model/__init__.py | 5 + .../Sequencer2D/model/layers.py | 253 +++++ .../Sequencer2D/model/two_dim_sequencer.py | 447 +++++++++ .../Sequencer2D/model/vanilla_sequencer.py | 236 +++++ .../Sequencer2D/sequencer/Dockerfile | 30 + .../Sequencer2D/sequencer/LICENSE | 201 ++++ .../Sequencer2D/sequencer/README.md | 119 +++ .../Sequencer2D/sequencer/avg_checkpoints.py | 121 +++ .../Sequencer2D/sequencer/benchmark.py | 606 ++++++++++++ .../Sequencer2D/sequencer/clean_checkpoint.py | 78 ++ .../sequencer/datasets/__init__.py | 5 + .../Sequencer2D/sequencer/datasets/cars.py | 112 +++ .../Sequencer2D/sequencer/datasets/flowers.py | 125 +++ .../sequencer/distributed_train.sh | 4 + .../Sequencer2D/sequencer/erf/__init__.py | 0 .../Sequencer2D/sequencer/erf/models.py | 35 + .../Sequencer2D/sequencer/erf/save_output.py | 9 + .../Sequencer2D/sequencer/erf/scaler.py | 5 + .../Sequencer2D/sequencer/generate_erf.py | 291 ++++++ .../Sequencer2D/sequencer/hubconf.py | 6 + .../Sequencer2D/sequencer/img/BiLSTM2D.jpg | Bin 0 -> 73526 bytes .../sequencer/img/RIKKYOAI_main.png | Bin 0 -> 99021 bytes .../Sequencer2D/sequencer/img/Sequencer.jpg | Bin 0 -> 65080 bytes .../Sequencer2D/sequencer/img/Sequencer2D.jpg | Bin 0 -> 28229 bytes .../Sequencer2D/sequencer/img/anytech.svg | 33 + .../Sequencer2D/sequencer/requirements.txt | 66 ++ .../Sequencer2D/sequencer/train.py | 870 ++++++++++++++++++ .../Sequencer2D/sequencer/utils/__init__.py | 1 + .../Sequencer2D/sequencer/utils/helpers.py | 76 ++ .../sequencer/utils/timm/__init__.py | 2 + .../sequencer/utils/timm/checkpoint_saver.py | 163 ++++ .../sequencer/utils/timm/dataset_factory.py | 158 ++++ .../sequencer/utils/timm/summary.py | 28 + .../Sequencer2D/sequencer/validate.py | 384 ++++++++ 
.../Sequencer2D/sequencer/validate_c.py | 343 +++++++ .../Sequencer2D/sequencer/visualize_erf.py | 48 + .../Classification/Sequencer2D/sequencer2D.py | 78 ++ .../Sequencer2D/sequencer2D_loss.jpg | Bin 0 -> 36221 bytes .../Sequencer2D/sequencer2D_loss.txt | 29 + .../Sequencer2D/utils/__init__.py | 1 + .../Sequencer2D/utils/helpers.py | 76 ++ .../Sequencer2D/utils/timm/__init__.py | 2 + .../utils/timm/checkpoint_saver.py | 163 ++++ .../Sequencer2D/utils/timm/dataset_factory.py | 158 ++++ .../Sequencer2D/utils/timm/summary.py | 28 + .../Sequencer2D/weloTrainStep.py | 692 ++++++++++++++ 47 files changed, 6090 insertions(+) create mode 100644 PyTorch/build-in/Classification/Sequencer2D/coverage.txt create mode 100644 PyTorch/build-in/Classification/Sequencer2D/model/__init__.py create mode 100644 PyTorch/build-in/Classification/Sequencer2D/model/layers.py create mode 100644 PyTorch/build-in/Classification/Sequencer2D/model/two_dim_sequencer.py create mode 100644 PyTorch/build-in/Classification/Sequencer2D/model/vanilla_sequencer.py create mode 100644 PyTorch/build-in/Classification/Sequencer2D/sequencer/Dockerfile create mode 100644 PyTorch/build-in/Classification/Sequencer2D/sequencer/LICENSE create mode 100644 PyTorch/build-in/Classification/Sequencer2D/sequencer/README.md create mode 100755 PyTorch/build-in/Classification/Sequencer2D/sequencer/avg_checkpoints.py create mode 100755 PyTorch/build-in/Classification/Sequencer2D/sequencer/benchmark.py create mode 100755 PyTorch/build-in/Classification/Sequencer2D/sequencer/clean_checkpoint.py create mode 100644 PyTorch/build-in/Classification/Sequencer2D/sequencer/datasets/__init__.py create mode 100644 PyTorch/build-in/Classification/Sequencer2D/sequencer/datasets/cars.py create mode 100644 PyTorch/build-in/Classification/Sequencer2D/sequencer/datasets/flowers.py create mode 100755 PyTorch/build-in/Classification/Sequencer2D/sequencer/distributed_train.sh create mode 100644 
PyTorch/build-in/Classification/Sequencer2D/sequencer/erf/__init__.py create mode 100644 PyTorch/build-in/Classification/Sequencer2D/sequencer/erf/models.py create mode 100644 PyTorch/build-in/Classification/Sequencer2D/sequencer/erf/save_output.py create mode 100644 PyTorch/build-in/Classification/Sequencer2D/sequencer/erf/scaler.py create mode 100755 PyTorch/build-in/Classification/Sequencer2D/sequencer/generate_erf.py create mode 100755 PyTorch/build-in/Classification/Sequencer2D/sequencer/hubconf.py create mode 100644 PyTorch/build-in/Classification/Sequencer2D/sequencer/img/BiLSTM2D.jpg create mode 100644 PyTorch/build-in/Classification/Sequencer2D/sequencer/img/RIKKYOAI_main.png create mode 100644 PyTorch/build-in/Classification/Sequencer2D/sequencer/img/Sequencer.jpg create mode 100644 PyTorch/build-in/Classification/Sequencer2D/sequencer/img/Sequencer2D.jpg create mode 100644 PyTorch/build-in/Classification/Sequencer2D/sequencer/img/anytech.svg create mode 100644 PyTorch/build-in/Classification/Sequencer2D/sequencer/requirements.txt create mode 100755 PyTorch/build-in/Classification/Sequencer2D/sequencer/train.py create mode 100644 PyTorch/build-in/Classification/Sequencer2D/sequencer/utils/__init__.py create mode 100644 PyTorch/build-in/Classification/Sequencer2D/sequencer/utils/helpers.py create mode 100644 PyTorch/build-in/Classification/Sequencer2D/sequencer/utils/timm/__init__.py create mode 100644 PyTorch/build-in/Classification/Sequencer2D/sequencer/utils/timm/checkpoint_saver.py create mode 100644 PyTorch/build-in/Classification/Sequencer2D/sequencer/utils/timm/dataset_factory.py create mode 100644 PyTorch/build-in/Classification/Sequencer2D/sequencer/utils/timm/summary.py create mode 100755 PyTorch/build-in/Classification/Sequencer2D/sequencer/validate.py create mode 100755 PyTorch/build-in/Classification/Sequencer2D/sequencer/validate_c.py create mode 100755 PyTorch/build-in/Classification/Sequencer2D/sequencer/visualize_erf.py create mode 100644 
PyTorch/build-in/Classification/Sequencer2D/sequencer2D.py create mode 100644 PyTorch/build-in/Classification/Sequencer2D/sequencer2D_loss.jpg create mode 100644 PyTorch/build-in/Classification/Sequencer2D/sequencer2D_loss.txt create mode 100644 PyTorch/build-in/Classification/Sequencer2D/utils/__init__.py create mode 100644 PyTorch/build-in/Classification/Sequencer2D/utils/helpers.py create mode 100644 PyTorch/build-in/Classification/Sequencer2D/utils/timm/__init__.py create mode 100644 PyTorch/build-in/Classification/Sequencer2D/utils/timm/checkpoint_saver.py create mode 100644 PyTorch/build-in/Classification/Sequencer2D/utils/timm/dataset_factory.py create mode 100644 PyTorch/build-in/Classification/Sequencer2D/utils/timm/summary.py create mode 100644 PyTorch/build-in/Classification/Sequencer2D/weloTrainStep.py diff --git a/PyTorch/build-in/Classification/Sequencer2D/coverage.txt b/PyTorch/build-in/Classification/Sequencer2D/coverage.txt new file mode 100644 index 000000000..1755bf4c9 --- /dev/null +++ b/PyTorch/build-in/Classification/Sequencer2D/coverage.txt @@ -0,0 +1,3 @@ +all api: ['_amp_foreach_non_finite_check_and_unscale_', '_amp_update_scale_', '_copy_from', '_has_compatible_shallow_copy_type', '_local_scalar_dense', '_log_softmax', '_log_softmax_backward_data', '_pin_memory', '_reshape_alias', 'add', 'add_', 'addmm', 'as_strided', 'cat', 'contiguous', 'convolution', 'convolution_backward', 'copy_stride', 'div', 'dropout', 'eq', 'fill_', 'fused_sgd', 'gelu', 'gelu_backward', 'is_pinned', 'linear', 'lstm', 'matmul', 'mean', 'mm', 'mul', 'mul_', 'native_layer_norm', 'native_layer_norm_backward', 'nll_loss_backward', 'nll_loss_forward', 'reciprocal', 'set_', 'sigmoid_', 'sigmoid_backward', 'sum', 'tanh', 'tanh_', 'tanh_backward', 'topk_out', 'view', 'zero_'], total: 48 +fallback op: [], total: 0 +coverage rate: 100.00% diff --git a/PyTorch/build-in/Classification/Sequencer2D/model/__init__.py b/PyTorch/build-in/Classification/Sequencer2D/model/__init__.py 
new file mode 100644 index 000000000..251f6696f --- /dev/null +++ b/PyTorch/build-in/Classification/Sequencer2D/model/__init__.py @@ -0,0 +1,5 @@ +# Copyright (c) 2022. Yuki Tatsunami +# Licensed under the Apache License, Version 2.0 (the "License"); + +from .vanilla_sequencer import * +from .two_dim_sequencer import * diff --git a/PyTorch/build-in/Classification/Sequencer2D/model/layers.py b/PyTorch/build-in/Classification/Sequencer2D/model/layers.py new file mode 100644 index 000000000..6abb1abbf --- /dev/null +++ b/PyTorch/build-in/Classification/Sequencer2D/model/layers.py @@ -0,0 +1,253 @@ +# Copyright (c) 2022. Yuki Tatsunami +# Licensed under the Apache License, Version 2.0 (the "License"); + +from functools import partial +from typing import Tuple + +import torch +from timm.models.layers import DropPath, Mlp, PatchEmbed as TimmPatchEmbed + +from torch import nn, _assert, Tensor + +from utils.helpers import to_2tuple + + +class RNNIdentity(nn.Module): + def __init__(self, *args, **kwargs): + super(RNNIdentity, self).__init__() + + def forward(self, x: Tensor) -> Tuple[Tensor, None]: + return x, None + + +class RNNBase(nn.Module): + + def __init__(self, input_size, hidden_size=None, + num_layers: int = 1, bias: bool = True, bidirectional: bool = True): + super().__init__() + self.rnn = RNNIdentity() + + def forward(self, x): + B, H, W, C = x.shape + x, _ = self.rnn(x.view(B, -1, C)) + return x.view(B, H, W, -1) + + +class RNN(RNNBase): + + def __init__(self, input_size, hidden_size=None, + num_layers: int = 1, bias: bool = True, bidirectional: bool = True, + nonlinearity="tanh"): + super().__init__(input_size, hidden_size, num_layers, bias, bidirectional) + self.rnn = nn.RNN(input_size, hidden_size, num_layers, batch_first=True, + bias=bias, bidirectional=bidirectional, nonlinearity=nonlinearity) + + +class GRU(RNNBase): + + def __init__(self, input_size, hidden_size=None, + num_layers: int = 1, bias: bool = True, bidirectional: bool = True): + 
super().__init__(input_size, hidden_size, num_layers, bias, bidirectional) + self.rnn = nn.GRU(input_size, hidden_size, num_layers, batch_first=True, + bias=bias, bidirectional=bidirectional) + + +class LSTM(RNNBase): + + def __init__(self, input_size, hidden_size=None, + num_layers: int = 1, bias: bool = True, bidirectional: bool = True): + super().__init__(input_size, hidden_size, num_layers, bias, bidirectional) + self.rnn = nn.LSTM(input_size, hidden_size, num_layers, batch_first=True, + bias=bias, bidirectional=bidirectional) + + +class RNN2DBase(nn.Module): + + def __init__(self, input_size: int, hidden_size: int, + num_layers: int = 1, bias: bool = True, bidirectional: bool = True, + union="cat", with_fc=True): + super().__init__() + + self.input_size = input_size + self.hidden_size = hidden_size + self.output_size = 2 * hidden_size if bidirectional else hidden_size + self.union = union + + self.with_vertical = True + self.with_horizontal = True + self.with_fc = with_fc + + if with_fc: + if union == "cat": + self.fc = nn.Linear(2 * self.output_size, input_size) + elif union == "add": + self.fc = nn.Linear(self.output_size, input_size) + elif union == "vertical": + self.fc = nn.Linear(self.output_size, input_size) + self.with_horizontal = False + elif union == "horizontal": + self.fc = nn.Linear(self.output_size, input_size) + self.with_vertical = False + else: + raise ValueError("Unrecognized union: " + union) + elif union == "cat": + pass + if 2 * self.output_size != input_size: + raise ValueError(f"The output channel {2 * self.output_size} is different from the input channel {input_size}.") + elif union == "add": + pass + if self.output_size != input_size: + raise ValueError(f"The output channel {self.output_size} is different from the input channel {input_size}.") + elif union == "vertical": + if self.output_size != input_size: + raise ValueError(f"The output channel {self.output_size} is different from the input channel {input_size}.") + 
self.with_horizontal = False + elif union == "horizontal": + if self.output_size != input_size: + raise ValueError(f"The output channel {self.output_size} is different from the input channel {input_size}.") + self.with_vertical = False + else: + raise ValueError("Unrecognized union: " + union) + + self.rnn_v = RNNIdentity() + self.rnn_h = RNNIdentity() + + def forward(self, x): + B, H, W, C = x.shape + + if self.with_vertical: + v = x.permute(0, 2, 1, 3) + v = v.reshape(-1, H, C) + v, _ = self.rnn_v(v) + v = v.reshape(B, W, H, -1) + v = v.permute(0, 2, 1, 3) + + if self.with_horizontal: + h = x.reshape(-1, W, C) + h, _ = self.rnn_h(h) + h = h.reshape(B, H, W, -1) + + if self.with_vertical and self.with_horizontal: + if self.union == "cat": + x = torch.cat([v, h], dim=-1) + else: + x = v + h + elif self.with_vertical: + x = v + elif self.with_horizontal: + x = h + + if self.with_fc: + x = self.fc(x) + + return x + + +class RNN2D(RNN2DBase): + + def __init__(self, input_size: int, hidden_size: int, + num_layers: int = 1, bias: bool = True, bidirectional: bool = True, + union="cat", with_fc=True, nonlinearity="tanh"): + super().__init__(input_size, hidden_size, num_layers, bias, bidirectional, union, with_fc) + if self.with_vertical: + self.rnn_v = nn.RNN(input_size, hidden_size, num_layers, batch_first=True, bias=bias, bidirectional=bidirectional, nonlinearity=nonlinearity) + if self.with_horizontal: + self.rnn_h = nn.RNN(input_size, hidden_size, num_layers, batch_first=True, bias=bias, bidirectional=bidirectional, nonlinearity=nonlinearity) + + +class LSTM2D(RNN2DBase): + + def __init__(self, input_size: int, hidden_size: int, + num_layers: int = 1, bias: bool = True, bidirectional: bool = True, + union="cat", with_fc=True): + super().__init__(input_size, hidden_size, num_layers, bias, bidirectional, union, with_fc) + if self.with_vertical: + self.rnn_v = nn.LSTM(input_size, hidden_size, num_layers, batch_first=True, bias=bias, bidirectional=bidirectional) + if 
self.with_horizontal: + self.rnn_h = nn.LSTM(input_size, hidden_size, num_layers, batch_first=True, bias=bias, bidirectional=bidirectional) + + +class GRU2D(RNN2DBase): + + def __init__(self, input_size: int, hidden_size: int, + num_layers: int = 1, bias: bool = True, bidirectional: bool = True, + union="cat", with_fc=True): + super().__init__(input_size, hidden_size, num_layers, bias, bidirectional, union, with_fc) + if self.with_vertical: + self.rnn_v = nn.GRU(input_size, hidden_size, num_layers, batch_first=True, bias=bias, bidirectional=bidirectional) + if self.with_horizontal: + self.rnn_h = nn.GRU(input_size, hidden_size, num_layers, batch_first=True, bias=bias, bidirectional=bidirectional) + + +class VanillaSequencerBlock(nn.Module): + def __init__(self, dim, hidden_size, mlp_ratio=3.0, rnn_layer=LSTM, mlp_layer=Mlp, + norm_layer=partial(nn.LayerNorm, eps=1e-6), act_layer=nn.GELU, + num_layers=1, bidirectional=True, drop=0., drop_path=0.): + super().__init__() + channels_dim = int(mlp_ratio * dim) + self.norm1 = norm_layer(dim) + self.rnn_tokens = rnn_layer(dim, hidden_size, num_layers=num_layers, bidirectional=bidirectional) + self.drop_path = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() + self.norm2 = norm_layer(dim) + self.mlp_channels = mlp_layer(dim, channels_dim, act_layer=act_layer, drop=drop) + + def forward(self, x): + x = x + self.drop_path(self.rnn_tokens(self.norm1(x))) + x = x + self.drop_path(self.mlp_channels(self.norm2(x))) + return x + + +class Sequencer2DBlock(nn.Module): + def __init__(self, dim, hidden_size, mlp_ratio=3.0, rnn_layer=LSTM2D, mlp_layer=Mlp, + norm_layer=partial(nn.LayerNorm, eps=1e-6), act_layer=nn.GELU, + num_layers=1, bidirectional=True, union="cat", with_fc=True, + drop=0., drop_path=0.): + super().__init__() + channels_dim = int(mlp_ratio * dim) + self.norm1 = norm_layer(dim) + self.rnn_tokens = rnn_layer(dim, hidden_size, num_layers=num_layers, bidirectional=bidirectional, + union=union, with_fc=with_fc) + self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() + self.norm2 = norm_layer(dim) + self.mlp_channels = mlp_layer(dim, channels_dim, act_layer=act_layer, drop=drop) + + def forward(self, x): + x = x + self.drop_path(self.rnn_tokens(self.norm1(x))) + x = x + self.drop_path(self.mlp_channels(self.norm2(x))) + return x + + +class PatchEmbed(TimmPatchEmbed): + def forward(self, x): + x = self.proj(x) + if self.flatten: + x = x.flatten(2).transpose(1, 2) # BCHW -> BNC + else: + x = x.permute(0, 2, 3, 1) # BCHW -> BHWC + x = self.norm(x) + return x + + +class Shuffle(nn.Module): + def __init__(self): + super().__init__() + + def forward(self, x): + if self.training: + B, H, W, C = x.shape + r = torch.randperm(H * W) + x = x.reshape(B, -1, C) + x = x[:, r, :].reshape(B, H, W, -1) + return x + + +class Downsample2D(nn.Module): + def __init__(self, input_dim, output_dim, patch_size): + super().__init__() + self.down = nn.Conv2d(input_dim, output_dim, kernel_size=patch_size, stride=patch_size) + + def forward(self, x): + x = x.permute(0, 3, 1, 2) + x = self.down(x) + x = x.permute(0, 2, 3, 1) + return x diff --git 
a/PyTorch/build-in/Classification/Sequencer2D/model/two_dim_sequencer.py b/PyTorch/build-in/Classification/Sequencer2D/model/two_dim_sequencer.py new file mode 100644 index 000000000..70411fd84 --- /dev/null +++ b/PyTorch/build-in/Classification/Sequencer2D/model/two_dim_sequencer.py @@ -0,0 +1,447 @@ +# Copyright (c) 2022. Yuki Tatsunami +# Licensed under the Apache License, Version 2.0 (the "License"); + +import math +from functools import partial +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, DEFAULT_CROP_PCT +from timm.models.layers import lecun_normal_, Mlp +from timm.models.helpers import build_model_with_cfg, named_apply +from timm.models.registry import register_model +from torch import nn + +from model.layers import Sequencer2DBlock, PatchEmbed, LSTM2D, GRU2D, RNN2D, Downsample2D + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, + 'crop_pct': DEFAULT_CROP_PCT, 'interpolation': 'bicubic', 'fixed_input_size': True, + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'stem.proj', 'classifier': 'head', + **kwargs + } + + +def _init_weights(module: nn.Module, name: str, head_bias: float = 0., flax=False): + if isinstance(module, nn.Linear): + if name.startswith('head'): + nn.init.zeros_(module.weight) + nn.init.constant_(module.bias, head_bias) + else: + if flax: + # Flax defaults + lecun_normal_(module.weight) + if module.bias is not None: + nn.init.zeros_(module.bias) + else: + nn.init.xavier_uniform_(module.weight) + if module.bias is not None: + if 'mlp' in name: + nn.init.normal_(module.bias, std=1e-6) + else: + nn.init.zeros_(module.bias) + elif isinstance(module, nn.Conv2d): + lecun_normal_(module.weight) + if module.bias is not None: + nn.init.zeros_(module.bias) + elif isinstance(module, (nn.LayerNorm, nn.BatchNorm2d, nn.GroupNorm)): + nn.init.ones_(module.weight) + nn.init.zeros_(module.bias) + elif isinstance(module, (nn.RNN, 
nn.GRU, nn.LSTM)): + stdv = 1.0 / math.sqrt(module.hidden_size) + for weight in module.parameters(): + nn.init.uniform_(weight, -stdv, stdv) + elif hasattr(module, 'init_weights'): + module.init_weights() + + +def get_stage(index, layers, patch_sizes, embed_dims, hidden_sizes, mlp_ratios, block_layer, rnn_layer, mlp_layer, + norm_layer, act_layer, num_layers, bidirectional, union, + with_fc, drop=0., drop_path_rate=0., **kwargs): + assert len(layers) == len(patch_sizes) == len(embed_dims) == len(hidden_sizes) == len(mlp_ratios) + blocks = [] + for block_idx in range(layers[index]): + drop_path = drop_path_rate * (block_idx + sum(layers[:index])) / (sum(layers) - 1) + blocks.append(block_layer(embed_dims[index], hidden_sizes[index], mlp_ratio=mlp_ratios[index], + rnn_layer=rnn_layer, mlp_layer=mlp_layer, norm_layer=norm_layer, + act_layer=act_layer, num_layers=num_layers, + bidirectional=bidirectional, union=union, with_fc=with_fc, + drop=drop, drop_path=drop_path)) + + if index < len(embed_dims) - 1: + blocks.append(Downsample2D(embed_dims[index], embed_dims[index + 1], patch_sizes[index + 1])) + + blocks = nn.Sequential(*blocks) + return blocks + + +class Sequencer2D(nn.Module): + def __init__( + self, + num_classes=1000, + img_size=224, + in_chans=3, + layers=[4, 3, 8, 3], + patch_sizes=[7, 2, 1, 1], + embed_dims=[192, 384, 384, 384], + hidden_sizes=[48, 96, 96, 96], + mlp_ratios=[3.0, 3.0, 3.0, 3.0], + block_layer=Sequencer2DBlock, + rnn_layer=LSTM2D, + mlp_layer=Mlp, + norm_layer=partial(nn.LayerNorm, eps=1e-6), + act_layer=nn.GELU, + num_rnn_layers=1, + bidirectional=True, + union="cat", + with_fc=True, + drop_rate=0., + drop_path_rate=0., + nlhb=False, + stem_norm=False, + **kwargs + ): + super().__init__() + self.num_classes = num_classes + self.num_features = embed_dims[0] # num_features for consistency with other models + self.embed_dims = embed_dims + self.stem = PatchEmbed( + img_size=img_size, patch_size=patch_sizes[0], in_chans=in_chans, + 
embed_dim=embed_dims[0], norm_layer=norm_layer if stem_norm else None, + flatten=False) + + self.blocks = nn.Sequential(*[ + get_stage( + i, layers, patch_sizes, embed_dims, hidden_sizes, mlp_ratios, block_layer=block_layer, + rnn_layer=rnn_layer, mlp_layer=mlp_layer, norm_layer=norm_layer, act_layer=act_layer, + num_layers=num_rnn_layers, bidirectional=bidirectional, + union=union, with_fc=with_fc, drop=drop_rate, drop_path_rate=drop_path_rate, + ) + for i, _ in enumerate(embed_dims)]) + + self.norm = norm_layer(embed_dims[-1]) + self.head = nn.Linear(embed_dims[-1], self.num_classes) if num_classes > 0 else nn.Identity() + + self.init_weights(nlhb=nlhb) + + def init_weights(self, nlhb=False): + head_bias = -math.log(self.num_classes) if nlhb else 0. + named_apply(partial(_init_weights, head_bias=head_bias), module=self) # depth-first + + def get_classifier(self): + return self.head + + def reset_classifier(self, num_classes, global_pool=''): + self.num_classes = num_classes + self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity() + + def forward_features(self, x): + x = self.stem(x) + x = self.blocks(x) + x = self.norm(x) + x = x.mean(dim=(1, 2)) + return x + + def forward(self, x): + x = self.forward_features(x) + x = self.head(x) + return x + + +def checkpoint_filter_fn(state_dict, model): + return state_dict + + +default_cfgs = dict( + sequencer2d_s=_cfg(url="https://github.com/okojoalg/sequencer/releases/download/weights/sequencer2d_s.pth"), + sequencer2d_m=_cfg(url="https://github.com/okojoalg/sequencer/releases/download/weights/sequencer2d_m.pth"), + sequencer2d_l=_cfg(url="https://github.com/okojoalg/sequencer/releases/download/weights/sequencer2d_l.pth"), + sequencer2d_l_d4_3x=_cfg(), + sequencer2d_s_unidirectional=_cfg(), + sequencer2d_s_add=_cfg(), + sequencer2d_s_h2x=_cfg(), + sequencer2d_s_without_fc=_cfg(), + sequencer2d_vertical=_cfg(), + sequencer2d_s_horizontal=_cfg(), + gru_sequencer2d_s=_cfg(), + 
rnn_sequencer2d_s=_cfg(), +) + + +def _create_sequencer2d(variant, pretrained=False, **kwargs): + if kwargs.get('features_only', None): + raise RuntimeError('features_only not implemented for Sequencer2D models.') + + model = build_model_with_cfg( + Sequencer2D, variant, pretrained, + default_cfg=default_cfgs[variant], + pretrained_filter_fn=checkpoint_filter_fn, + **kwargs) + return model + + +# main + +@register_model +def sequencer2d_s(pretrained=False, **kwargs): + model_args = dict( + layers=[4, 3, 8, 3], + patch_sizes=[7, 2, 1, 1], + embed_dims=[192, 384, 384, 384], + hidden_sizes=[48, 96, 96, 96], + mlp_ratios=[3.0, 3.0, 3.0, 3.0], + rnn_layer=LSTM2D, + bidirectional=True, + union="cat", + with_fc=True, + **kwargs) + model = _create_sequencer2d('sequencer2d_s', pretrained=pretrained, **model_args) + return model + + +@register_model +def sequencer2d_m(pretrained=False, **kwargs): + model_args = dict( + layers=[4, 3, 14, 3], + patch_sizes=[7, 2, 1, 1], + embed_dims=[192, 384, 384, 384], + hidden_sizes=[48, 96, 96, 96], + mlp_ratios=[3.0, 3.0, 3.0, 3.0], + rnn_layer=LSTM2D, + bidirectional=True, + union="cat", + with_fc=True, + **kwargs) + model = _create_sequencer2d('sequencer2d_m', pretrained=pretrained, **model_args) + return model + + +@register_model +def sequencer2d_l(pretrained=False, **kwargs): + model_args = dict( + layers=[8, 8, 16, 4], + patch_sizes=[7, 2, 1, 1], + embed_dims=[192, 384, 384, 384], + hidden_sizes=[48, 96, 96, 96], + mlp_ratios=[3.0, 3.0, 3.0, 3.0], + rnn_layer=LSTM2D, + bidirectional=True, + union="cat", + with_fc=True, + **kwargs) + model = _create_sequencer2d('sequencer2d_l', pretrained=pretrained, **model_args) + return model + + +# high resolution + +@register_model +def sequencer2d_s_392(pretrained=False, **kwargs): + model_args = dict( + img_size=392, + layers=[4, 3, 8, 3], + patch_sizes=[7, 2, 1, 1], + embed_dims=[192, 384, 384, 384], + hidden_sizes=[48, 96, 96, 96], + mlp_ratios=[3.0, 3.0, 3.0, 3.0], + rnn_layer=LSTM2D, + 
bidirectional=True, + union="cat", + with_fc=True, + **kwargs) + model = _create_sequencer2d('sequencer2d_s', pretrained=pretrained, **model_args) + return model + + +@register_model +def sequencer2d_m_392(pretrained=False, **kwargs): + model_args = dict( + img_size=392, + layers=[4, 3, 14, 3], + patch_sizes=[7, 2, 1, 1], + embed_dims=[192, 384, 384, 384], + hidden_sizes=[48, 96, 96, 96], + mlp_ratios=[3.0, 3.0, 3.0, 3.0], + rnn_layer=LSTM2D, + bidirectional=True, + union="cat", + with_fc=True, + **kwargs) + model = _create_sequencer2d('sequencer2d_m', pretrained=pretrained, **model_args) + return model + + +@register_model +def sequencer2d_l_392(pretrained=False, **kwargs): + model_args = dict( + img_size=392, + layers=[8, 8, 16, 4], + patch_sizes=[7, 2, 1, 1], + embed_dims=[192, 384, 384, 384], + hidden_sizes=[48, 96, 96, 96], + mlp_ratios=[3.0, 3.0, 3.0, 3.0], + rnn_layer=LSTM2D, + bidirectional=True, + union="cat", + with_fc=True, + **kwargs) + model = _create_sequencer2d('sequencer2d_l', pretrained=pretrained, **model_args) + return model + + +# ablation + +@register_model +def sequencer2d_s_unidirectional(pretrained=False, **kwargs): + model_args = dict( + layers=[4, 3, 8, 3], + patch_sizes=[7, 2, 1, 1], + embed_dims=[192, 384, 384, 384], + hidden_sizes=[48, 96, 96, 96], + mlp_ratios=[3.0, 3.0, 3.0, 3.0], + rnn_layer=LSTM2D, + bidirectional=False, + union="cat", + with_fc=True, + **kwargs) + model = _create_sequencer2d('sequencer2d_s_unidirectional', pretrained=pretrained, **model_args) + return model + + +@register_model +def sequencer2d_s_add(pretrained=False, **kwargs): + model_args = dict( + layers=[4, 3, 8, 3], + patch_sizes=[7, 2, 1, 1], + embed_dims=[192, 384, 384, 384], + hidden_sizes=[48, 96, 96, 96], + mlp_ratios=[3.0, 3.0, 3.0, 3.0], + rnn_layer=LSTM2D, + bidirectional=True, + union="add", + with_fc=True, + **kwargs) + model = _create_sequencer2d('sequencer2d_s_add', pretrained=pretrained, **model_args) + return model + + +@register_model +def 
sequencer2d_s_h2x(pretrained=False, **kwargs): + model_args = dict( + layers=[4, 3, 8, 3], + patch_sizes=[7, 2, 1, 1], + embed_dims=[192, 384, 384, 384], + hidden_sizes=[96, 192, 192, 192], + mlp_ratios=[3.0, 3.0, 3.0, 3.0], + rnn_layer=LSTM2D, + bidirectional=True, + union="cat", + with_fc=True, + **kwargs) + model = _create_sequencer2d('sequencer2d_s_h2x', pretrained=pretrained, **model_args) + return model + + +@register_model +def sequencer2d_s_without_fc(pretrained=False, **kwargs): + model_args = dict( + layers=[4, 3, 8, 3], + patch_sizes=[7, 2, 1, 1], + embed_dims=[192, 384, 384, 384], + hidden_sizes=[48, 96, 96, 96], + mlp_ratios=[3.0, 3.0, 3.0, 3.0], + rnn_layer=LSTM2D, + bidirectional=True, + union="cat", + with_fc=False, + **kwargs) + model = _create_sequencer2d('sequencer2d_s_without_fc', pretrained=pretrained, **model_args) + return model + + +@register_model +def sequencer2d_vertical(pretrained=False, **kwargs): + model_args = dict( + layers=[4, 3, 8, 3], + patch_sizes=[7, 2, 1, 1], + embed_dims=[192, 384, 384, 384], + hidden_sizes=[96, 192, 192, 192], + mlp_ratios=[3.0, 3.0, 3.0, 3.0], + rnn_layer=LSTM2D, + bidirectional=True, + union="vertical", + with_fc=False, + **kwargs) + model = _create_sequencer2d('sequencer2d_vertical', pretrained=pretrained, **model_args) + return model + + +@register_model +def sequencer2d_s_horizontal(pretrained=False, **kwargs): + model_args = dict( + layers=[4, 3, 8, 3], + patch_sizes=[7, 2, 1, 1], + embed_dims=[192, 384, 384, 384], + hidden_sizes=[96, 192, 192, 192], + mlp_ratios=[3.0, 3.0, 3.0, 3.0], + rnn_layer=LSTM2D, + bidirectional=True, + union="horizontal", + with_fc=False, + **kwargs) + model = _create_sequencer2d('sequencer2d_s_horizontal', pretrained=pretrained, **model_args) + return model + + +# option + +@register_model +def gru_sequencer2d_s(pretrained=False, **kwargs): + model_args = dict( + layers=[4, 3, 8, 3], + patch_sizes=[7, 2, 1, 1], + embed_dims=[192, 384, 384, 384], + hidden_sizes=[48, 96, 96, 
96], + mlp_ratios=[3.0, 3.0, 3.0, 3.0], + rnn_layer=GRU2D, + bidirectional=True, + union="cat", + with_fc=True, + **kwargs) + model = _create_sequencer2d('gru_sequencer2d_s', pretrained=pretrained, **model_args) + return model + + +@register_model +def rnn_sequencer2d_s(pretrained=False, **kwargs): + model_args = dict( + layers=[4, 3, 8, 3], + patch_sizes=[7, 2, 1, 1], + embed_dims=[192, 384, 384, 384], + hidden_sizes=[48, 96, 96, 96], + mlp_ratios=[3.0, 3.0, 3.0, 3.0], + rnn_layer=RNN2D, + bidirectional=True, + union="cat", + with_fc=True, + **kwargs) + model = _create_sequencer2d('rnn_sequencer2d_s', pretrained=pretrained, **model_args) + return model + + +@register_model +def sequencer2d_l_d4_3x(pretrained=False, **kwargs): + model_args = dict( + layers=[8, 8, 16, 4], + patch_sizes=[7, 2, 1, 1], + embed_dims=[256, 512, 512, 512], + hidden_sizes=[64, 128, 128, 128], + mlp_ratios=[3.0, 3.0, 3.0, 3.0], + rnn_layer=LSTM2D, + bidirectional=True, + union="cat", + with_fc=True, + **kwargs) + model = _create_sequencer2d('sequencer2d_l_d4_3x', pretrained=pretrained, **model_args) + return model diff --git a/PyTorch/build-in/Classification/Sequencer2D/model/vanilla_sequencer.py b/PyTorch/build-in/Classification/Sequencer2D/model/vanilla_sequencer.py new file mode 100644 index 000000000..a9de5ea1f --- /dev/null +++ b/PyTorch/build-in/Classification/Sequencer2D/model/vanilla_sequencer.py @@ -0,0 +1,236 @@ +# Copyright (c) 2022. 
Yuki Tatsunami +# Licensed under the Apache License, Version 2.0 (the "License"); + +import math +from functools import partial + +import torch +from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, DEFAULT_CROP_PCT +from timm.models.layers import Mlp, lecun_normal_, trunc_normal_ +from timm.models.helpers import build_model_with_cfg, named_apply +from timm.models.registry import register_model +from torch import nn + +from model.layers import LSTM, VanillaSequencerBlock, PatchEmbed, Downsample2D, Shuffle + + +def _cfg(url='', **kwargs): + return { + 'url': url, + 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, + 'crop_pct': DEFAULT_CROP_PCT, 'interpolation': 'bicubic', 'fixed_input_size': True, + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'stem.proj', 'classifier': 'head', + **kwargs + } + + +def _init_weights(module: nn.Module, name: str, head_bias: float = 0., flax=False): + if isinstance(module, nn.Linear): + if name.startswith('head'): + nn.init.zeros_(module.weight) + nn.init.constant_(module.bias, head_bias) + else: + if flax: + # Flax defaults + lecun_normal_(module.weight) + if module.bias is not None: + nn.init.zeros_(module.bias) + else: + nn.init.xavier_uniform_(module.weight) + if module.bias is not None: + if 'mlp' in name: + nn.init.normal_(module.bias, std=1e-6) + else: + nn.init.zeros_(module.bias) + elif isinstance(module, nn.Conv2d): + lecun_normal_(module.weight) + if module.bias is not None: + nn.init.zeros_(module.bias) + elif isinstance(module, (nn.LayerNorm, nn.BatchNorm2d, nn.GroupNorm)): + nn.init.ones_(module.weight) + nn.init.zeros_(module.bias) + elif isinstance(module, (nn.RNN, nn.GRU, nn.LSTM)): + stdv = 1.0 / math.sqrt(module.hidden_size) + for weight in module.parameters(): + nn.init.uniform_(weight, -stdv, stdv) + elif hasattr(module, 'init_weights'): + module.init_weights() + + +def get_stage(index, layers, patch_sizes, embed_dims, hidden_sizes, mlp_ratios, 
block_layer, rnn_layer, mlp_layer, + norm_layer, act_layer, num_layers, bidirectional, drop=0., drop_path_rate=0., **kwargs): + assert len(layers) == len(patch_sizes) == len(embed_dims) == len(hidden_sizes) == len(mlp_ratios) + blocks = [] + for block_idx in range(layers[index]): + drop_path = drop_path_rate * (block_idx + sum(layers[:index])) / (sum(layers) - 1) + blocks.append(block_layer(embed_dims[index], hidden_sizes[index], mlp_ratio=mlp_ratios[index], + rnn_layer=rnn_layer, mlp_layer=mlp_layer, norm_layer=norm_layer, + act_layer=act_layer, num_layers=num_layers, + bidirectional=bidirectional, drop=drop, drop_path=drop_path)) + + if index < len(embed_dims) - 1: + blocks.append(Downsample2D(embed_dims[index], embed_dims[index + 1], patch_sizes[index + 1])) + + blocks = nn.Sequential(*blocks) + return blocks + + +class VanillaSequencer(nn.Module): + def __init__( + self, + num_classes=1000, + img_size=224, + in_chans=3, + layers=[4, 3, 8, 3], + patch_sizes=[14, 1, 1, 1], + embed_dims=[384, 384, 384, 384], + hidden_sizes=[192, 192, 192, 192], + mlp_ratios=[3.0, 3.0, 3.0, 3.0], + block_layer=VanillaSequencerBlock, + rnn_layer=LSTM, + mlp_layer=Mlp, + norm_layer=partial(nn.LayerNorm, eps=1e-6), + act_layer=nn.GELU, + num_rnn_layers=1, + bidirectional=True, + shuffle=False, + ape=False, + drop_rate=0., + drop_path_rate=0., + nlhb=False, + stem_norm=False, + **kwargs + ): + super().__init__() + self.num_classes = num_classes + self.num_features = embed_dims[-1] # num_features for consistency with other models + self.embed_dims = embed_dims + self.stem = PatchEmbed( + img_size=img_size, patch_size=patch_sizes[0], in_chans=in_chans, + embed_dim=embed_dims[0], norm_layer=norm_layer if stem_norm else None, + flatten=False) + self.shuffle = shuffle + + if self.shuffle: + self.shuffle_patches = Shuffle() + + # absolute position embedding + self.ape = ape + if self.ape: + self.absolute_pos_embed = nn.Parameter(torch.zeros(1, self.stem.grid_size[0], self.stem.grid_size[1], 
embed_dims[0])) + trunc_normal_(self.absolute_pos_embed, std=.02) + + self.blocks = nn.Sequential(*[ + get_stage( + i, layers, patch_sizes, embed_dims, hidden_sizes, mlp_ratios, block_layer=block_layer, + rnn_layer=rnn_layer, mlp_layer=mlp_layer, norm_layer=norm_layer, act_layer=act_layer, + num_layers=num_rnn_layers, bidirectional=bidirectional, drop=drop_rate, drop_path_rate=drop_path_rate, + ) + for i, _ in enumerate(embed_dims)]) + + self.norm = norm_layer(embed_dims[-1]) + self.head = nn.Linear(embed_dims[-1], self.num_classes) if num_classes > 0 else nn.Identity() + + self.init_weights(nlhb=nlhb) + + def init_weights(self, nlhb=False): + head_bias = -math.log(self.num_classes) if nlhb else 0. + named_apply(partial(_init_weights, head_bias=head_bias), module=self) # depth-first + + def get_classifier(self): + return self.head + + def reset_classifier(self, num_classes, global_pool=''): + self.num_classes = num_classes + self.head = nn.Linear(self.embed_dims[-1], num_classes) if num_classes > 0 else nn.Identity() + + def forward_features(self, x): + x = self.stem(x) + if self.ape: + x = x + self.absolute_pos_embed + if self.shuffle: + x = self.shuffle_patches(x) + x = self.blocks(x) + x = self.norm(x) + x = x.mean(dim=(1, 2)) + return x + + def forward(self, x): + x = self.forward_features(x) + x = self.head(x) + return x + + +def checkpoint_filter_fn(state_dict, model): + return state_dict + + +default_cfgs = dict( + v_sequencer_s=_cfg(), + v_sequencer_s_h=_cfg(), + v_sequencer_s_pe=_cfg(), +) + + +def _create_vanilla_sequencer(variant, pretrained=False, **kwargs): + if kwargs.get('features_only', None): + raise RuntimeError('features_only not implemented for VanillaSequencer models.') + + model = build_model_with_cfg( + VanillaSequencer, variant, pretrained, + default_cfg=default_cfgs[variant], + pretrained_filter_fn=checkpoint_filter_fn, + **kwargs) + return model + + +@register_model +def v_sequencer_s(pretrained=False, **kwargs): + model_args = dict( + 
layers=[4, 3, 8, 3], + patch_sizes=[14, 1, 1, 1], + embed_dims=[384, 384, 384, 384], + hidden_sizes=[192, 192, 192, 192], + mlp_ratios=[3.0, 3.0, 3.0, 3.0], + rnn_layer=LSTM, + bidirectional=True, + shuffle=False, + ape=False, + **kwargs) + model = _create_vanilla_sequencer('v_sequencer_s', pretrained=pretrained, **model_args) + return model + + +@register_model +def v_sequencer_s_h(pretrained=False, **kwargs): + model_args = dict( + layers=[4, 3, 8, 3], + patch_sizes=[7, 2, 1, 1], + embed_dims=[192, 384, 384, 384], + hidden_sizes=[96, 192, 192, 192], + mlp_ratios=[3.0, 3.0, 3.0, 3.0], + rnn_layer=LSTM, + bidirectional=True, + shuffle=False, + ape=False, + **kwargs) + model = _create_vanilla_sequencer('v_sequencer_s_h', pretrained=pretrained, **model_args) + return model + + +@register_model +def v_sequencer_s_pe(pretrained=False, **kwargs): + model_args = dict( + layers=[4, 3, 8, 3], + patch_sizes=[14, 1, 1, 1], + embed_dims=[384, 384, 384, 384], + hidden_sizes=[192, 192, 192, 192], + mlp_ratios=[3.0, 3.0, 3.0, 3.0], + rnn_layer=LSTM, + bidirectional=True, + shuffle=False, + ape=True, + **kwargs) + model = _create_vanilla_sequencer('v_sequencer_s_pe', pretrained=pretrained, **model_args) + return model diff --git a/PyTorch/build-in/Classification/Sequencer2D/sequencer/Dockerfile b/PyTorch/build-in/Classification/Sequencer2D/sequencer/Dockerfile new file mode 100644 index 000000000..0edc4dca6 --- /dev/null +++ b/PyTorch/build-in/Classification/Sequencer2D/sequencer/Dockerfile @@ -0,0 +1,30 @@ +ARG PYTORCH="1.10.0" +ARG CUDA="11.3" +ARG CUDNN="8" + +FROM pytorch/pytorch:${PYTORCH}-cuda${CUDA}-cudnn${CUDNN}-devel as python-base + +ENV PYTHONUNBUFFERED=1 \ + PYTHONDONTWRITEBYTECODE=1 + +FROM python-base as initial +ENV TORCH_CUDA_ARCH_LIST="6.0 6.1 7.0 8.6+PTX" +ENV TORCH_NVCC_FLAGS="-Xfatbin -compress-all" +ENV CMAKE_PREFIX_PATH="$(dirname $(which conda))/../" + +RUN apt-key adv --fetch-keys 
https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64/3bf863cc.pub \ + && apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1804/x86_64/7fa2af80.pub \ + && apt-get update && apt-get install -y curl git build-essential cmake ninja-build libglib2.0-0 libsm6 libxrender-dev libxext6 libgl1-mesa-glx \ + && apt-get clean \ + && rm -rf /var/lib/apt/lists/* + +RUN conda clean --all +ENV FORCE_CUDA="1" + +WORKDIR /workspace + +FROM initial as development + +COPY requirements.txt /tmp + +RUN pip install -r /tmp/requirements.txt --no-cache-dir \ No newline at end of file diff --git a/PyTorch/build-in/Classification/Sequencer2D/sequencer/LICENSE b/PyTorch/build-in/Classification/Sequencer2D/sequencer/LICENSE new file mode 100644 index 000000000..16e2abf83 --- /dev/null +++ b/PyTorch/build-in/Classification/Sequencer2D/sequencer/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright 2022 Yuki Tatsunami + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/PyTorch/build-in/Classification/Sequencer2D/sequencer/README.md b/PyTorch/build-in/Classification/Sequencer2D/sequencer/README.md new file mode 100644 index 000000000..583b4fd7d --- /dev/null +++ b/PyTorch/build-in/Classification/Sequencer2D/sequencer/README.md @@ -0,0 +1,119 @@ +**[NeurIPS 2022] Sequencer**: Deep LSTM for Image Classification +======== + +[![NeurIPS 2022](https://img.shields.io/badge/NeuIPS-2022-373737.svg?style=plastic&labelColor=5779eb)](https://nips.cc/Conferences/2022/Schedule?showEvent=55158) +[![arXiv](https://img.shields.io/badge/arXiv-2205.01972-b31b1b.svg?style=plastic)](https://arxiv.org/abs/2205.01972) +[![Support Ukraine](https://img.shields.io/badge/Support-Ukraine-FFD500?style=plastic&labelColor=005BBB)](https://opensource.fb.com/support-ukraine) + +Created by +* [Yuki Tatsunami](https://okojoalg.github.io/) + * [![Rikkyo University](https://img.shields.io/badge/Rikkyo-University-FFFFFF?style=plastic&labelColor=582780)](https://www.rikkyo.ac.jp) + * [![AnyTech](https://img.shields.io/badge/AnyTech-Co.%20Ltd.-18C4AA?style=plastic&labelColor=254BB1)](https://anytech.co.jp/) +* [Masato Taki](https://scholar.google.com/citations?hl=en&user=3nMhvfgAAAAJ) + * [![Rikkyo University](https://img.shields.io/badge/Rikkyo-University-FFFFFF?style=plastic&labelColor=582780)](https://www.rikkyo.ac.jp) + +This repository contains implementation for Sequencer. 
+ +## Abstract + +In recent computer vision research, the advent of the Vision Transformer (ViT) has rapidly revolutionized various architectural design efforts: ViT achieved state-of-the-art image classification performance using self-attention found in natural language processing, and MLP-Mixer achieved competitive performance using simple multi-layer perceptrons. In contrast, several studies have also suggested that carefully redesigned convolutional neural networks (CNNs) can achieve advanced performance comparable to ViT without resorting to these new ideas. Against this background, there is growing interest in what inductive bias is suitable for computer vision. Here we propose Sequencer, a novel and competitive architecture alternative to ViT that provides a new perspective on these issues. Unlike ViTs, Sequencer models long-range dependencies using LSTMs rather than self-attention layers. We also propose a two-dimensional version of Sequencer module, where an LSTM is decomposed into vertical and horizontal LSTMs to enhance performance. Despite its simplicity, several experiments demonstrate that Sequencer performs impressively well: Sequencer2D-L, with 54M parameters, realizes 84.6\% top-1 accuracy on only ImageNet-1K. Not only that, we show that it has good transferability and the robust resolution adaptability on double resolution-band. + +## Schematic diagrams + +The overall architecture of Sequencer2D is similar to the typical hierarchical ViT and Visual MLP. 
It uses Sequencer2D blocks instead of Transformer blocks: + +![Sequencer] + +Sequencer2D block replaces the Transformer's self-attention layer with an LSTM-based layer like BiLSTM2D layer: + +![Sequencer2D] + +BiLSTM2D includes a vertical LSTM and a horizontal LSTM: + +![BiLSTM2D] + +[Sequencer]: img/Sequencer.jpg +[Sequencer2D]: img/Sequencer2D.jpg +[BiLSTM2D]: img/BiLSTM2D.jpg + +## Model Zoo +We provide our Sequencer models pretrained on ImageNet-1K: +| name | arch | Params | FLOPs | acc@1 | download | +| --- | --- | --- | --- | --- | --- | +| Sequencer2D-S | ```sequencer2d_s``` | 28M | 8.4G | 82.3 | [here](https://github.com/okojoalg/sequencer/releases/download/weights/sequencer2d_s.pth) | +| Sequencer2D-M | ```sequencer2d_m``` | 38M | 11.1G | 82.8 | [here](https://github.com/okojoalg/sequencer/releases/download/weights/sequencer2d_m.pth) | +| Sequencer2D-L | ```sequencer2d_l``` | 54M | 16.6G | 83.4 | [here](https://github.com/okojoalg/sequencer/releases/download/weights/sequencer2d_l.pth) | + +## Usage + +### Requirements +- torch>=1.10.0 +- torchvision +- timm==0.5.4 +- Pillow +- matplotlib +- scipy +- etc., see [requirements.txt](requirements.txt) + +### Data preparation +Download and extract ImageNet images. The directory structure should be as follows. + +``` +│imagenet/ +├──train/ +│ ├── n01440764 +│ │ ├── n01440764_10026.JPEG +│ │ ├── n01440764_10027.JPEG +│ │ ├── ...... +│ ├── ...... +├──val/ +│ ├── n01440764 +│ │ ├── ILSVRC2012_val_00000293.JPEG +│ │ ├── ILSVRC2012_val_00002138.JPEG +│ │ ├── ...... +│ ├── ...... +``` + +### Training +Command line for training Sequencer models on ImageNet from scratch. 
+``` +./distributed_train.sh 8 /path/to/imagenet --model sequencer2d_s -b 256 -j 8 --opt adamw --epochs 300 --sched cosine --native-amp --img-size 224 --drop-path 0.1 --lr 2e-3 --weight-decay 0.05 --remode pixel --reprob 0.25 --aa rand-m9-mstd0.5-inc1 --smoothing 0.1 --mixup 0.8 --cutmix 1.0 --warmup-lr 1e-6 --warmup-epochs 20 +``` + +Command line for fine-tuning a pre-trained model at higher resolution. +``` +./distributed_train.sh 8 /path/to/imagenet --model sequencer2d_l --pretrained -b 64 -j 8 --opt adamw --epochs 30 --sched cosine --native-amp --input-size 3 392 392 --img-size 392 --crop-pct 1.0 --drop-path 0.4 --lr 5e-5 --weight-decay 1e-8 --remode pixel --reprob 0.25 --aa rand-m9-mstd0.5-inc1 --smoothing 0.1 --mixup 0.8 --cutmix 1.0 --warmup-epochs 0 --cooldown-epochs 0 +``` + +Command line for fine-tuning a pre-trained model on a transfer learning dataset. +``` +./distributed_train.sh 4 /path/to/cifar10 --model sequencer2d_s -b 128 -j 4 --num-classes 10 --dataset torch/cifar10 --pretrained --opt adamw --epochs 200 --sched cosine --native-amp --img-size 224 --clip-grad 1 --drop-path 0.1 --lr 0.0001 --weight-decay 1e-4 --remode pixel --aa rand-m9-mstd0.5-inc1 --smoothing 0.1 --mixup 0.8 --cutmix 1.0 --warmup-lr 1e-6 --warmup-epochs 5 +``` + +### Validation +To evaluate our Sequencer models, run: +``` +python validate.py /path/to/imagenet --model sequencer2d_s -b 16 --input-size 3 224 224 --amp +``` + +## Reference +You may want to cite: +``` +@article{tatsunami2022sequencer, + title={Sequencer: Deep LSTM for Image Classification}, + author={Tatsunami, Yuki and Taki, Masato}, + journal={Advances in Neural Information Processing Systems}, + year={2022} +} +``` + +## Acknowledgment +This implementation is based on [pytorch-image-models](https://github.com/rwightman/pytorch-image-models) by Ross Wightman. We thank for his brilliant work. 
+ +| | | +|:--|:-:| +| We thank [Graduate School of Artificial Intelligence and Science, Rikkyo University (Rikkyo AI)](https://ai.rikkyo.ac.jp) which supports us with computational resources, facilities, and others. | ![logo-rikkyo-ai] | +| [AnyTech Co. Ltd.](https://anytech.co.jp) provided valuable comments on the early versions and encouragement. We thank them for their cooperation. In particular, We thank [Atsushi Fukuda](https://github.com/fukumame) for organizing discussion opportunities. | ![logo-anytech] | + +[logo-rikkyo-ai]: img/RIKKYOAI_main.png "Logo of Rikkyo AI" +[logo-anytech]: img/anytech.svg "Logo of AnyTech" diff --git a/PyTorch/build-in/Classification/Sequencer2D/sequencer/avg_checkpoints.py b/PyTorch/build-in/Classification/Sequencer2D/sequencer/avg_checkpoints.py new file mode 100755 index 000000000..eace47dc7 --- /dev/null +++ b/PyTorch/build-in/Classification/Sequencer2D/sequencer/avg_checkpoints.py @@ -0,0 +1,121 @@ +#!/usr/bin/env python3 +""" Checkpoint Averaging Script + +This script averages all model weights for checkpoints in specified path that match +the specified filter wildcard. All checkpoints must be from the exact same model. + +For any hope of decent results, the checkpoints should be from the same or child +(via resumes) training session. This can be viewed as similar to maintaining running +EMA (exponential moving average) of the model weights or performing SWA (stochastic +weight averaging), but post-training. 
+ +Hacked together by / Copyright 2020 Ross Wightman (https://github.com/rwightman) +""" +import torch +import argparse +import os +import glob +import hashlib +from timm.models.helpers import load_state_dict + +parser = argparse.ArgumentParser(description='PyTorch Checkpoint Averager') +parser.add_argument('--input', default='', type=str, metavar='PATH', + help='path to base input folder containing checkpoints') +parser.add_argument('--filter', default='*.pth.tar', type=str, metavar='WILDCARD', + help='checkpoint filter (path wildcard)') +parser.add_argument('--output', default='./averaged.pth', type=str, metavar='PATH', + help='output filename') +parser.add_argument('--no-use-ema', dest='no_use_ema', action='store_true', + help='Force not using ema version of weights (if present)') +parser.add_argument('--no-sort', dest='no_sort', action='store_true', + help='Do not sort and select by checkpoint metric, also makes "n" argument irrelevant') +parser.add_argument('-n', type=int, default=10, metavar='N', + help='Number of checkpoints to average') + + +def checkpoint_metric(checkpoint_path): + if not checkpoint_path or not os.path.isfile(checkpoint_path): + return {} + print("=> Extracting metric from checkpoint '{}'".format(checkpoint_path)) + checkpoint = torch.load(checkpoint_path, map_location='cpu') + metric = None + if 'metric' in checkpoint: + metric = checkpoint['metric'] + elif 'metrics' in checkpoint and 'metric_name' in checkpoint: + metrics = checkpoint['metrics'] + print(metrics) + metric = metrics[checkpoint['metric_name']] + return metric + + +def main(): + args = parser.parse_args() + # by default use the EMA weights (if present) + args.use_ema = not args.no_use_ema + # by default sort by checkpoint metric (if present) and avg top n checkpoints + args.sort = not args.no_sort + + if os.path.exists(args.output): + print("Error: Output filename ({}) already exists.".format(args.output)) + exit(1) + + pattern = args.input + if not 
args.input.endswith(os.path.sep) and not args.filter.startswith(os.path.sep): + pattern += os.path.sep + pattern += args.filter + checkpoints = glob.glob(pattern, recursive=True) + + if args.sort: + checkpoint_metrics = [] + for c in checkpoints: + metric = checkpoint_metric(c) + if metric is not None: + checkpoint_metrics.append((metric, c)) + checkpoint_metrics = list(sorted(checkpoint_metrics)) + checkpoint_metrics = checkpoint_metrics[-args.n:] + print("Selected checkpoints:") + [print(m, c) for m, c in checkpoint_metrics] + avg_checkpoints = [c for m, c in checkpoint_metrics] + else: + avg_checkpoints = checkpoints + print("Selected checkpoints:") + [print(c) for c in checkpoints] + + avg_state_dict = {} + avg_counts = {} + for c in avg_checkpoints: + new_state_dict = load_state_dict(c, args.use_ema) + if not new_state_dict: + print("Error: Checkpoint ({}) doesn't exist".format(args.checkpoint)) + continue + + for k, v in new_state_dict.items(): + if k not in avg_state_dict: + avg_state_dict[k] = v.clone().to(dtype=torch.float64) + avg_counts[k] = 1 + else: + avg_state_dict[k] += v.to(dtype=torch.float64) + avg_counts[k] += 1 + + for k, v in avg_state_dict.items(): + v.div_(avg_counts[k]) + + # float32 overflow seems unlikely based on weights seen to date, but who knows + float32_info = torch.finfo(torch.float32) + final_state_dict = {} + for k, v in avg_state_dict.items(): + v = v.clamp(float32_info.min, float32_info.max) + final_state_dict[k] = v.to(dtype=torch.float32) + + try: + torch.save(final_state_dict, args.output, _use_new_zipfile_serialization=False) + except: + torch.save(final_state_dict, args.output) + + with open(args.output, 'rb') as f: + sha_hash = hashlib.sha256(f.read()).hexdigest() + print("=> Saved state_dict to '{}, SHA256: {}'".format(args.output, sha_hash)) + + +if __name__ == '__main__': + main() \ No newline at end of file diff --git a/PyTorch/build-in/Classification/Sequencer2D/sequencer/benchmark.py 
b/PyTorch/build-in/Classification/Sequencer2D/sequencer/benchmark.py new file mode 100755 index 000000000..53fe1c88c --- /dev/null +++ b/PyTorch/build-in/Classification/Sequencer2D/sequencer/benchmark.py @@ -0,0 +1,606 @@ +#!/usr/bin/env python3 +""" Model Benchmark Script +An inference and train step benchmark script for timm models. +Hacked together by Ross Wightman (https://github.com/rwightman) +""" +import argparse +import os +import csv +import json +import time +import logging +import torch +import torch.nn as nn +import torch.nn.parallel +from collections import OrderedDict +from contextlib import suppress +from functools import partial + +from timm.models import create_model, is_model, list_models +from timm.optim import create_optimizer_v2 +from timm.data import resolve_data_config +from timm.utils import AverageMeter, setup_default_logging + +import models + +has_apex = False +try: + from apex import amp + has_apex = True +except ImportError: + pass + +has_native_amp = False +try: + if getattr(torch.cuda.amp, 'autocast') is not None: + has_native_amp = True +except AttributeError: + pass + +try: + from deepspeed.profiling.flops_profiler import get_model_profile + has_deepspeed_profiling = True +except ImportError as e: + has_deepspeed_profiling = False + +try: + from fvcore.nn import FlopCountAnalysis, flop_count_str, ActivationCountAnalysis + has_fvcore_profiling = True +except ImportError as e: + FlopCountAnalysis = None + has_fvcore_profiling = False + + +torch.backends.cudnn.benchmark = True +_logger = logging.getLogger('validate') + + +parser = argparse.ArgumentParser(description='PyTorch Benchmark') + +# benchmark specific args +parser.add_argument('--model-list', metavar='NAME', default='', + help='txt file based list of model names to benchmark') +parser.add_argument('--bench', default='both', type=str, + help="Benchmark mode. One of 'inference', 'train', 'both'. 
Defaults to 'both'") +parser.add_argument('--detail', action='store_true', default=False, + help='Provide train fwd/bwd/opt breakdown detail if True. Defaults to False') +parser.add_argument('--results-file', default='', type=str, metavar='FILENAME', + help='Output csv file for validation results (summary)') +parser.add_argument('--num-warm-iter', default=10, type=int, + metavar='N', help='Number of warmup iterations (default: 10)') +parser.add_argument('--num-bench-iter', default=40, type=int, + metavar='N', help='Number of benchmark iterations (default: 40)') + +# common inference / train args +parser.add_argument('--model', '-m', metavar='NAME', default='resnet50', + help='model architecture (default: resnet50)') +parser.add_argument('-b', '--batch-size', default=256, type=int, + metavar='N', help='mini-batch size (default: 256)') +parser.add_argument('--img-size', default=None, type=int, + metavar='N', help='Input image dimension, uses model default if empty') +parser.add_argument('--input-size', default=None, nargs=3, type=int, + metavar='N N N', help='Input all image dimensions (d h w, e.g. --input-size 3 224 224), uses model default if empty') +parser.add_argument('--use-train-size', action='store_true', default=False, + help='Run inference at train size, not test-input-size if it exists.') +parser.add_argument('--num-classes', type=int, default=None, + help='Number classes in dataset') +parser.add_argument('--gp', default=None, type=str, metavar='POOL', + help='Global pool type, one of (fast, avg, max, avgmax, avgmaxc). Model default if None.') +parser.add_argument('--channels-last', action='store_true', default=False, + help='Use channels_last memory layout') +parser.add_argument('--amp', action='store_true', default=False, + help='use PyTorch Native AMP for mixed precision training. Overrides --precision arg.') +parser.add_argument('--precision', default='float32', type=str, + help='Numeric precision. 
One of (amp, float32, float16, bfloat16, tf32)') +parser.add_argument('--torchscript', dest='torchscript', action='store_true', + help='convert model torchscript for inference') + + + +# train optimizer parameters +parser.add_argument('--opt', default='sgd', type=str, metavar='OPTIMIZER', + help='Optimizer (default: "sgd"') +parser.add_argument('--opt-eps', default=None, type=float, metavar='EPSILON', + help='Optimizer Epsilon (default: None, use opt default)') +parser.add_argument('--opt-betas', default=None, type=float, nargs='+', metavar='BETA', + help='Optimizer Betas (default: None, use opt default)') +parser.add_argument('--momentum', type=float, default=0.9, metavar='M', + help='Optimizer momentum (default: 0.9)') +parser.add_argument('--weight-decay', type=float, default=0.0001, + help='weight decay (default: 0.0001)') +parser.add_argument('--clip-grad', type=float, default=None, metavar='NORM', + help='Clip gradient norm (default: None, no clipping)') +parser.add_argument('--clip-mode', type=str, default='norm', + help='Gradient clipping mode. 
One of ("norm", "value", "agc")') + + +# model regularization / loss params that impact model or loss fn +parser.add_argument('--smoothing', type=float, default=0.1, + help='Label smoothing (default: 0.1)') +parser.add_argument('--drop', type=float, default=0.0, metavar='PCT', + help='Dropout rate (default: 0.)') +parser.add_argument('--drop-path', type=float, default=None, metavar='PCT', + help='Drop path rate (default: None)') +parser.add_argument('--drop-block', type=float, default=None, metavar='PCT', + help='Drop block rate (default: None)') + + +def timestamp(sync=False): + return time.perf_counter() + + +def cuda_timestamp(sync=False, device=None): + if sync: + torch.cuda.synchronize(device=device) + return time.perf_counter() + + +def count_params(model: nn.Module): + return sum([m.numel() for m in model.parameters()]) + + +def resolve_precision(precision: str): + assert precision in ('amp', 'float16', 'bfloat16', 'float32') + use_amp = False + model_dtype = torch.float32 + data_dtype = torch.float32 + if precision == 'amp': + use_amp = True + elif precision == 'float16': + model_dtype = torch.float16 + data_dtype = torch.float16 + elif precision == 'bfloat16': + model_dtype = torch.bfloat16 + data_dtype = torch.bfloat16 + return use_amp, model_dtype, data_dtype + + +def profile_deepspeed(model, input_size=(3, 224, 224), batch_size=1, detailed=False): + macs, _ = get_model_profile( + model=model, + input_res=(batch_size,) + input_size, # input shape or input to the input_constructor + input_constructor=None, # if specified, a constructor taking input_res is used as input to the model + print_profile=detailed, # prints the model graph with the measured profile attached to each module + detailed=detailed, # print the detailed profile + warm_up=10, # the number of warm-ups before measuring the time of each module + as_string=False, # print raw numbers (e.g. 1000) or as human-readable strings (e.g. 1k) + output_file=None, # path to the output file. 
If None, the profiler prints to stdout. + ignore_modules=None) # the list of modules to ignore in the profiling + return macs, 0 # no activation count in DS + + +def profile_fvcore(model, input_size=(3, 224, 224), batch_size=1, detailed=False, force_cpu=False): + if force_cpu: + model = model.to('cpu') + device, dtype = next(model.parameters()).device, next(model.parameters()).dtype + example_input = torch.ones((batch_size,) + input_size, device=device, dtype=dtype) + fca = FlopCountAnalysis(model, example_input) + aca = ActivationCountAnalysis(model, example_input) + if detailed: + fcs = flop_count_str(fca) + print(fcs) + return fca.total(), aca.total() + + +class BenchmarkRunner: + def __init__( + self, model_name, detail=False, device='cuda', torchscript=False, precision='float32', + num_warm_iter=10, num_bench_iter=50, use_train_size=False, **kwargs): + self.model_name = model_name + self.detail = detail + self.device = device + self.use_amp, self.model_dtype, self.data_dtype = resolve_precision(precision) + self.channels_last = kwargs.pop('channels_last', False) + self.amp_autocast = torch.cuda.amp.autocast if self.use_amp else suppress + + self.model = create_model( + model_name, + num_classes=kwargs.pop('num_classes', None), + in_chans=3, + global_pool=kwargs.pop('gp', 'fast'), + scriptable=torchscript) + self.model.to( + device=self.device, + dtype=self.model_dtype, + memory_format=torch.channels_last if self.channels_last else None) + self.num_classes = self.model.num_classes + self.param_count = count_params(self.model) + _logger.info('Model %s created, param count: %d' % (model_name, self.param_count)) + self.scripted = False + if torchscript: + self.model = torch.jit.script(self.model) + self.scripted = True + + data_config = resolve_data_config(kwargs, model=self.model, use_test_size=not use_train_size) + self.input_size = data_config['input_size'] + self.batch_size = kwargs.pop('batch_size', 256) + + self.example_inputs = None + self.num_warm_iter = 
num_warm_iter + self.num_bench_iter = num_bench_iter + self.log_freq = num_bench_iter // 5 + if 'cuda' in self.device: + self.time_fn = partial(cuda_timestamp, device=self.device) + else: + self.time_fn = timestamp + + def _init_input(self): + self.example_inputs = torch.randn( + (self.batch_size,) + self.input_size, device=self.device, dtype=self.data_dtype) + if self.channels_last: + self.example_inputs = self.example_inputs.contiguous(memory_format=torch.channels_last) + + +class InferenceBenchmarkRunner(BenchmarkRunner): + + def __init__(self, model_name, device='cuda', torchscript=False, **kwargs): + super().__init__(model_name=model_name, device=device, torchscript=torchscript, **kwargs) + self.model.eval() + + def run(self): + def _step(): + t_step_start = self.time_fn() + with self.amp_autocast(): + output = self.model(self.example_inputs) + t_step_end = self.time_fn(True) + return t_step_end - t_step_start + + _logger.info( + f'Running inference benchmark on {self.model_name} for {self.num_bench_iter} steps w/ ' + f'input size {self.input_size} and batch size {self.batch_size}.') + + with torch.no_grad(): + self._init_input() + + for _ in range(self.num_warm_iter): + _step() + + torch.cuda.reset_peak_memory_stats() + total_step = 0. + num_samples = 0 + t_run_start = self.time_fn() + for i in range(self.num_bench_iter): + delta_fwd = _step() + total_step += delta_fwd + num_samples += self.batch_size + num_steps = i + 1 + if num_steps % self.log_freq == 0: + _logger.info( + f"Infer [{num_steps}/{self.num_bench_iter}]." + f" {num_samples / total_step:0.2f} samples/sec." 
+ f" {1000 * total_step / num_steps:0.3f} ms/step.") + t_run_end = self.time_fn(True) + t_run_elapsed = t_run_end - t_run_start + + results = dict( + samples_per_sec=round(num_samples / t_run_elapsed, 2), + step_time=round(1000 * total_step / self.num_bench_iter, 3), + batch_size=self.batch_size, + img_size=self.input_size[-1], + param_count=round(self.param_count / 1e6, 2), + peak_memory=torch.cuda.max_memory_allocated() // 2 ** 20, + ) + + retries = 0 if self.scripted else 2 # skip profiling if model is scripted + while retries: + retries -= 1 + try: + if has_deepspeed_profiling: + macs, _ = profile_deepspeed(self.model, self.input_size) + results['gmacs'] = round(macs / 1e9, 2) + elif has_fvcore_profiling: + macs, activations = profile_fvcore(self.model, self.input_size, force_cpu=not retries) + results['gmacs'] = round(macs / 1e9, 2) + results['macts'] = round(activations / 1e6, 2) + except RuntimeError as e: + pass + + _logger.info( + f"Inference benchmark of {self.model_name} done. " + f"{results['samples_per_sec']:.2f} samples/sec, {results['step_time']:.2f} ms/step") + + return results + + +class TrainBenchmarkRunner(BenchmarkRunner): + + def __init__(self, model_name, device='cuda', torchscript=False, **kwargs): + super().__init__(model_name=model_name, device=device, torchscript=torchscript, **kwargs) + self.model.train() + + if kwargs.pop('smoothing', 0) > 0: + self.loss = nn.CrossEntropyLoss().to(self.device) + else: + self.loss = nn.CrossEntropyLoss().to(self.device) + self.target_shape = tuple() + + self.optimizer = create_optimizer_v2( + self.model, + opt=kwargs.pop('opt', 'sgd'), + lr=kwargs.pop('lr', 1e-4)) + + def _gen_target(self, batch_size): + return torch.empty( + (batch_size,) + self.target_shape, device=self.device, dtype=torch.long).random_(self.num_classes) + + def run(self): + def _step(detail=False): + self.optimizer.zero_grad() # can this be ignored? 
+ t_start = self.time_fn() + t_fwd_end = t_start + t_bwd_end = t_start + with self.amp_autocast(): + output = self.model(self.example_inputs) + if isinstance(output, tuple): + output = output[0] + if detail: + t_fwd_end = self.time_fn(True) + target = self._gen_target(output.shape[0]) + self.loss(output, target).backward() + if detail: + t_bwd_end = self.time_fn(True) + self.optimizer.step() + t_end = self.time_fn(True) + if detail: + delta_fwd = t_fwd_end - t_start + delta_bwd = t_bwd_end - t_fwd_end + delta_opt = t_end - t_bwd_end + return delta_fwd, delta_bwd, delta_opt + else: + delta_step = t_end - t_start + return delta_step + + _logger.info( + f'Running train benchmark on {self.model_name} for {self.num_bench_iter} steps w/ ' + f'input size {self.input_size} and batch size {self.batch_size}.') + + self._init_input() + + for _ in range(self.num_warm_iter): + _step() + + torch.cuda.reset_peak_memory_stats() + t_run_start = self.time_fn() + if self.detail: + total_fwd = 0. + total_bwd = 0. + total_opt = 0. + num_samples = 0 + for i in range(self.num_bench_iter): + delta_fwd, delta_bwd, delta_opt = _step(True) + num_samples += self.batch_size + total_fwd += delta_fwd + total_bwd += delta_bwd + total_opt += delta_opt + num_steps = (i + 1) + if num_steps % self.log_freq == 0: + total_step = total_fwd + total_bwd + total_opt + _logger.info( + f"Train [{num_steps}/{self.num_bench_iter}]." + f" {num_samples / total_step:0.2f} samples/sec." + f" {1000 * total_fwd / num_steps:0.3f} ms/step fwd," + f" {1000 * total_bwd / num_steps:0.3f} ms/step bwd," + f" {1000 * total_opt / num_steps:0.3f} ms/step opt." 
+ ) + total_step = total_fwd + total_bwd + total_opt + t_run_elapsed = self.time_fn() - t_run_start + results = dict( + samples_per_sec=round(num_samples / t_run_elapsed, 2), + step_time=round(1000 * total_step / self.num_bench_iter, 3), + fwd_time=round(1000 * total_fwd / self.num_bench_iter, 3), + bwd_time=round(1000 * total_bwd / self.num_bench_iter, 3), + opt_time=round(1000 * total_opt / self.num_bench_iter, 3), + batch_size=self.batch_size, + img_size=self.input_size[-1], + param_count=round(self.param_count / 1e6, 2), + peak_memory=torch.cuda.max_memory_allocated() // 2 ** 20, + ) + else: + total_step = 0. + num_samples = 0 + for i in range(self.num_bench_iter): + delta_step = _step(False) + num_samples += self.batch_size + total_step += delta_step + num_steps = (i + 1) + if num_steps % self.log_freq == 0: + _logger.info( + f"Train [{num_steps}/{self.num_bench_iter}]." + f" {num_samples / total_step:0.2f} samples/sec." + f" {1000 * total_step / num_steps:0.3f} ms/step.") + t_run_elapsed = self.time_fn() - t_run_start + results = dict( + samples_per_sec=round(num_samples / t_run_elapsed, 2), + step_time=round(1000 * total_step / self.num_bench_iter, 3), + batch_size=self.batch_size, + img_size=self.input_size[-1], + param_count=round(self.param_count / 1e6, 2), + peak_memory=torch.cuda.max_memory_allocated() // 2 ** 20, + ) + + _logger.info( + f"Train benchmark of {self.model_name} done. " + f"{results['samples_per_sec']:.2f} samples/sec, {results['step_time']:.2f} ms/sample") + + return results + + +class ProfileRunner(BenchmarkRunner): + + def __init__(self, model_name, device='cuda', profiler='', **kwargs): + super().__init__(model_name=model_name, device=device, **kwargs) + if not profiler: + if has_deepspeed_profiling: + profiler = 'deepspeed' + elif has_fvcore_profiling: + profiler = 'fvcore' + assert profiler, "One of deepspeed or fvcore needs to be installed for profiling to work." 
+ self.profiler = profiler + self.model.eval() + + def run(self): + _logger.info( + f'Running profiler on {self.model_name} w/ ' + f'input size {self.input_size} and batch size {self.batch_size}.') + torch.cuda.reset_peak_memory_stats() + + macs = 0 + activations = 0 + if self.profiler == 'deepspeed': + macs, _ = profile_deepspeed(self.model, self.input_size, batch_size=self.batch_size, detailed=True) + elif self.profiler == 'fvcore': + macs, activations = profile_fvcore(self.model, self.input_size, batch_size=self.batch_size, detailed=True) + + results = dict( + gmacs=round(macs / 1e9, 2), + macts=round(activations / 1e6, 2), + batch_size=self.batch_size, + img_size=self.input_size[-1], + param_count=round(self.param_count / 1e6, 2), + peak_memory=torch.cuda.max_memory_allocated() // 2 ** 20 + ) + + _logger.info( + f"Profile of {self.model_name} done. " + f"{results['gmacs']:.2f} GMACs, {results['param_count']:.2f} M params.") + + return results + + +def decay_batch_exp(batch_size, factor=0.5, divisor=16): + out_batch_size = batch_size * factor + if out_batch_size > divisor: + out_batch_size = (out_batch_size + 1) // divisor * divisor + else: + out_batch_size = batch_size - 1 + return max(0, int(out_batch_size)) + + +def _try_run(model_name, bench_fn, initial_batch_size, bench_kwargs): + batch_size = initial_batch_size + results = dict() + while batch_size >= 1: + torch.cuda.empty_cache() + try: + bench = bench_fn(model_name=model_name, batch_size=batch_size, **bench_kwargs) + results = bench.run() + return results + except RuntimeError as e: + e_str = str(e) + print(e_str) + if 'channels_last' in e_str: + print(f'Error: {model_name} not supported in channels_last, skipping.') + break + print(f'Error: "{e_str}" while running benchmark. 
Reducing batch size to {batch_size} for retry.') + batch_size = decay_batch_exp(batch_size) + return results + + +def benchmark(args): + if args.amp: + _logger.warning("Overriding precision to 'amp' since --amp flag set.") + args.precision = 'amp' + _logger.info(f'Benchmarking in {args.precision} precision. ' + f'{"NHWC" if args.channels_last else "NCHW"} layout. ' + f'torchscript {"enabled" if args.torchscript else "disabled"}') + + bench_kwargs = vars(args).copy() + bench_kwargs.pop('amp') + model = bench_kwargs.pop('model') + batch_size = bench_kwargs.pop('batch_size') + + bench_fns = (InferenceBenchmarkRunner,) + prefixes = ('infer',) + if args.bench == 'both': + bench_fns = ( + InferenceBenchmarkRunner, + TrainBenchmarkRunner + ) + prefixes = ('infer', 'train') + elif args.bench == 'train': + bench_fns = TrainBenchmarkRunner, + prefixes = 'train', + elif args.bench.startswith('profile'): + # specific profiler used if included in bench mode string, otherwise default to deepspeed, fallback to fvcore + if 'deepspeed' in args.bench: + assert has_deepspeed_profiling, "deepspeed must be installed to use deepspeed flop counter" + bench_kwargs['profiler'] = 'deepspeed' + elif 'fvcore' in args.bench: + assert has_fvcore_profiling, "fvcore must be installed to use fvcore flop counter" + bench_kwargs['profiler'] = 'fvcore' + bench_fns = ProfileRunner, + batch_size = 1 + + model_results = OrderedDict(model=model) + for prefix, bench_fn in zip(prefixes, bench_fns): + run_results = _try_run(model, bench_fn, initial_batch_size=batch_size, bench_kwargs=bench_kwargs) + if prefix: + run_results = {'_'.join([prefix, k]): v for k, v in run_results.items()} + model_results.update(run_results) + param_count = model_results.pop('infer_param_count', model_results.pop('train_param_count', 0)) + model_results.setdefault('param_count', param_count) + model_results.pop('train_param_count', 0) + return model_results if model_results['param_count'] else dict() + + +def main(): + 
setup_default_logging() + args = parser.parse_args() + model_cfgs = [] + model_names = [] + + if args.model_list: + args.model = '' + with open(args.model_list) as f: + model_names = [line.rstrip() for line in f] + model_cfgs = [(n, None) for n in model_names] + elif args.model == 'all': + # validate all models in a list of names with pretrained checkpoints + args.pretrained = True + model_names = list_models(pretrained=True, exclude_filters=['*in21k']) + model_cfgs = [(n, None) for n in model_names] + elif not is_model(args.model): + # model name doesn't exist, try as wildcard filter + model_names = list_models(args.model) + model_cfgs = [(n, None) for n in model_names] + + if len(model_cfgs): + results_file = args.results_file or './benchmark.csv' + _logger.info('Running bulk validation on these pretrained models: {}'.format(', '.join(model_names))) + results = [] + try: + for m, _ in model_cfgs: + if not m: + continue + args.model = m + r = benchmark(args) + if r: + results.append(r) + time.sleep(10) + except KeyboardInterrupt as e: + pass + sort_key = 'infer_samples_per_sec' + if 'train' in args.bench: + sort_key = 'train_samples_per_sec' + elif 'profile' in args.bench: + sort_key = 'infer_gmacs' + results = sorted(results, key=lambda x: x[sort_key], reverse=True) + if len(results): + write_results(results_file, results) + else: + results = benchmark(args) + json_str = json.dumps(results, indent=4) + print(json_str) + + +def write_results(results_file, results): + with open(results_file, mode='w') as cf: + dw = csv.DictWriter(cf, fieldnames=results[0].keys()) + dw.writeheader() + for r in results: + dw.writerow(r) + cf.flush() + + +if __name__ == '__main__': + main() \ No newline at end of file diff --git a/PyTorch/build-in/Classification/Sequencer2D/sequencer/clean_checkpoint.py b/PyTorch/build-in/Classification/Sequencer2D/sequencer/clean_checkpoint.py new file mode 100755 index 000000000..f8132eb6e --- /dev/null +++ 
b/PyTorch/build-in/Classification/Sequencer2D/sequencer/clean_checkpoint.py @@ -0,0 +1,78 @@ +#!/usr/bin/env python3 +""" Checkpoint Cleaning Script +Takes training checkpoints with GPU tensors, optimizer state, extra dict keys, etc. +and outputs a CPU tensor checkpoint with only the `state_dict` along with SHA256 +calculation for model zoo compatibility. +Hacked together by / Copyright 2020 Ross Wightman (https://github.com/rwightman) +""" +import torch +import argparse +import os +import hashlib +import shutil +from collections import OrderedDict +from timm.models.helpers import load_state_dict + +parser = argparse.ArgumentParser(description='PyTorch Checkpoint Cleaner') +parser.add_argument('--checkpoint', default='', type=str, metavar='PATH', + help='path to latest checkpoint (default: none)') +parser.add_argument('--output', default='', type=str, metavar='PATH', + help='output path') +parser.add_argument('--no-use-ema', dest='no_use_ema', action='store_true', + help='use ema version of weights if present') +parser.add_argument('--clean-aux-bn', dest='clean_aux_bn', action='store_true', + help='remove auxiliary batch norm layers (from SplitBN training) from checkpoint') + +_TEMP_NAME = './_checkpoint.pth' + + +def main(): + args = parser.parse_args() + + if os.path.exists(args.output): + print("Error: Output filename ({}) already exists.".format(args.output)) + exit(1) + + clean_checkpoint(args.checkpoint, args.output, not args.no_use_ema, args.clean_aux_bn) + + +def clean_checkpoint(checkpoint, output='', use_ema=True, clean_aux_bn=False): + # Load an existing checkpoint to CPU, strip everything but the state_dict and re-save + if checkpoint and os.path.isfile(checkpoint): + print("=> Loading checkpoint '{}'".format(checkpoint)) + state_dict = load_state_dict(checkpoint, use_ema=use_ema) + new_state_dict = {} + for k, v in state_dict.items(): + if clean_aux_bn and 'aux_bn' in k: + # If all aux_bn keys are removed, the SplitBN layers will end up as normal and + 
# load with the unmodified model using BatchNorm2d. + continue + name = k[7:] if k.startswith('module.') else k + new_state_dict[name] = v + print("=> Loaded state_dict from '{}'".format(checkpoint)) + + try: + torch.save(new_state_dict, _TEMP_NAME, _use_new_zipfile_serialization=False) + except: + torch.save(new_state_dict, _TEMP_NAME) + + with open(_TEMP_NAME, 'rb') as f: + sha_hash = hashlib.sha256(f.read()).hexdigest() + + if output: + checkpoint_root, checkpoint_base = os.path.split(output) + checkpoint_base = os.path.splitext(checkpoint_base)[0] + else: + checkpoint_root = '' + checkpoint_base = os.path.splitext(checkpoint)[0] + final_filename = '-'.join([checkpoint_base, sha_hash[:8]]) + '.pth' + shutil.move(_TEMP_NAME, os.path.join(checkpoint_root, final_filename)) + print("=> Saved state_dict to '{}, SHA256: {}'".format(final_filename, sha_hash)) + return final_filename + else: + print("Error: Checkpoint ({}) doesn't exist".format(checkpoint)) + return '' + + +if __name__ == '__main__': + main() \ No newline at end of file diff --git a/PyTorch/build-in/Classification/Sequencer2D/sequencer/datasets/__init__.py b/PyTorch/build-in/Classification/Sequencer2D/sequencer/datasets/__init__.py new file mode 100644 index 000000000..085412ed2 --- /dev/null +++ b/PyTorch/build-in/Classification/Sequencer2D/sequencer/datasets/__init__.py @@ -0,0 +1,5 @@ +# Copyright (c) 2022. Yuki Tatsunami +# Licensed under the Apache License, Version 2.0 (the "License"); + +from .cars import StanfordCars +from .flowers import Flowers102 \ No newline at end of file diff --git a/PyTorch/build-in/Classification/Sequencer2D/sequencer/datasets/cars.py b/PyTorch/build-in/Classification/Sequencer2D/sequencer/datasets/cars.py new file mode 100644 index 000000000..da93940d1 --- /dev/null +++ b/PyTorch/build-in/Classification/Sequencer2D/sequencer/datasets/cars.py @@ -0,0 +1,112 @@ +# Copyright (c) 2022. 
Yuki Tatsunami +# Licensed under the Apache License, Version 2.0 (the "License"); + +import os + +import numpy as np +import scipy +import scipy.io as scio +from PIL import Image +from torchvision.datasets import VisionDataset +from torchvision.datasets.utils import ( + download_and_extract_archive, + download_url, + check_integrity +) + + +class StanfordCars(VisionDataset): + base_folder = 'stanford_cars' + + urls = { + "train": "http://ai.stanford.edu/~jkrause/car196/cars_train.tgz", + "test": "http://ai.stanford.edu/~jkrause/car196/cars_test.tgz", + "devkit": "https://ai.stanford.edu/~jkrause/cars/car_devkit.tgz", + "test_anno": "http://ai.stanford.edu/~jkrause/car196/cars_test_annos_withlabels.mat", + } + md5 = { + "train": "065e5b463ae28d29e77c1b4b166cfe61", + "test": "4ce7ebf6a94d07f1952d94dd34c4d501", + "devkit": "c3b158d763b6e2245038c8ad08e45376", + "test_anno": "b0a2b23655a3edd16d84508592a98d10", + } + + def __init__( + self, + root: str, + split: str = 'train', + transform=None, + target_transform=None, + download: bool = False, + ): + super(StanfordCars, self).__init__(root, transform=transform, + target_transform=target_transform) + + self.data_dir = os.path.join(self.root, self.base_folder) + mat_anno = os.path.join(self.data_dir, 'devkit', f'cars_{split}_annos.mat') \ + if not split == "test" else os.path.join(self.data_dir, + 'cars_test_annos_withlabels.mat') + car_names = os.path.join(self.data_dir, 'devkit', 'cars_meta.mat') + + assert (split in ('train', 'test')) + self.split = split + + if download: + self.download() + + self.full_data_set = scipy.io.loadmat(mat_anno) + self.car_annotations = self.full_data_set['annotations'] + self.car_annotations = self.car_annotations[0] + + self.car_names = scipy.io.loadmat(car_names)['class_names'] + self.car_names = np.array(self.car_names[0]) + self.class_num = self.car_names.shape[0] + + self.transform = transform + self.target_transform = target_transform + + def __getitem__(self, index: int): + img_name 
= os.path.join( + self.data_dir, f'cars_{self.split}', + self.car_annotations[index][-1][0]) + + img = Image.open(img_name).convert('RGB') + car_class = self.car_annotations[index][-2][0][0] + + if self.transform is not None: + img = self.transform(img) + + target = int(car_class) - 1 + + if self.target_transform is not None: + target = self.target_transform(target) + + return img, target + + def __len__(self) -> int: + return len(self.car_annotations) + + def _check_integrity(self) -> bool: + for k in self.urls.keys(): + fpath = os.path.join( + self.data_dir, os.path.basename(self.urls[k])) + if not check_integrity(fpath, self.md5[k]): + return False + return True + + def download(self) -> None: + if self._check_integrity(): + print('Files already downloaded and verified') + return + for k in self.urls.keys(): + if os.path.splitext(self.urls[k])[-1] == '.mat': + download_url(self.urls[k], self.data_dir, + md5=self.md5[k]) + else: + download_and_extract_archive( + self.urls[k], self.data_dir, + extract_root=self.data_dir, + md5=self.md5[k]) + + def extra_repr(self) -> str: + return "Split: {split}".format(**self.__dict__) diff --git a/PyTorch/build-in/Classification/Sequencer2D/sequencer/datasets/flowers.py b/PyTorch/build-in/Classification/Sequencer2D/sequencer/datasets/flowers.py new file mode 100644 index 000000000..bd684172a --- /dev/null +++ b/PyTorch/build-in/Classification/Sequencer2D/sequencer/datasets/flowers.py @@ -0,0 +1,125 @@ +# Copyright (c) 2022. 
Yuki Tatsunami +# Licensed under the Apache License, Version 2.0 (the "License"); + +import os +import shutil + +import scipy.io as scio +from PIL import Image, ImageFile +from torchvision.datasets import VisionDataset +from torchvision.datasets.utils import ( + download_and_extract_archive, + download_url, + check_integrity +) + +ImageFile.LOAD_TRUNCATED_IMAGES = True + +class Flowers102(VisionDataset): + source_folder = '102flowers_org' + base_folder = '102flowers' + source = os.path.join(source_folder, 'jpg') + url = "https://www.robots.ox.ac.uk/~vgg/data/flowers/102/102flowers.tgz" + image_labels_url = "https://www.robots.ox.ac.uk/~vgg/data/flowers/102/imagelabels.mat" + set_id_url = "https://www.robots.ox.ac.uk/~vgg/data/flowers/102/setid.mat" + filename = "102flowers.tgz" + image_labels_filename = "imagelabels.mat" + set_id_filename = "setid.mat" + md5 = "52808999861908f626f3c1f4e79d11fa" + image_labels_md5 = "e0620be6f572b9609742df49c70aed4d" + set_id_md5 = "a5357ecc9cb78c4bef273ce3793fc85c" + + def __init__( + self, + root: str, + split: str = 'train', + transform=None, + target_transform=None, + download: bool = False, + ): + super(Flowers102, self).__init__(root, transform=transform, + target_transform=target_transform) + + if download: + self.download_and_arrange() + + assert (split in ('train', 'test')) + self.split = split + if split == 'train': + downloaded_list = os.path.join(self.root, self.base_folder, "train") + else: + downloaded_list = os.path.join(self.root, self.base_folder, "test") + + self.data = [] + self.targets = [] + + for i in range(102): + for file_name in os.listdir(os.path.join(downloaded_list, str(i + 1))): + if not file_name.endswith('.jpg'): + continue + self.data.append(os.path.join(downloaded_list, str(i + 1), file_name)) + self.targets.append(i) + + def __getitem__(self, index: int): + + path, target = self.data[index], self.targets[index] + img = Image.open(path) + + if self.transform is not None: + img = self.transform(img) + 
+ if self.target_transform is not None: + target = self.target_transform(target) + + return img, target + + def __len__(self) -> int: + return len(self.data) + + def _check_integrity(self) -> bool: + root = self.root + fpath = os.path.join(root, self.base_folder, self.filename) + if not check_integrity(fpath, self.md5): + return False + return True + + def download_and_arrange(self) -> None: + if self._check_integrity(): + print('Files already downloaded and verified') + return + download_and_extract_archive( + self.url, self.root, + extract_root=os.path.join(self.root, self.source_folder), + filename=self.filename, md5=self.md5) + download_url(self.image_labels_url, + os.path.join(self.root, self.source_folder), + filename=self.image_labels_filename, + md5=self.image_labels_md5) + download_url(self.set_id_url, + os.path.join(self.root, self.source_folder), + filename=self.set_id_filename, md5=self.set_id_md5) + + image_labels = scio.loadmat(os.path.join(self.root, self.source_folder, + self.image_labels_filename)) + set_id = scio.loadmat( + os.path.join(self.root, self.source_folder, self.set_id_filename)) + + self.classify(set_id['trnid'][0], 'train', image_labels['labels'][0]) + self.classify(set_id['valid'][0], 'train', image_labels['labels'][0]) + self.classify(set_id['tstid'][0], 'test', image_labels['labels'][0]) + shutil.rmtree(os.path.join(self.root, self.source_folder)) + + def extra_repr(self) -> str: + return "Split: {split}".format(**self.__dict__) + + def classify(self, set_, split, labels): + for n, id_ in enumerate(set_): + cls = labels[id_ - 1] + filename = f'image_{id_:05d}.jpg' + dst = os.path.join(self.root, self.base_folder, split) + path = os.path.join(dst, str(cls)) + path = path.strip() + path = path.rstrip("/") + os.makedirs(path, exist_ok=True) + os.rename(os.path.join(self.root, self.source, filename), + os.path.join(dst, str(cls), filename)) diff --git a/PyTorch/build-in/Classification/Sequencer2D/sequencer/distributed_train.sh 
class ERFNet(nn.Module):
    """Wrapper that captures intermediate features for effective-receptive-field analysis.

    Forward hooks are registered on the sub-modules named by the dotted
    attribute paths in ``attrs`` (resolved with ``rgetattr``). Each call to
    ``forward`` runs the wrapped model and reduces every captured feature map
    to the scalar sum of the activations at (roughly) its spatial center.
    """

    def __init__(self, model, attrs, channels_last=False):
        super().__init__()
        self.model = model
        self.save_output = SaveOutput()
        self.channels_last = channels_last
        # Fix: the original kept the handles in a local list that was thrown
        # away, so the hooks could never be detached. Keep them on the
        # instance and expose remove_hooks() for cleanup.
        self.hook_handles = []
        for attr_path in attrs:
            layer = rgetattr(self.model, attr_path)
            self.hook_handles.append(layer.register_forward_hook(self.save_output))

    def remove_hooks(self):
        """Detach every forward hook registered in ``__init__``."""
        for handle in self.hook_handles:
            handle.remove()
        self.hook_handles = []

    def forward(self, x):
        """Run the wrapped model and return per-hook center-activation sums.

        Returns a list with one 0-dim tensor per hooked layer, in hook order.
        The recorded outputs are cleared afterwards so repeated calls do not
        accumulate stale features.
        """
        features = []
        _ = self.model(x)
        for feature in self.save_output.outputs:
            if len(feature.shape) == 3:
                # (B, N, C) token sequence: pick a token near the grid center
                # (assumes the N tokens form a square grid — TODO confirm).
                feature = feature[:, feature.shape[1] // 2 - int(math.sqrt(feature.shape[1])) // 2, :]
            elif self.channels_last:
                # (B, H, W, C) layout: center spatial position.
                feature = feature[:, feature.shape[1] // 2 - 1, feature.shape[2] // 2 - 1, :]
            else:
                # (B, C, H, W) layout: center spatial position.
                feature = feature[:, :, feature.shape[2] // 2 - 1, feature.shape[3] // 2 - 1]
            features.append(torch.sum(feature))
        self.save_output.clear()

        return features
class SaveOutput:
    """Forward-hook callable that accumulates the outputs of hooked modules."""

    def __init__(self):
        # Outputs recorded by the hook, in invocation order.
        self.outputs = []

    def __call__(self, module, module_in, module_out):
        """Hook signature required by ``register_forward_hook``; records the output."""
        self.outputs.append(module_out)

    def clear(self):
        """Discard all recorded outputs (call between forward passes)."""
        self.outputs = []


class MinMaxScaler:
    """Rescale an array to the [0, 1] range via min-max normalization."""

    def __call__(self, array):
        span = array.max() - array.min()
        if span == 0:
            # Fix: the original divided by zero for a constant input,
            # yielding inf/nan. Map a constant array to all zeros instead.
            return array * 0.0
        scale = 1.0 / span
        array = array * scale - array.min() * scale
        return array
Yuki Tatsunami +# Licensed under the Apache License, Version 2.0 (the "License"); + +import argparse +import os +import glob +import logging +import time + +import numpy as np +import torch +from contextlib import suppress + +import matplotlib.pyplot as plt +import torch.nn.functional as F +import torch.nn.parallel +from timm.models import create_model, load_checkpoint, is_model, list_models +from timm.data import create_dataset, create_loader, resolve_data_config +from timm.utils import natural_key, setup_default_logging, set_jit_legacy, random_seed + +import models +from erf.models import ERFNet +from erf.scaler import MinMaxScaler +from utils.helpers import train_rnn + +has_apex = False +try: + from apex import amp + + has_apex = True +except ImportError: + pass + +has_native_amp = False +try: + if getattr(torch.cuda.amp, 'autocast') is not None: + has_native_amp = True +except AttributeError: + pass + +torch.backends.cudnn.benchmark = True +_logger = logging.getLogger('validate') + +parser = argparse.ArgumentParser(description='PyTorch ImageNet ERF Generator') +parser.add_argument('data', metavar='DIR', + help='path to dataset') +parser.add_argument('--dataset', '-d', metavar='NAME', default='', + help='dataset type (default: ImageFolder/ImageTar if empty)') +parser.add_argument('--split', metavar='NAME', default='validation', + help='dataset split (default: validation)') +parser.add_argument('--dataset-download', action='store_true', default=False, + help='Allow download of dataset for torch/ and tfds/ datasets that support it.') +parser.add_argument('--model', '-m', metavar='NAME', default='dpn92', + help='model architecture (default: dpn92)') +parser.add_argument('-j', '--workers', default=4, type=int, metavar='N', + help='number of data loading workers (default: 2)') +parser.add_argument('-b', '--batch-size', default=1, type=int, + metavar='N', help='mini-batch size (default: 1)') +parser.add_argument('--img-size', default=None, type=int, + metavar='N', 
help='Input image dimension, uses model default if empty') +parser.add_argument('--input-size', default=None, nargs=3, type=int, + metavar='N N N', help='Input all image dimensions (d h w, e.g. --input-size 3 224 224), uses model default if empty') +parser.add_argument('--crop-pct', default=None, type=float, + metavar='N', help='Input image center crop pct') +parser.add_argument('--mean', type=float, nargs='+', default=None, metavar='MEAN', + help='Override mean pixel value of dataset') +parser.add_argument('--std', type=float, nargs='+', default=None, metavar='STD', + help='Override std deviation of of dataset') +parser.add_argument('--interpolation', default='', type=str, metavar='NAME', + help='Image resize interpolation type (overrides model)') +parser.add_argument('--num-classes', type=int, default=None, + help='Number classes in dataset') +parser.add_argument('--class-map', default='', type=str, metavar='FILENAME', + help='path to class to idx mapping file (default: "")') +parser.add_argument('--gp', default=None, type=str, metavar='POOL', + help='Global pool type, one of (fast, avg, max, avgmax, avgmaxc). Model default if None.') +parser.add_argument('--checkpoint', default='', type=str, metavar='PATH', + help='path to latest checkpoint (default: none)') +parser.add_argument('--pretrained', dest='pretrained', action='store_true', + help='use pre-trained model') +parser.add_argument('--num-gpu', type=int, default=1, + help='Number of GPUS to use') +parser.add_argument('--no-prefetcher', action='store_true', default=False, + help='disable fast prefetcher') +parser.add_argument('--pin-mem', action='store_true', default=False, + help='Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.') +parser.add_argument('--channels-last', action='store_true', default=False, + help='Use channels_last') +parser.add_argument('--amp', action='store_true', default=False, + help='Use AMP mixed precision. 
Defaults to Apex, fallback to native Torch AMP.') +parser.add_argument('--apex-amp', action='store_true', default=False, + help='Use NVIDIA Apex AMP mixed precision') +parser.add_argument('--native-amp', action='store_true', default=False, + help='Use Native Torch AMP mixed precision') +parser.add_argument('--tf-preprocessing', action='store_true', default=False, + help='Use Tensorflow preprocessing pipeline (require CPU TF installed') +parser.add_argument('--use-ema', dest='use_ema', action='store_true', + help='use ema version of weights if present') +parser.add_argument('--torchscript', dest='torchscript', action='store_true', + help='convert model torchscript for inference') +parser.add_argument('--legacy-jit', dest='legacy_jit', action='store_true', + help='use legacy jit mode for pytorch 1.5/1.5.1/1.6 to get back fusion performance') +parser.add_argument('--attrs', default=None, nargs='+', type=str, + help='select layers to output features') +parser.add_argument('--result-npy-dir', default='./erf_results/224/npy', type=str, + help='path to save npys of ERF') + +parser.add_argument('--seed', type=int, default=42, metavar='S', + help='random seed (default: 42)') +parser.add_argument('--num-batches', default=32, type=int, + metavar='N', help='number of batches (default: 32)') + + +def generate(args): + # might as well try to validate something + args.pretrained = args.pretrained or not args.checkpoint + args.prefetcher = not args.no_prefetcher + amp_autocast = suppress # do nothing + if args.amp: + if has_native_amp: + args.native_amp = True + elif has_apex: + args.apex_amp = True + else: + _logger.warning("Neither APEX or Native Torch AMP is available.") + assert not args.apex_amp or not args.native_amp, "Only one AMP mode should be set." 
+ if args.native_amp: + amp_autocast = torch.cuda.amp.autocast + _logger.info('Validating in mixed precision with native PyTorch AMP.') + elif args.apex_amp: + _logger.info('Validating in mixed precision with NVIDIA APEX AMP.') + else: + _logger.info('Validating in float32. AMP not enabled.') + + if args.legacy_jit: + set_jit_legacy() + + # create model + model = create_model( + args.model, + pretrained=args.pretrained, + num_classes=args.num_classes, + in_chans=3, + global_pool=args.gp, + scriptable=args.torchscript) + + if args.num_classes is None: + assert hasattr(model, 'num_classes'), 'Model must have `num_classes` attr if not set on cmd line/config.' + args.num_classes = model.num_classes + + if args.checkpoint: + load_checkpoint(model, args.checkpoint, args.use_ema) + + param_count = sum([m.numel() for m in model.parameters()]) + _logger.info('Model %s created, param count: %d' % (args.model, param_count)) + + data_config = resolve_data_config(vars(args), model=model, use_test_size=True, verbose=True) + + if args.torchscript: + torch.jit.optimized_execution(True) + model = torch.jit.script(model) + + model = ERFNet(model, args.attrs, args.channels_last) + model = model.cuda() + if args.apex_amp: + model = amp.initialize(model, opt_level='O1') + + if args.num_gpu > 1: + model = torch.nn.DataParallel(model, device_ids=list(range(args.num_gpu))) + + dataset = create_dataset( + root=args.data, name=args.dataset, split=args.split, + download=args.dataset_download, load_bytes=args.tf_preprocessing, class_map=args.class_map, + ) + + loader = create_loader( + dataset, + input_size=data_config['input_size'], + batch_size=args.batch_size, + is_training=True, + use_prefetcher=args.prefetcher, + interpolation=data_config['interpolation'], + mean=data_config['mean'], + std=data_config['std'], + num_workers=args.workers, + crop_pct=data_config['crop_pct'], + pin_memory=args.pin_mem, + tf_preprocessing=args.tf_preprocessing, + ) + + model.eval() + train_rnn(model) + # 
warmup, reduce variability of first batch time, especially for comparing torchscript vs non + input = torch.randn((args.batch_size,) + tuple(data_config['input_size'])).cuda() + model(input) + + random_seed(args.seed, 0) + segment_ps = [] + for idx, (input, target) in enumerate(loader): + + if args.no_prefetcher: + input = input.cuda() + + input.requires_grad_() + + # compute output + with amp_autocast(): + outputs = model(input) + + ps = [] + for output in outputs: + output.backward(retain_graph=True) + p = F.relu(input.grad) + ps.append(p) + input.grad.detach_() + input.grad.zero_() + segment_ps.append(ps) + + if args.num_batches == idx - 1: + break + + for idx, p in enumerate(list(zip(*segment_ps))): + p = torch.cat(p, dim=0) + s = torch.log10(torch.sum(p, dim=[0, 1]) + 1) + s = s.detach().cpu().numpy() + + os.makedirs(args.result_npy_dir, exist_ok=True) + img_size = args.img_size if args.img_size else 224 + np.save( + os.path.join(args.result_npy_dir, + f'{args.model}_{img_size}_{args.attrs[idx]}.npy'), s) + + +def main(): + setup_default_logging() + args = parser.parse_args() + model_cfgs = [] + model_names = [] + if os.path.isdir(args.checkpoint): + # validate all checkpoints in a path with same model + checkpoints = glob.glob(args.checkpoint + '/*.pth.tar') + checkpoints += glob.glob(args.checkpoint + '/*.pth') + model_names = list_models(args.model) + model_cfgs = [(args.model, c) for c in sorted(checkpoints, key=natural_key)] + else: + if args.model == 'all': + # validate all models in a list of names with pretrained checkpoints + args.pretrained = True + model_names = list_models(pretrained=True, exclude_filters=['*_in21k', '*_in22k', '*_dino']) + model_cfgs = [(n, '') for n in model_names] + elif not is_model(args.model): + # model name doesn't exist, try as wildcard filter + model_names = list_models(args.model) + model_cfgs = [(n, '') for n in model_names] + + if not model_cfgs and os.path.isfile(args.model): + with open(args.model) as f: + model_names 
= [line.rstrip() for line in f] + model_cfgs = [(n, None) for n in model_names if n] + + if len(model_cfgs): + _logger.info('Running bulk validation on these pretrained models: {}'.format(', '.join(model_names))) + try: + start_batch_size = args.batch_size + for m, c in model_cfgs: + batch_size = start_batch_size + args.model = m + args.checkpoint = c + r = {} + while not r and batch_size >= args.num_gpu: + torch.cuda.empty_cache() + try: + args.batch_size = batch_size + print('Validating with batch size: %d' % args.batch_size) + generate(args) + except RuntimeError as e: + if batch_size <= args.num_gpu: + print("Validation failed with no ability to reduce batch size. Exiting.") + raise e + batch_size = max(batch_size // 2, args.num_gpu) + print("Validation failed, reducing batch size by 50%") + except KeyboardInterrupt as e: + pass + else: + generate(args) + + +if __name__ == '__main__': + main() diff --git a/PyTorch/build-in/Classification/Sequencer2D/sequencer/hubconf.py b/PyTorch/build-in/Classification/Sequencer2D/sequencer/hubconf.py new file mode 100755 index 000000000..d0eedab1c --- /dev/null +++ b/PyTorch/build-in/Classification/Sequencer2D/sequencer/hubconf.py @@ -0,0 +1,6 @@ +dependencies = ['torch'] +from timm.models import registry + +from models import * + +globals().update(registry._model_entrypoints) diff --git a/PyTorch/build-in/Classification/Sequencer2D/sequencer/img/BiLSTM2D.jpg b/PyTorch/build-in/Classification/Sequencer2D/sequencer/img/BiLSTM2D.jpg new file mode 100644 index 0000000000000000000000000000000000000000..942aa712efb897dc3cbe8163f84c29ecb50b576a GIT binary patch literal 73526 zcmd42cU05q*Do3?f&$W;l2N1!8I@j=5eJYUARR)?edk!2tboj_op<}`##{~k^lc0kK4jnpp_|TCfhYt&+?+bnh z9hNvEdFI09qf(AHgwBRbUwrW7`7srX*Y9PV2HEE>-HdoBEFvo>ub`-^rmmr>rKfLT zXk=`1#H_!XBSsDcMng5-!1>!0fDHX$f)R;SajUIM~O+vxW_4}**Up+`2|n^ zEPO#IBbJk1k}KZS)=}#l-ZnP1(>pr5y8r6w9U2}P9UK4rg~4Ra{yjIpu(-6$+5Gl> zi@Oc{*!jn<1E53y?y`Rw_J6ZW;DiGQ4<9~sSm+{7_G$`;HOrjtRH{kjOR%$6cXuzN_dzVN 
zTBcg-8-U0%tvQ0ZI2t{D5KwPdKKq!`@Q7zzy%N(93zNWD2YGc~TvYUFOTy?fylEOE zW_HXy_PR)!G2z2kraXyV*`vl-9s=CPN7n-SQ=OxNA_ksVmDt$SWNf~F+XY!I=c~8o z<<&%g$F6>v!Kbu|(Uh%c_Cb&FlDC0c5=zdXk3G?Kk9TPw)ZSgh{Jvw57hWFNCQXf) zALyLAz@8_lkqMBDwclR1a*#NRp1E_!*f2n|!@7^&B%~A3-FbUuH09t8nL+cl!6}BP z4$8_ggQy!tljZArX=}FC4L8kCRvO0nv^}f5^vi$LiT{IMzeK(7Ey7^9{4gfsdcO zy5F4OY{q^2-ji^*&D8xd@{-Hll_%z^PGNsm^Ip4Uk@Q*}vMHTeB~?%f6V#FxRWY|9 z_L$Y+_XT{-oqZ4xs1v|X+7tNwU}z;|E4g9s(9k~UUAZ5>{w8cuGK(+t^FAo~;TU!i zISgZ~Eoc2#5%9`w75)Q_eb7v#`2Tg8m-`@(VC*Mz2v6k3fAT7qdT8bV-3^#!LuRet z+ngP4xdiSKPn?#Z$+o3PPA#}&#Yr9FLMzHrAt6!ut;0uOGOmyI?<{pY4R(eTu*K9*vC!`3|{ zAl>Wj`51BEDHA`9{~?EEKBMAynu`_m~;1$uZ9U+}3*RT_Be7E_B zGZw@B=kwKt`;KmvC}#9(x1AfB7bwoUFJ8XvhG5=?w+8x&=vtt{?d22a~%?F?DI;)u$!CNmpHqChMl4`eS`;%CQEpAo z74sII$UdEKsTL*hCuH|2d*OB+IBGa8a@)?&qHG9dJq1XUj`haGY^!Lh>qHF(?TyAh zUrIO*G%}H=(cP@9hf)3`Dx**-pnzT~JGQ>zZf|!9LE~7{@gf+9;q?}Xc{MRE{N)k{ z9oeOL%RSPEff`mL9ND=X7ValQdmf!}%^dEs=$h9UezKUA_u~b5jC`3)lFnHmX`h=T zxT7XDb5-Tr9Z;P;VNtnrLjC2LocrG!e8jJPdm;Jw#OohxO4rUz(NHO#jzo!txD1~` zg?9{vJexjH__v?T7873_I-d}4HGC|#rtqel3+mUnDL?lptGfot{!S6^q&^(^CWXyw zHR~@8D0mz8rKFh~5Ys=cyXt2apmzW<&u5G&eJSa{In>8DKRpvT#XRd#mKUiI&G7fQ zwTWp@owHXu@EBzf030u3T7vf=xn z5LANn7*8`~R=$f&T9PwqK{E0Y=6}hJ-SeL!ff=m)W_jfQks*a&O zJ1d!H;AL5b0#6j)Zk>gE*_g%BH;zp$WrWCll{<;1cPk(KWOi|WhY?sl{G31fjVgWj z4xzNJ=3yl+y~KuzKi}xDh;GpdN_DUG{g{Uuux_4vvvQMUbPveAcfyvwcCtAIL`$@s%4w3#t0r)0An*y|*g#uidONc4W_j(rxn{ApB(*3R+xt>*D9QEfX-30kUSIH_MT zdm-_DvGZJUVey6$J-@JaJ~ythtjO(QZj&@2T0N@KDJHakk{DP`@m)jRDq4)T)wAF1 z%I@0y#r1ZEk0&$W^4aqh!hwBVz00Q#{`EfD<lah0PAPNE>2tKt=~fIW-2x(uw{lP}L+)9M-@*!-0~O*iQ!f7J|Dz26oa zUn}OBvO`lQt6NLhHFwFJ(fAR6wMVa|wPS6vVH6+P9_@b~s+AVfm4}FYxVGk}dak9$ z&NL)uY7Kwc{S)5fKO_45TMPqV}1ZQS;;lbZ~B{U5TVuTmY$zrK@W+j^}&?jN^0^gGW4#jZ; zf@6G@IR$C@c3$>%Kx`!%<`kcGEO?xzN6osH)_%__rE0N2u&WHKjNp}z(&1^st~VF2 zXPh@zl$rJHcX_d>>uwS?t6(ztlHw#%G#{<1(PQy-fc(;x8^L|`FvG|qDCFCfC!TLR z)jyQA{!OU4gqn!#NVt^HXEL4<0-Jf>CPf0Y{a<1tIOp3$w`#uZb{f=kciK|*rX|o2 
zE(Z9u-ViCwbB#WVCkKuzT6c;HIo96ygFm7d6wa+D1va%vGCq4CCZca}2Fo{7k4ugz zDl?4U8q5lbJZ-TYy@#TzYDqcd+XrcdgzvAYXH|BY)7Up?Qmzt2!`h+I; zVR9BHjk(dWJI#ogW$8X)%g&EGvcuLcvBNqmXS0MbXNr(O63gKcLx+^8hm}JYE^r@? zC8*g(#xk2J$6YywN#ip@^npW+<$FHFe2V@$?&arTL@8B;KKkMnJpHvz#nbbaFn{Nh z&#mNBOtMKw5;LT8M+m}up;UJTLnRxdE)|kSyt1{sjU!pv)Bcky_);ntt~6=oI~Gyf zRBLxG1}e3eNBR^M3{G*w9;1Ap&V0QOdT#|i$GeO@eTA?z$h(|2(~!Ckx)b)eAvRJy zWdwVtlfCT)TwwL>xh>G2AgiB5mIIHj(~4{jcakP-O0LD4mB>8~JHD4?mGx#mp>IWR zb6Hc@A!#gAt@tJ>)L+9=)AOu9Vk4*YJHJwXb8>Un(n1r}aPeh!YfV(%YKHisgg!f_ zogp);BgBw-9)awd-HpO{bK25*@v43*`mMm>?mT82Qr|~f_RQ3JQ6BV?C;v; zdfK($HRyepceQTO_3LS5HD-X^`p{gRs&rw*ppCI-UlH>KNwiv~J+@mwmrmuNgxYT`QUBsrf|TY6?BE#%G*#fY>qQyaBH50~(r52O#pJ^}jY z)*HQl6fm16?`(bgb~>0F1*9-*KCgeX7&dd{O z9_J)9GpB{TI$xlJltONM*@<5t{Jvsm>sffb`RSeWh0BJ15iyy%>EDiMUJBN-IlSTNfoc6hr>z7zPWgF?2sjJ%|7>XaVWagZL)9`K9_mB)feY!5JyCm`#4lD_(h~%9a#4^Dg>(#hf>``<)tGt;F zbKzuV{0;c&jCbszF#2)NzD$3<%nEU~-v=empdEe@yxOl4n?9#s^eDaet$O)4_UmFT zH;unZqz1KLtcCHIJ!8!v%)%R%cdm}Ky@FodU% zVpa|)qD(qRrj^a?Sl~3jYd{7GJ++LK^jPA?nDkFQXNk&n_-Ynh9XaiN&3n1G{$j~J z{R>fAzmdtkNcY5Ga$`kAs)xrF@p~7`8kIsdEv`Szj(AyPX!5#P)lHHhJ;6oUnZBg% z);&iXElvbmWe2qIWcP}m0s5Q(t|LeM_XIglKv$vdws?xK#NM+O8^V&w{bT|QsgMjD zsQ_Sn*-Y{say)BVRy3%OrL$g8y%G_0kJ8#don}@9_>ED|wa$0ULSN&$+bOY*RUwK` zufpxRqy5duz4 zhO%S}M;3>Ax%8^FpPQuGQ9gDKqSKi%)S=swd)?ROc|2OxqgzKFA9yjujT2dQilcYN zDc8H=dIW~-(_1{W@CMOn*`=fVb!@DMw;?gNb>S%@`eo2X9)?OThyO8j>%R}oh#azw;()% z*E0X*Y`%qkES&u#L_2c3?EGZy@7nq0LV{j)^ylwH^_E7bu+gr5-um6KzPmCD1Mb2W zk@0;`G-1X@UwVEtzrpdekq5D#UYGBKrX}}59LB)zr33q*U+VY^{M*RSUyyqT9Cz_Q z{vTqn+MmJSk2|bm=fVG93E>Z8-&;k2Q~45mg;1@aeNZix^eEw!19Kn|rjT$Jy)qm6 z8qEDjWv~1Kgv^+3NHI`&@fOJ*tzbTs)4wW8L?PxY&-p5|b@TkHf&BGp#P|*N04_S? 
zrGK0sS;kDq*!OzsLnpYsJ)?Ose=YPoLYq2}(!&s0vkP9OvKLscZS=qv^WCdJ?E5JdPC+tpcvUYfHQ|((4^%uYL8kH3_kLSDPG~Dr zTRS=HeS$HyaD9V5WiYsE2n1grk4npuLR+qW7mXRV7(83T`6v_I!5BdOOA^<6VQidTPDA)&O4(|Q=yC8@lu#x+qpa1_L zH#BUf!1PfJk1h{X^e6W2^(EF@m%?`D^k}!91hnP%l>VS-{`M-@f5288F1e&C{dcQU zGg3{kf1gk3H7+s(hYJ}&!adkz`*oOFW?@t2r~jE42#o}M*aJ)fu&Xzz(p9PsV0VpI z67<;(>DWL~Nc+@zz=vrk=QNBlegQPIYBF_Nu8>fQ+~+q8C}rsGj-9X^p|t}jl8+Z7 z7%78g8l~=-J~g1+urEHBJ0~I*UJ|X~oEoI%s{SFju&KC!GZYhTydL9Dag)q5a_zDz zUSj&2L@(RGBS;oSk8+(I^NdVev{7$Aabs#tCY1y&h7fa<{Q|(w;MzrnF+u`8PXZ3_ zRtOk<_U)&%6YPexvQnfhI=KB|pXKl9SUO)le}3NKRoJk4sfc)Km}A1z90a%@;iSs_vqeTn{J*fKG& zRGM?RK~6Hb^z<;azbt6T;nMV%uX@!UE*jNdj%AHEnM%$N=MrfeR()6ds@$?Fgl}@3 z7UuuaK+B3^A2bR!_wdRt+ko6FJKq?f-D}_$cpvvp{P!If#3@DOdsyPVJIeBG-&*!G z{qARsAv>hlR~d+789-Rz1A*Zai-YAkGp*4*_XFC;4o+VgmCvWWF3t{g39LlqsrCuk z8<1rBgfZ6(N7z-)^d*hXe&-Mf3tgph|!NLr0w%T0gOcPj0$(ck!=$WzZ;oVT5-&=q!6}7c-8C zA{ELgYK;+X5MtR>Bb42S9jXlM0g2xFg@Y#bW9-j-b9J4&HwxBA^Yn>v!EWYSu=`Pd zXZP^X1N77B*Leb0OCyl<_>D(}x55RUz7@VSeT-@uJwlWD{37tVK!;^*70UL5wT=Y& zX7})L;$QNAbETv9`gJs$Y`5bC;Db7$9+G&7>0};3wS#p@m!wez3Aj1Dr8-d zEshu0&}^3&2BMYjarFvIr@eI4-AAn!(1+6HY8$iWy#tQdB;XDSJ(yZJFwKc{)@}g zTxij3NgLf3o3zj_6=XszeM02@)W(thlYm?{!Q<-4W~!(fbitJ&NMXK=jYSlhylccH z2I;!IC@a{7)N>OCve#5?depwUYvpD~YQ^Es32Jn=JV=N$*r7=J`Qu`8ug(1N6DuZD zLxU0dXDpoHm$F~Z3fshiJO07IxH9ZmoSHm*l>jO>+S}g1I_$2r zKLOO;fYIj|S&sO-Mog?f+u4m>Uo%s4Ac&E-HqO&hQd(I%-hn8|ED%slCs6JQsvHO0 zosCHHjzRuy;z0|MY%7TK$-V-0G=3 z?_QB|Xf}CGB{^SBo@m4JGa9LsCvGL@J}a)Uho3C+&74x%E6|}1q%#Jx_(~YbVg4x; z@(>?{F>M$OYrDOnv^1@to1loXA8qkrPK)tQa~2+$S+j>G5aTVHESSV%p*@a0FoVUJ znUjOYn$i`M%4?aX!L1H-?&h??X_M-x#fqSeW1gQ1D_56|t@KF3&MgGZbL7EvFZ-pg zbv3i6@^Zx2M*Oy(z3y+CzGR1yQCv%BjQpR06o?QgT96g6D*bN_K zJ7cURL3FXHjKT0o_yAP{?h8DWGpoIkW|Ex691v%qE3W~jEaW3ByTTvHQ~zBLe5{`Y zly}>VnQ04v>RnrQ>&KFs%jXp>`-xw(@JCtGZ|u4BUa(uv%Mrg1WOzX>w>Z@x+ukhI zApZE5+u_AGwo)vufX}Wwd-^!1;;+hQ<+H+8?^jj$iaFw`xf?=pYN-s%g;gfUnYG=H zJb_l9*?t$MfKlBe?1TKpGZhpDb=9Sj*Dj0c~FR{!<547(HJ>cCCBeSR9HzoRNvyn*($aelo&iUAT 
zdgN*VNe@lk5yEXUNSXXg1z!>>4U?qkH`G{~Z_5^`cloL^Y2P2QT_6vfDD5 zV}g)s=$zI?Q)K(805CdDCv)4X#LVV7+HuyJJ@%w3Z4zd2%W7zC4gOHbD!s9h?1Wjq zP_p#?TNn6*fAogNUhaBoh&2)IZ6Lr3ySD0)AJ3$4wh_0uQ`uUNiS)*Rxg=uoOk z<%hQh8W4e!Uccu1UMv2i?UigAbOBw-C6_OhuInFXrD@g6)QZ=2nVZxp~2joefAg9)~ zN$Qv#P;H?drg9cX>b#!J9uGTWi$bX5~H8>gKDU>-Rv3|@>eNdvca7>?)#Cvz$R8H$eA z`+n9HaznBrwxOFhcMz{k4jgj|9h18#Hh-}JWEZj z1zNLaj?)Qx!c}JQ$3HV{zwCOop?dlTV*BHZvpVp&1R*x3nUj^o6WuinOt=Unaj@L` zEx-d-O$tA@_xdYK&EyI+u8RlZWW6Q4Pmp;%ejK zNCaHxEnVE$lQrTrPgqlGHZSV*)nSt!M2T8=CKz%IaV@8r_m#nQ%lva3H)=T$|H}|% zTkAf-aZEqH$lRps^eTD&>(^h4{&bJID)T8Pw8vnoN4u@@RLIN9p0fP4y6hjde`{v} z8o1Tk_;Y_?OgQctG_&5Sp3?)X3+!0dLU)#^-PxXVzknYjPhezNu;hBjp7Zuke3nZu zym}Lv>{WuhAM|SLUgs5FL^^m&(c>FWr9CD za~sEh%e#zo9$Oa#q&6Jm?)jtBR1fqDw_wl=b*Fh-JBfW~^F`PCO)YL4s-|+g(k>1X zxDV3s;;5_`GBN_@_lmJ5DSV;F{K0*Y%swaxpNX#>@6LLp4BktN&Jqhk%0W-?905Pl z3RD~T!MnaqiZedNIuv8e5Puvv+UVQiT z@MK(Ik9Ti8EKH&od7+t4TJR^A`H~H8e^k^R=Z?8KLP}H{uO@CTS~l#CeoJiowa+7^ zEa$n7B;zi&$m$KBF{L;Bi}KHV$!);{VlQMosiLP6`e71L^?gTBRnLIvU2CAAGiTzt zztEbmJx!(2!uEf3Yk6NPmNC#c@{k0yA=F^UWN@(I`nq>z2k5vjeLwa}m5 z0FP#~q%p@@ORusHasL1uIDTBrAaAK`lVuyG!1HD!6I+BT%Ca^oV=1f>`@RpJHw@3l z$3J~ufUBs_f&r=ue2I{nLd)}Z%C&a|oBE_gU$Ck67qY$%y#9U~dzs@VK*G}Kic?Qm zF!2@H*HcR->yOBuD~bkWe_c#vtEuzw{M$FJ)EuF{xAmnSc%k58H#@!6VY{e%mu?N) zlo#GBFB$f|#@+{s@l4sU3^N~&oFk|0L0dqUtS|b~>=b_1;UH4bM3`8$vNnVPk^^AN z?j7pu#7l%y z(N5E)DW-}FhYx&-H@EK@tF$kXfmiDml9QsA$sZj_PGYUMmj)e!i$uPMUTliqt9?Dn zz<>a8QeU~l4YmXGbncFN))p*vM{f)-%2Nu_eZsLt_!JIwP%d7Lx)m=DUhdfpO1MD1 zfKJ^@@S<2bb~+I(5hG-6yHPiisFwM%j$YA<{sy!Pm(~fr#W6}I!u3@DDYcLB^!zu1 z;jr69+j74uV*lwXt~H(7`BzuLm* z3M2$!fgt`cw+3>rIN`(TS>+V*A=*LA@lQo`$_tDvU`G%PF1b^HZ|!zOj4?Z*bV`=f zokA#tNTLVm<-=Bqi>2C}!|qbu6d>)|u5L}GwOJ1~?(Q3-+b;*b>xp&|Jsj60Ui1C!~mXT*fyLlAg(Fs5c0bE; zjPUkPeeI&WJi17Rps^e(Z&cN^7-2Q4LQ);yZ2N1|ZdXlT7w7;(4W!h-f{$6$=m%wU z{~+;q4~3`?lIUSg8%ZG~h5}NXLq>>71kC7Y zUu8${gSyFPkz(Wuo>76_YukYnK$=g^`=$+^+BU;KKf|E9U@Rp3eg(Ckg;+Rtots%4 
zIGpy3;H2aH@OpIP`qCHQiv{Nz8SrgHb8S%Gh0;!Uaw^4W_SE8t6UEKx9&0moHYme4iO{W&Zmn^>*yD!t-qi;orgy!)Hdvu`TpUp8sgc)s+}3VMN| zAddSDsl3{I43!Fo9pV{>l=J(LVi-$i+`3NN?Y)D2_JA`pB#eFu6Wcnfw=xQ7mN72- zW1ows%W)Z_CA?ChhPte7K_sp+4686ROl|TKwi(QL^?eRJW2qG$&h00yX4hs>V;!=; zu5Z)I%!M;NNZl=J# zSft@c7U4hVlDkWj$Xy|16DRM={20tfVNFv~ z{G}JfQ$v(%*ECuN6of|;wMV!f^(!t!`QE}Bp^sme)a8zihF-VV@zlFlCy~%Tb&%80 z9+q66-}VOoh<0X?e`-XqOAn@w*3|;G&(IB)Yb3{-yM>2Hcl6X=jLNIpUvwnLSA-9~9PoPi#ldImXDlNcHC{Dk zLR7FbvvXVNEv|5+mY|xuoC(nZ29u^W$%`aE$m(%HM+ZhLF%9xzx57?f?Ag_L76jKO z20gX(il;ot3ucBuIyZg$Sonj0Y5Q18SQU`a*ZF22gtDQakyqcm!F4$4z8H^@t2zC} zg+>%^&)%sEvV==7)!ZVQwEg3@hC{e-C7OgHLnjwH$0xj_|? z6PrE>i_MP+p-grdF=TAymk{VTM_+&1#5ma`>^K5@bz}pO4nkv=jv8Rd}7SrrWw|!NrUW&*N1R zU#t$sx^s}Lk6s8uK|v~C6r;OW0@VbpJFq7)=DX+6VIDvxw$-XRiL3UwC2Cqj5i&+@v;z7_uOB~c6 z9vCUW+W%Q>^9f;2%+b^*!AdR=8rH z`^`rrLnhd%j3Q)%DAp_AF0UIBuaaxpA*8T44uwGRmHCRw-mMB~5C;f`Z;n;nb4l1) z?6Uj^Z%a0ne-4~sKh&seOWvLq())`A1ke06J4}-ZN?jpiv|s-^Xu-f3j<{KSZ5*0- z%)1a%&6>6S<3i7>#?o#f!a1s7ok>)0)i2MT_P{`1-4B!Az}TxyRlSzn+p-In0D{}% z5q(7Wjp}T1AYkD41aC)d`yasNIWOM%w}8T7tblG+n4`1dYo4Keu@3y_WTEI_RVC+;sLz72TK8T&&DyDa*D zE`R(pRRmbss$}tDV5~Kr5&j8dJyx0=*#CO4nXd$%7d@PAQ&N85sqy7kr!GoeI zKmYc245Ne!SAO7E-f1>=FUtAsVft`ZxoQss*-KKfd(Qp?H2XnO*DIW_)$Jo!$> zT_E)v#Y#qdHo)HF<#_JTTk2XL>k6GHRu^;9I&FNc2;OUq)t%5Lv7i3i@bo6^;e{W* zM;ei}7zlNs1DQS)hZHLR#*>|j?Sd)WasNcBhh>Ht?$xsDV4E8pC#=H6Oq_>;BLJIY z?9CyW7;q<^Z@*362RRsjOm`48C)4YK3UWhaRF6L7CMYF?gTyIE>`EIE#ms-r9SU_} z6$MQ}@IEND4GfjWIKJlim#58?=$ILYafaWG%NjN~Vce;J^{f$JJ7M7U%q=t3RUGY+`MeozPxTi0lf%c8uj?MR*P~ zU}@g{Ebi}3BfMBxr$9|5Al9{!6O+Rca%0o7n4;ocChByGZ#-5BHGXgUAtt=?hYnNf zK9T+st=%~U+|;H7L&#J&CwTF5t*I}KyVgduXhRtpRiTY?rXhRMS8}d-G42IA&|+(8 zG+AyrRWI)dJltgteksq!IWKT3H#cY~V~ccs*H-|S+*_koy$(Y8#hzCTd(>i0_E69b;jLY9AEs0@ zjC5`{Bo~ZM%4mFxkM-3d^+l)cjQE|S)(%|^_F*)phkx?;%<>oBg;ktC$0}Tyi;L)6 zFltzz^ci^=W8bSo@ywVN|KiRn$TG8JIixD$)A`^)taMnwUVe)o^RM$!r{jItY9+G+ zUuHXylF<}vhtWG58{b>Q9N7h_f+G*^d*+2?_%7|N=bSZsd?P88LWvr8qK_049Y0FE zm3g%w*Y 
zl^9JpkAaTYIi&cWGl8Vx`LeRlWxCQ{0Fj*(BlhhYCZN}9GVY!?LQEmce{*>lM93it zyOQL@>|N!YBwbZe@f(&37+p>6)J*k57Lk7wT*;2tGw!{jkCPQk2kl)|EUNsiPL1q? zB3cD>J4Z!bjn#RPeZ30D}Nt2p|BQY@b#Iq;)7puV;}*Fl$UEkU>Z`bz(KoO6NB9c5+O>8=nBP3Pj^ z$Zu9BvvcF(&abywRkm#Y+dT>KpMcNVHq0=w$01Nz|n%9bywAxN!aXbX96Tqg4Jn~*)%V~D)Y<0O=!0z<{p-${X09^wD)#B*`gCbwO z6F!!n9T6!S5ayi%v}^35nOTQ<7luNg zzPgc!hIIt)c$`hp+?4Lu<_JPZ7Hz|8s*0K`U@LW}120c%Pzd@7QH_nn#${L?GLz>l z)GhuHc7zEG#O@mMpqy^{J}BK+Z-lS9m_IG%+(A1h7?`41GxPAtG&yvzJ3wLL6#%7Q zF9kv&oUknYN}Cq#3Ur$9keS@szzB~(hIJb0rf@{BcgvblrRf*-_@2$WDY9lq#T%K)hMC!SLi!gYn^304Yu6xZT{joup&mC zL#I!P1GdaoL+*p|7E4xGDhNu^Z@(QJUpNEHjh3 zsq@28Ymx7scui};m+_Y(!^1-za(*3ZL5xl=?}kKu`0=%*wc)%U{E{8 zqWtsd>25{!jCJT)0Nh!s-N=qEC(UG-(l^o0cB4Ggs`nBj=h8ad3|GSR#Pfw;ZjvKi zi;au0BH=cbdNq{w^_HZ5C%99@-Ee|PG1=5Z zYl6=qkMqw3Ca3^=teP~FwT86=yEjmZgTXuvOUWi_v@Oc^>MX2#cB^(2ync{L2 zUjgoykgZAIiB6MzBJ=V5$dR@Obr2CCVAu55de;WG)YQ48!uj#9nT7Rr6L#^Fc~_X( zkr9|J=d`)u?b`9Hr0VsZ7@I&yH~0ka{2sm~{`t!EoA4aw!dlww>0m452?1xQR_cc1 zEDMcQd3obz8{ShSr&ivF0|)eqin=-OYsXm-@jzcyz^fd-LrdX^^{zepwABN!_J}2V z_%@xH)wMpI(;IbCLCD@-Y;$2$Xhlz@g_6rCvQ)7L~+R<*a~3g#+%u4dT9M@ znz$+J?~8G5;%eWbcRyfM0atNG!i92&FMJJ19#;>SF)gZ-p!C&m`v=q&mf9e|p3=Bb z_1y|==RSy30$X8}$4Apn7*Ho)?SnRd#8+-_O}^S&jBexcqYJK#YdaVJmz!fy@OaxT zurS7Uj|hceTmd3;x7C?Gbq0uLJGV~>HxAN%0%qGAUTnV~Z<7hdc*+v}W(FhxY=07# zslon+`nHU{bFOvy%TrLJRD|tu#iyxMEwx##D8h03@YuAMbJ4=qKM29olq|SH65Pnl zTtv#TSo>yxBRo$lbyVhv{l_wOUQSa+lkbl5A+{+u0+`#?^We#Il>cPc3PP0})j>e` z-CKC)6WM;8bI59t3On?1T4qa-ahDiVI2J@^!uxYjsUoMesPYR6FCmaE6wrlcXJ#s& ze$SHGD|8*YOR%Uv?UYRtU<5vL%2CCzIL&h6x#9F-I1z8yD5@G_!twZ^dx7=a%7C~6 zYc@5>!C%v=f_t)TR^u{BVK_X35Zx1$C#MJ5xHrB^-ql7o&lF@}WU2w$M7!J&;LN=O z__N(az33gS^*EN#C6+@+n-ZGdt}J1@VjJQ%@eW~0dW=ygpcCc;_2x(Q2E>l1zu^GBq`z-ufo)SzofrPLb)M)z2DV5U3-rL)1)6j7h0liN|oXTwV8A$J>Fp z%Yy{_5_o>7ZdI`sJZK}|Ij?NEnR8lYCTvZUDHtvU*M_aGu<#FekR4eMX$OHYzn7Tb zKAFKdu=X99fezT?ov>6$s2Pm?twYE+`=j%dbPhJt~h-79<5 zX4n_#Rg6Er3lUxVoPGPa0P__v^7Rz(W1jIjr(gk1i*HYx2GN??rQ7dg9SFgO>hu^! 
z^7EPC47F{q)}~x-o12;%mQ~kA_G&KL=P?DenQ3p-SBMSWdcB+(pk8vyS61r%O;xub z4-qHgRPZd8cxL)k!T_T=STSSi_f}dfX%Ts1>uKHMIh|;Q+TpdCd>GW6dfu#9 zn<%KQI-RTP$9^smirEMKX;swl!YaJm+4XKjBwqDIQ@t$0T-~Wu*NlwxXmrcEG~qVY zCELhu;67!~4XpjitT})&W7m^|Iat>EBhEOpt;5GbF#Ot*)?d%AS>0gTxwpsAL^uVw z7E6-g(AO1(CG+*iT8^`_WCNQ99_Y2ndZAUEN2{nx>LaVGE@YO<*2{K5c&`d}emN(^ z)v)Z&4$pR#t1U__E0%6lYHD&70W_-s*Ffw9BV-_lVaRupI#CaUMUih z;prT$B3n|#IjzkN+#^R#Y=D7*>+DbhtJT(RkKA?}%s^fp;<R&+@D1(#0BXF3(o%T~Qw;ikC<8 z)aJ)(7?I!LF07L2)jAz*x>Ub>^6d;o4>t$>n|ZkkDxNWWb_AB;J^L z1l73xzd6i!=r8^;`g!FejhXjh%<2LN@4TQ%)yKD^WRb_jVYA>YHLs#%>nHBrhJ8QL zfXZW$Tx_Z#^uq5+7xDycszG_{PLq?MCWY}zHk|pTktOmaL6s&$dmq7w9hZGjEb)aP z9oYVRle#dwy}Gv>!E@v+WMBjCt9t?jwm8dwqZ=ZNF1XBbe?NTyqs?nOOsJ=PRZeA z$CoQ`$2<+^;s6_skKP{>hQedQZAkLdg-aCtr<u$}&Yjcf)mJ0@HIm~{2(!Rk+^Zpu&`$9nS--GjyBruumUya}6ifVZ;dkGY z?3#Ai0U*tqZC^4gei9wj-2v4Y#9ZMx(`mv(+6Xqw&6PgL`}O0)O07D zw*`N$$1=pbp^BfNih#+~0>Yrwv}tJF>+PcFjYKJ}c`aFczP2$uZ00)AzvDP*NJ| z=7?e%!M&uje|TOnXstGs6Z$4C616E9pQ($5f0!fX!z*%`sq`jzSzyK2wGA|_4}s9hBg5O);$aE&N^|Ky&wn>oncaF64M=LW*Qg3 ztT~Et{M5$OV(SeC3MiU@M4s1d*2xv9v;b9k-lD`Fpik*>Ldw&*;r#?`Xt3|;!RUgm zx6LLl%VSx!NuKcHn%tpyHAhHr3G#Y8e3&Gpz5DfbF=1BOWbY0Ctf#{+CJ<53?vRQUGt=JH0Sr44AOGYu z)5nkpTm02B{GRcS7qOmke&vgdLk)E{Qnp|xrM(1K;S!Vu$v9}JYr%mwsJ9PFaaR@^ zQ5}ErX|q%-c!fMfwl_leR1CWK`zu0+i=K?O6gMQsIiZNoPo^fN-z-CRb!{aq>NwOh z3OC9*Epbo78JbRyikTYDu0hVxs;6a&y=%9oCmiUC;%U6d7XxhEtmVMN1Ua5N zoAe(4cpoGm^1bFj7LG5+Daa^zhr9gbPBru=I!C+%s+{TYm@kut9LHRm@$ns}V?*<2 z-8{~J-?N|p3;mXC7q3+rtc@M>`k5mO@mB`QO!eK<-sT#s+f&^Knom>GnO|e(O29GIxg_eH4blG9Y;R$9vO=gTqQDB+zN(z zuzvm+D%x8_bdm|QwOfMMn6dD0f3)T}xJ`==Uq18@e!pM#x4rBO?cZh>a z7HeeTAJ8O6qHDy^1kcgxeNZ4o8oz40GI5&GmUyS`udsjyY%K#_|M4<68D6iZ7amP= zN~irFbiH{rTkHGxe};}))tZNv)2f;|ZKY;8)uQ5%iaCZHMM)GjRgiMpnn`_XJT>JQ zB7#suD1w>~ijs(VNKkVnEs;v({PyQrzx6z8J?s1Y>yN$c?0xV1zTVgC{l04Igl=MG z%)gdOuH0CNj3$vDWdjU)YL3*88jh|B1Pig#ShS2yNS_@} zn*8aAUho`_8Jj4$Ao$q&I+3w-vX5Om5G%DCa|IL05)^xpQ1Wst!wOgqlsIWi$E_A|y9*jRh2S!XI=Dbz?;lInpr>~Hm& 
z=C%Qaoa=&H_tc^+eZQ*VO7GzMBlFWG63QYkZFCouOAlPZdHTvWg;t^6x0*&sPNfTA zo$;^DX+YdXVV-15naK-A?5sNHR^!?Yyk}?5~Ica zg48hT;+#tjN^;btcR1TqZdyl3jE?sXq8O)mWk476tA#{uaooHApoYRlcji&r{3@Bw z-d?uIPBIUMvfX$%&ksJ%=(z5Tw(E{2s^R+PUky8 zA(5*UHc>Ke6wXRz)>N_YEkn~3>!@GCwSkZid4N`Cv|D|Mxjr7?L)BVoo;bKprC62B zZ_EPwad5E2qsT`O6T znF}dwf46$6_S=3!Mk!R$F6-;*9Gp7wlF&N z1Fj(f$Fv&u6Fp;tCkg;xib#hL-<+=b2NrJVOmSk1^+~;P9WkzMjpI;_%sp;6HhP}H zH@yaG(XQ9FypmMs2RG&kFzda__~YkUp32`cO`5otERp5r4*|Nc3Bd*aO;%9PKjFiF z)lw`X86%<-R5$ix{Q1W!2SL)@HyMbF+#VO_^lYGO765gal>McSJY(h{4GGPstgg-O znQbLUE1Gpykk(}LP_YDb~Z_;XJX2~UTml4JCBT4Q@$6~N5Z+ZNh?dOJ-WsB|)5 ze6@IxNa{Z|0*l`Auh@c{P5nU$Ik6YCghar~La`D7sB)kvB695Jx?qLl>mKw< znB@Be%x~v<@s5r>dE?vv{(va-`E)QuTL<4c2%1YB5Gb&vlF6D*jP!1OV4siXq$hHo z*?O|~Y@x4@@CQr1BVvs3=9ETg96Y_-5GvyTw7h}TRXNXnMrC5)Z&|w?#m{|=cMl4p ztk-|;8@-howiyjVOe0-*j~C3KZ((6A9yMRo@ZSUJm1EPdT_6wRybzam*Iylde(!ML zj5l>y(09T{=yf>1C$<_Nx;iik{7L~rYCTTd~2Ka zsV`JZ8mUGkf$vUm`Aw_i8nQ^giM951>`H%TDc2uLXW)Ks8hx!g*+3C@cl%v5xT2h4 z*5R1R_0DCBe@B>!@9Xxi4PU@Z$kr1to17{|;m_J(e}}mZ4uZyPtY_1R`c-gPekv>m zhmVD^Ap1T&8154&15ykk-RP4St}Ql(67)0kV3+s_CnP(G0W+8oHt3$}97Bb=Q4 z%SyAazie|<$5YL+b$+@)$IFzux<=A<$|$tZRD{4BlsJz@7VdF6xo3S#xQcG?*t*F^ z&bW*F)bCFKsMdmxu5aCY)gm?uPFW8MR`~DHy}j zv0^kJ#dcq<=sKMOX4+aaEAXfAs?qn?B!t41O`O{--MqMblI%03tMxsO(|$0+7V?_3SI72V!!f+E>oHsLPH+51qVQG zSy}p!i{sI6htl+KN>tdFs&Uak<1V-HP|3xb_L<_KblSG8yo8~52OYr~<2rb9pPl_CB;<3}rfC(&e%nS()X72iVFm{LwtMMAvZaR+FI%Esh91E&9hz4~8c%ws0_GwUrWMhH$IC_OcJ1&S(Kph_4 zX<2dif0%E^(oHl`;bz*he0yS(J1Z!8~Qp07r{2wk5g)0VLPWF3!EQrhVbpwxkb}6pUdx?o{Pp9Fe}B*k5@+e< zWi31~)l{KnysCnL@Up6{_3>w|9Se{ZgM;0|S}uEo>fmLCzSv6`yZ796sU(d?R}j~S z_Z#;c!~A8KfAsdVHr8JJ{@8hs&BNA~c3SwQV3M^KIjlKX@= zbLI!Nec@q~wbhSGs1ZT#?z=v#(ybT&en)u?onxpv;cnYd3(E7b8I8{;y}ul``I@nO znOQi=FoY~>!Y{d4H!en-@6Lt)Vttn^K~v-+R*oaOZ2f&^hyI!EXpnrJ7nc_^7hjk0 ztl4#reD}72gK;me_A(^yGG$&|%;Zx`x zx#XSnT*1W{I#^nuci#xX+Fm~F#))C{X&753^mLCeUhwHBXqtkEb|coxeLQT$YX$nJ zR*amhU(v;P?`iu<>1jsB#oVbrc|DeO{LU@p;$=3yygAQ0-B`mKzIb3@kGCFrSWn_j z>&)6zU(qS4Y2P3YJ%v6PS3 
zB3@}}f}FsX9hzLigdbO6BG}DLh3;A@z8v=~qfn6{S-M;WO_IOPS{mp~iczH%Ee&{x z21z2t(1xZo;ChI&bN+U&ALyLCGy{4IBQC)y&k9*9DlRsuHk~|Ks1GZZu zjD|C`;(}%t28LFdzrI&ygl`gwm$hDD&WtObw2#J16 zEIVm(?6fRm?q`97&a$?9#1EM(h{hncLUMJZf-L$Om}~tDYuL@~?Q&VdfF(aQQ9!scG$0b+OI^nE%%9RUBHY{~{!aS*y= zJY7zFfo_aN;abmnq)9~X=kK|U2)f4(MSbco4ZF>w#d!BErl!27-6-H4wb)Jzr*$0{&TrP zq7aaaC-RTy_pA8IUpkqwq2DPr0dZ8Z!~lBw9&tO7-g=nR=-6DvPyJpUwT23f|9?i9 zoqof=)WlPM5$GuZr0TL2rglHlq>t6~ZFf-u0bY?xSLRtSjD8+_9mHR1=G!HLH#)_> zU_AX_tSRzax&}sh*fmlz9p5qT^l(C(Yn--WMtQB zr?^o@W(Q-k4%^G(<3K6Jmt9ZN*zLm#E<+-(I-fj9-z?#IT=-3hOfo zuo_yIZX3ju9%qN1YOiobm-Ya`M$d&C1Jt4CV5qWS@f8y@>lQn#Ju^R}KJU;+O4WrB z6#32EW4FGW`cVZIru4Gbz+;@ALV%ac+g7YdL%tHUW5m*~j!3p-zIq z-6zAf<2svs2D+`m{Y7V37B2+X;eLP+=|Qa6MlK6qfS%ikxdrr+bQ3kg_7Y8uV$YE~ zUPVNgZOA6Zm69hK{jrK&aGPR6YD;F(l{=eW&oc_H3MHFsc9;zjq=vHfzs}q#S@9;F zm>iIG9yC|Klnpq^VI;T~@JI$`hi4&$556_SbB$NC$}2r!Fyi%{dVvx^z&rH{+j0JW zImM}a$69&;_6Jg4#(MUWJuW2nL^+ZogQ`&qXS7YuO>=ASu^o=ErlVJMdqOskvhq&k zWS%>YfFwkM&>C8`joa}sXC%e+Xx{6*wXV>cHS2{54g>;q@SFKM=h#gCw9#_~qIcu# z%xKL|6x29zlv&_-^^u#Bwck7e{cY(1WWE_5fhEQLdXeZSplw>;(_^Ad`-KxZ+9uq z+S6ZOuQHYbcnia*WEa~ieVfdX6px+91nG*x$CS_j z)f_w^XAQhxSD^8#7qEm<*Y6RMx?omi*f-6-Q7thZ{M#SLy)s1mZasQBU6Hp{SGnY? 
z;9EisTBvP*Bjl2%=2`h162N%(t*0;c7Ot66)C$^NOofI=_$IpZFCXXoatEaPY_xIv81sX-N7 zG*`hYwTB)$#&p;1iHga0CdYF+rnVopl6EsWPea>Up8|@`kkGB*my}2;?cS0%6ZpS0 zz>|vzhkj>7fA*#7IgLMAd_umsnNA!6p()h3rIhxka>?->8wl(db&|GI~HPJe{N}=xF zjeKzqb-^R-VQoU`X_4ygU|44SWPz8een?Y?`kmulEfT@0FSUE6&cPw5f=1A_DL}#9 zhC&HvsOy$63#Vb@VQqzqOp}ZW|5k7k#u&6)Ln`;DH}!9Z3*R~I{o|619T8HX2;Cl}rn!ke zO!F>%oL#ADUm8u+41ArpUqvUX{rki8SBEti$ZgFrT(w2j90n`Ze)h1iz(;Zyc{{c{ zgs%%t?i3a%^4*Uc13GRFvpo4;y4^ghrJuOK`pqWoVKLwchOm_*yQ7ogF1d)1K;7iR zkG5^B0z`YdJmlIs-iEhZ0SRx|gwz{ZE3m{yg2DSqr>t~(sP#hl+dQ&=|5fO9RI?ro z*}}MT*-W9@d^$bgK#>WDx#Pl<-!==Cs&>%(p)rCCrW0o;yNpKS#8#lM99C;^ZEL8<2ghFAI^HRGz-7|1p#Ji6*lbR zgr0ooA$s3aRD;OP%!Vq~03_(JF*dmj5OJN3jSyeA1w|qGj6#C3zr`_5v@@huH%dY$ z7p0Bb!^=PW{|`Ywe)HXpC__4d8^F6#BsXLp=4dpVr_Yb5qk@ptBs`Fjs)=AMl5SM_ zdd9FplZ7>e8@c5s#X&(D!5$rjkET==;jVw}mn}8E+*lVjzFzI?pzUir=U{`!xj))F zoOwRr#ZtIG9tKFKhzsG-!irY)lVkv4#|e8+g)26RUSQ}Zd=-( zkCJ`84>LZ}n<|@F(u{{MA1~=-@58cl1EMbExxIPj^c1@k8y*-fVd)7F;gJeGhCvP7 z+?CAO$9w~-V3HjohtvIVh(k>s{LcR`)y2u}%bJhrV@ptQb#pHKwisC#zW)0;!JEu% za7t6w)Lu*v1@~oh+lg#5EO^elUD8JRwUz#?XYR1_Q~KWa=Dl`#(!W29t-FL7>)K6* zwTpJI+tb6W!H*JJ0WXDA|<=s{R_XXT9k$GO9Gy(++cJir5gS^evxX4!0(+cjE7&gh-Pnz}Hy1d=B-eHl{5nl`WrJI*Hxy$Ac1 z>h)WjKP*6vYF)66>W`r4`&coi;(<7s7QSH>x092}g_5`_b4=Y`U$#PTvx&kuc|q(! 
zJFOF$YL#GRE%kdF&c5YcJrHUhsqfuEBg<8#ipLr6PBQW$CG>66)pKrSW)PE!$`aS} zd!N~GY=f$z!k>hfCg&9(mv7#DRr+V?Nqtk8)v3w(yfy0)rFzJ*=qKnEwBjs>+4h?DVli|C^n7*{tVkZ$&{+7O8`*y;lcK58xY<6?!k_E*$EdNr zI~1X!9$PTX1HqKF&H6)@&q2MS9u05VZST*>%2GPpSD7MzY~hr5;VS^TYXv?&#($ZY z72kYxQ;1|_BX)VSm9{lOlrp0qFPvd~S5r5|sS|8)%GW#wawqvy0*||N(QuhMxS=HlurxtI}1EhLp z>E!_!{WH`WG1yecSTHgi)eeG7#59;Yl?pky!csBySq0146Q4u;vpXZKeWs-1NgA4N zh98*3IWfCS3AS0v;+1w-kWDon2|9VE1Ks~^5-tl`1&lSk*gxfkS*J2t5uC(hT)2X$ z1@DGnEVjX^*J>;&$~Uzp%+NT3^PDx7WCR8lLbBju-8fS!YPad{qF<*CP?qMkhtS4^*R6@jDGfFtI4bAys}ZH@MJ z6v*vrO-|S<`3q%Cf=gv>%`t;p#&-c%eNcQvhb%R0??vw7&xySPbuQyn;c_8jxi{uq z2ZcpW?*X6VW)JCXCpBL5xlJ^{y znPTszkB5H>YmrMK$fyO_*apzD+?Zuk8X4Hur;BuK*;l@4evXpEDj`0Ec-f*664b8tJhQ1Jr(U`& zF16A4)Nr4yLXzg5&U7QbZi0N}r0d;IYE4Zb$WWabe$M??^smi@_QB%Fp=MB&jf9#( zT|@4zkTm{x{(G>xfi0gAA#IJQf@`5GE>qUN33DKiw%`2TMTZ>fXZI+eL zUt4ajcb*q0qthACWQu#)23QvC)9%U$N;NUWJm41Vw6isS$bD1 zi=fK5$*zT-Gf!{ZxuR^KQ)+W-^4r(JK3)1$-DlAad3iRpuQQ>;0Enk}#ku!>%0vc; zAX>tn-c?*iB*2$m0#OQAHss};`FlU16V`3shG{B5+3k(~K)aIa;(yc}PqAQ>Mg|c0 z#-DDqQo#&`vAnEGYQZ)`+osHSmtpg;*XAZqv?&?N$cF`I1!jZ7hdY(fp4%{d+(&zV z+d?pLk|gw(8mw^F@hop5Y^l>s&-`g&e${pS*gXCb$U8NCf zNto{P`&!cck*H|u?$Gf^+0P?UZyZzHGY}zl-@fJQtZBC3RiE!RjK!GDJqL1G`q~gX zONsCS9BaD|pyzz}9uxu4A9&t&uU+9W_f2$5%SofTW+O|ICW=TNv=~Vy5T#Go5&XFQxG?DAaXnVTH~5$UzZxyipzVK z4?yZ~8O|XM94HwNGp`qW89l3+3DGxc;gjVvh zF}jcKyWBoC6LSq=;aM_6s_hFpV>7$><~mM8VDw4iLLH0V``BtvVFn#Y*Ipcoi_Q*D z3ybTFw1T`IjN!=E?hQowSJQ02q9c%Fi&Y}?j~q6t1{dSahP7yT7X7)ZegobA-hZP+ zO)$r#*wslevX-Yi29O>{HAa?BP;M5Gn4oa1Dl;jNfF2!!kCh5*yR^S6g3kbI-(OsS zUbl6>ktI-#O%^-}6P$^C3C=J!D0>H7&nTZks&eIKu7nnfeTe$4joTnGv4A!>G92;m zRf`elYrf(IF`pp6Aq7SYKl`zqhk0Gg!U`X9?_F{?KV>gao4xB%!-@zwUN8pyv{td# zUbOq7J=?eH$>-oDjh?)ty`1zY_&0NMUi_ zmiFkpoY=OXc$_orNZ<2vy6NsQ7L)tX<>PP@V6iLO1TkdzIdh~KrzDzJAP zwX(E4jA(VwnO*3t9s2cY4+A;5W}_z~HVAlK^?3A$$7Gu3qqb&^AAh--;C=n4-}Gfb2ev5Nw44dcTJxIpG2hcR zp+I0F1y4p31*16gZjc-Zo|5+Xum z`3~&OMsu2)j*B%-r0=qHACf-_uJg~ZC-?ze7v+BKAJKcNeG 
zklR@jyX#4T1Quv2fnpXBfn?`M?-sLsa&RQR$&~gNaxWEsaQ^oeGS;9VV=c?AYzMyK0EXh>oQ_fzu^O^#T|Yrmz=2B zSVIc^d~2+W2%Z~YbJ?CQ>9=oah(}_U~J83&8R813Ig3 zWmo~=9e#eU(A%1Jbh%Z0HoLEsTR*8|g#3dzDr|F`0DIUwdMkVbA0ru1C<17XGme-mszWl;oEqVMqC>tatl=|GnbjfKMd2+sFOANpQSW z`S+#)%B?Q0#TVp2)>+cbOrA3D&}z4INPG9G=bn_S&;~8u%%vae!dNhKcX+y9=o~sx z#@d7cAj_T-Xc^IP*taCJB{+isgqg5iIcv5;yonsfl6@y$x7YYw@1HZ&^)T~0Q{%1tk<F?B?oD-=IuX>LZl6SSrF3w%@Ro6^!J8Pd&U8g{q%g_V+t6gZX zUM)bG!XggclunVO=0*v74)H3K+ua_in8k_6^hrj_>q~7FAOnz>G=8u0U1d?~!X|ks zy;BtMZWM|=pmeaHN!0;@tC*`?_Z4Hg?j_-v>z1DQZNx=oR$yu^N;g(YJA9I5@}dUW zx}M6Q?T;loa^!DLda_n!F|zaq;lm`a&hcw{klT1wC7AF;uMq9QxyU)V_`B&M=dp_$-DUXHY^bMAaG{T$C(Ym5 z&ekq3+f415hTsmy6KBbeOEZ#teGkuI$)!*c-;XA$taDxBB|cZ2Xw8U60xX-o|8j zA%Kjk&uY20usN3VHF&`5vbsai`I2u!Z)4=;he#_**rP^uI@fA_0#oumy zEzy$Y2$f2o{h5>Uj@?nqaF2jbVANTXy{Hb+iQ4^hhHVxN+{}vZqr}96E784&vUe)3 zcfR!|yGJ}sh`#DYyiXr~&QX8DHW}OcOBZrG&c{ytJId0U5T$NeaTj8s87XGNrcXsJ zw>NVVykx)OVNdaYrW>n3@=()nwNN2Mhi<1#>a3;S*Fo3(>?o-oYbcDuCGgCrJy23p zjt1b@6yczb8ySE)CVP1W(JWmED_$-VV-!^N8eOhEc+!kRLq)ac3qz`ZJ`ecH&!Rea{*gp ze_>VqpH@_X3w_;BRT#p(V!w+`xgL<~5R@3HXM7+iwbz!6v@%k#^JS2|5 z>t$~0!cx+Eq=xpiwJ(j?lq-w5GuO=}@G4$U_2`Uv{AuZ-LO1cs{(R1@wCr$My<9sN z8>8{HL?EeD=v6n4HG!L6ngdJ3*8Tef!x4b2!jCh7ll|061Sg1p9l)WAfEc+>PeA02 z=OF5jbC2bCazlb%9{5Xa1En*k{%8aWe6c@$<_L!Kx8A$*z&_E4>BYmT{FTmG0~3N3 z51NUS8azM*4Il6$w8=+(EiEy1{;oDwA+9!d-4u1UGE?#e0Ta$r=eButvUU6SQV}v5 zF(})7gFDIvMz`uYlx{$~p6Vl@L;JdTw-QpL zD}{Z1$}X#X6Uwu@lskzvdAtifYwgKaJ6nNQEi1`hn!(i{i~s>!BJmAdQRP)6go5z( z2-%tzqKCh1AJ27h(UsI+T8R>KLm(C?!ODf<;YJM~KTuwOw5-*=oY6P6miDOSOR024 zpS!Y;gm-R+qj5E9aEU)Td<;^j*l2Hr7Kn!;W)Lj#d~RZ28 z4g_mT0SHW0>>peNR6u9nXgJ1I!WUv6`xzh#gN}=<7QZ8WsXG*I3^n|^ZDexi(=@dX zEJ~qU~U5l-oUPAZ6sJy zaCY%HmnqV{^wxb|Pk&8GcmyG}#BK&#jfPqxzaV=}zdQd=Pui+BIj?+05xq%{7q+3V4XN&rOVa=BacW%u@@NHPqU4dSg*5zB(c)c;SH*yEGi9HT z_tM4<-yOU3VOkU*;Aud0|9p(brkW&QPXtkvihY7fI-wgKR6q*#KW5h7p z03wqBKd##|8DQ;o-RbnN<0^7e)oTHM|M%2gNgx)p+otGwHP2`+vPt``nBuPcw2*!K zX3SRr1z*Db&2XJh{8ELv$&5ipp?kj<&c6Swc zw=?-yps&0%SFjPh+fL)LO%zY2yDLu) 
zQ7;&sYQ*hcL2XR>nv6t=ViFf-xLGbWP zH6?h}1rEs75dDOb*s-}z_B#_!pzL2`;oq{ zVzl_?<0kTGukP8{R8KOS%B8M!yoqG&NuqU`vAc3?DYL{6MFubD%vpZ~o!*K%v8Y_f zs(Hh-K5fko>WdXclN*PTS`@=_i%8~`dkFUsYDu|#T9r*1aWS{Pdl0a?OB>wcH0Ea` z7FE9*WW+5P8L56Vw6Qk*bq&)PjLt(x*_XPg36>O2+mJ6idL_Rsc2}`I{zz=C!wY4t zxJtpA)!Oxl_SsOsw%QYzP;N83!HVU=MaRfqyGLN+2`x3cfi z7UOTCjreM*rKwF>3y}uP%66`}8**bsJ8D*Q!>bOx@$jf~lJzYIk=RcQ-=vYwLNBSk z!>WWhmaLqC#4SM5XF)Plh-zDht3XP!m;rs=Vf0BOAgqFNkFD;&{jqFhMZL;B$Q1~0 zn*oiT+*0tt*|JY&MQL-Y1uC?^W6$%AC$L{$=_Zqp??F<&?3t&0zG-~yV&6THAF^6{ z*)3aV<+7O#q;m7_w~)$U%mE&$au+h>_-uGP1E1b&my+JtJEr}rnzkNSx~noY>Yz9{ zx-}TBVH8Ph%~m=wFXlAIj@Z4oQhRpWX;C`Ar%1Yu&OggW^o3#^JSA&SmZ$S{HzMRk zXiWC!93pWc(nP7AmEISdIE&@C7E=8pnq^P-p8chf3ZtuAt09+>_9=MkjEC~%J0VBEl#)WN}qk1uW3ShWUX0J%hvBnIWYJ09u6a0Z#yT!Z6>`eq!P0V zmaZ2X-Lu8E{LzZrJL2J5!AAvUndn6dPE% z@!{Jfsjfw(@y6qGzp+8g1$3$%`!LDqUa=ns?or2x6>;(Vx-}IMRej=VmVI6xQR{){ zq@}^WpItoaUAWp^NSTuAsL&^@ifKql+UK>QEDNI^F%_a+wsbV)uR+_F-OS){lqc&z z5t3~o1oD)&tK5S*r(~;k|5~Vqy8Ja1E|B2vvULXp*I3y8bP>Q`yF3&7xHc}f(IQnN zNasp)pNU#~?m(``%>cVDlPeufY;eN0eZW>`<748Z6t=te>Ezc6K=1R4X&&^?yQ$Uz!SPCD#g2O#eqA0EC+u#?~UE?eJ&& z!?NCil%zw4swdGEw8qK`+rtc$WL{cKJo_1e=*o}fs^eX^q&^{|a4%x+xSo!@4^Z=EdeArwfm9xLtX22HcrgRxDF8mxP+_O)ceqeE@KX5@^Y#K3U_wrqqg!RwMXfnohK} zf^$8Erjt~wJu)(y-6UzxFptPmp=IAM^=OiLq%w3fz1<2P?smPPR5ipdGYbwwx>)63 z5t_Uy{@u$?%v{{HNJ_#n=~J?2jX%0P^PAoM*qHp59Xj1{ z@{2a*o)p$Z0Rv`5`!|0bt7^hB{r`GQE1DQp*S~Sex!|0~s0h$CO|G(eNH_zri8qo0 zsZ^M&-ldY@1T{CxCy(7ZB#o#ciIj;(Rh_gr{i4)SLfwIQM#KHd;Jr)Au+Wf-Do@t_ z^Rj%Ll4Ai$LTX0vPgEzCJYun8Ubj-{>d^)aH)lf6pgF^L>9w_5*I4G+>~me57_A8- ziSp@rsAfmDNP>>Loz=LuXWi>r!(AwpG8yP8H8rIX`cV;bv&TH^kF2Y(%mPslx;@Gu zq)OR)bzMpI#-1NpqQw0B{q0fbnW;hxHL~9nR)3IeDxGqZBy=e?;4Un#_^irYo^7aq z-tRmZg*t0ea}10cLe|cm>jhtlQAQ&ixKfo{u`FVY0$QpII5ozcPRGXl1k}zbg_C4s z_21gZ!ASpM%~0#sPVSpZ{zKY6f%+tTSM7kJ%gURwrgccQXUXg2_U9EfbFz>V&Kin# z9tX04cB>&6!H7o@P6*418fMN8#vqcbTt4fqZZ;1lRTp1C_vsnblii&ttCN{<(^0V0 z?bzo?0J{JI7#zo)drb64oaHMs(l4Pyg0M?Kun=pQxs=ujM2sy=B^jMQ#xss>#`-;O zjpxikr1oxX4Z{@PD3JkHe_9 
z34{PY8DQq)Dmn}1KjC;#kK=NX_AXJIKX?ZSqx<&8}unyB;fEvlVl zo13|IyR|7X=M&wzv24dF(+Lf!K98HM3*=(s19EmFFe?<)ky_RDw=)89%_1~X0w}~n zFW|0xoUriW@JfFo;C{ty6K$~0>dk85;!Xu6^rD*nY0K^$brDBL2#5#?5$rt952{*o z^8Hq{G2Wn9rC2H+_SE4`)_;bEep4H|d*b4mUtZou+=xj*?zBQXR)P?lH$^ z)|We9hXeB?+0Z|Auuisn6d*m{>}CIy8GW^px<}F&NDgx~*0!!6=ReHZ zW@>5;KDj}HJxmlI3l{HeU;r7ob9r|dt^|1Q724B|7 zhuKKh8=yUvj}n|{o!m3Jdj0J6kd52hoGE(V)U=PwiIOWYVe{P}l24>&jSMsA0o>K! zDZw38JL?e=GiheuIrk9RF9=4ueCY_}yrU`ZL?X?iPwU&()AK9vGGE(wl~hADi?CH? z^xqXubx67*;epAgUl=NAECl^_34k^)9b{cXB7<(z4n0 z(KjW1Nqt7v5vrDxI2~g2S;K8Lqe}Ua!*n&s0!---R^_Fa#nz_M=_FI5R*OrcThV?s znd_Mu{*U)R^-TdTlG;}$DpTQL(d~|(C-x3VUNGvx@OFR8WWXfl9{xtYWIi+1-^!J% zQElYnb1J>u)sy*Slt>F;y0f+iR(zx}lT5wiQ&co&s^t+R0L`P|!Ca>!=u7R%Ov z_wLQZp$2cC36U%vh^Ds+)?@}3{SW`MW?Pw&k%sCsRdq;l&=hYW6;F+^<~~GtT%N*Q z4QE)(Zugknm^^qM>oFcklKeaXn*uGMprWWe%J}byz*2CMQUJ>EpWj1gwo)YyGUw*+ zo`17i1c4J0J)|OsqNNZ+hNI~vx2rSTXf@7)W4}7>94!cF9xrugcL=r_N z=sEL>?@sJ&YXQ8blVDwO#iW19x<0yMzuj@@tasBpa#Q1L^VE+Q@5)MEHFE`|9GH6s7meYUzgB6$$d>FDF6-37qEWEG-4PTMGJ%WODD?PjM2v#=z?X6~BiJx(}R zu@e0fA&%hdvR04!CU$BPw+;&!%UxexxU|Gfi~o0?}k>pJF+OOFfNo-H*T zw;d}7E-d;Q*7bRgX+eD>21mw*sd@BHQlmk8fiZ!+=0VMz`F61S`SaTT=PBAcNg#_B zq96W7`va&}uK=P2<=zTdg8OW5hesIaTd+kBU1*q~s_B1rMueA=gDCf^=eM3*81$jt zP)jm;m$U7FLejDcLSdh4%Zkm+bxL>@DrOerGSzpkKnk)|RAEFoNjJdLPE2)(vNAK7 zZP^m>+4tVx=vZ@BQ@SUBZiyi2M>eVW?tuMc1KO>**>3O1--Ay*{-eYD^?cnL_WrtB zMreLW7CGR{k*pm-=poi1_+gwwsYJc&jDjUF!Ray$$Pip-6gJ_`4TX(~Iip{*X_%&? z*hC-M_Ky>F()X`T{s9Z_Kr)kUzDqWDb&ZLcX*Ym$SBv++Lj&@@Fddd&u5utMgrJ~r z??%+_5G3QsIGO$bkG=PfYBGJ-Mwy~0sHlK6B`Uqf0)n)R1&|;hy_26JB}9ny0D;Vi zfKo^5NQ(#|QbI&RFBz!=482JZVuEx62_*y);`ch=x6j&V?Y+N0&N}O?^{uo2iiUUP zZO?N*_j6s>eP5dl$Vrs92*=bwj-Dn5h<^=D*NwC$A=O&_BI{(Jhr>9ACO4+p@)v)C zz9}4?$3F}Hd3hf)Q>Zbff%_Q9o=iF@U96v5aehnQlY-0LWS( z9omHH#PfB(39Q^3Br?k}Q;ons)fq9zF}+e80CsxZ^C#>eaYYNyGUr)sd*YdQcxQO8 zRwfj_|KL~53DV|!K%NT-Q<REwKcQWtVt*<+8LxbCn1prG$h^qi`uWMVHM-sv@qG;37awg3hHS|;6~uJv}K zHAR=Ka$EH*OvSi(VMrFO3S=ER-1D5Jj_iZ&<+C3B#TE*4IL|S5v1{nif! 
z4jcN2kOFBF)6=DnswXW}cYTASR2qV3^6rrsVO1r$qZ?gQ?GIUQg>$)O>!0&wf|Am% zK^+1EBTK4?K93)I@P6dn#FU_dZ{^U~za7n>$pKKW3%uG54tki@sdL2xl|HdFTBTlv zd}3e~g+N5nQ}iF?uJ&}+XHV|A07dqJCu}q-Jrtlp?6IJ-{=r#X%-!(|CApTzYmcTs zwRlH*o&R3)=Iwl0+1wOajQ898o6*HK?}&mD$`k1}6;e9V1SM;;k_UNgBJpIf5UGhn9Ae)hB>bbwQQrD_uFFUTX3F75D-!26oen!;cCxjgY&53d z3x49H=T&G>83Q+@BW2d+7-8&ky~ZNyZr!`|Lc+FtbASI+%>q4`V?s>{PCC%idg1xP zfSy^I;&Sh%alIK%yw|r&vzto`@0bNlN)=NgXeZCQt94PrUX^f?tXz%b$E?Rl!p0wl z3HMJ+rcY=geCcUW#3g^l!iUdXO%t6ihYUCpq;5k22&oZzY7agzXSo^U!}8c`Zd)8} zr!7do&yv)2NWNraP-l~(-G9Uj5u~OZG?U$1q4hF=FU=A9Cg8|2AxF{NW!Icp#5n(! zGxS-LsD~OuK=(;sZH4;bTE{SmU$ybXu9(V2uza}1`PAiCszSB)lDF*AQ&dw4T7|#A z+ea$1W7cD6E!xK|31&921(DD=xHbW5o;$uYXT+;tX4IyJJ8?vrX|NB>_`GtCglS>N zpSeOD$M(rUnSM2Y{r04DIX|HsoSp;_GHjVGBfd>gcT~881V!W17{g*Bxm!Uyu08~w#` z9W=%|+ytwkJCZ`gDMN$fw8I$3pe3I@Nwyz8n5m11@Q!hA(O{=n_-f3(Uqdw1JVqAC z8MoW&Qs-sQTR4nv_1k`64wU4Gc+m)I59-G?yB5=mT;T`ye~%`WRn9Fcd#5fXcG|__ z^*&@O1cWd)w@CCwMnta`K}AQQcHP+E=&7uG05XCX`aOspHK8M=ALFq^Ndtyc(K(7 z?p>su$>QW$N9B_i>c)QO=lTMK!WJEptg?CaHWu)3q)i3w3a84{BmLdVc94FbrU84g zCi(5*hSO|>TxRQG9{;%C4LyxG>aQV7vggg0LoA?bhXFI4dj6$l!3vATcvnv8lx;$1 zEVQjDYeSQiGvR7c=uS|m5VNSOZYcRW;kY^7cOxk_&>`t*;MYZSV@(f4=(TLu-!dOc zB&2QZXUgNWZB(stt?liyJj@K;ZtlA-hu2|63`8ClaXdR*FmWVzNC*E!+t@BdXnFFS zwKJR|&d>DLi=M;6{Vpd;5$5O-KlvLyt}$0{dz^m#e1Tt;ZC;s@-plTiyp~cNqI8a+ z4{lg?iaZmiYK=QKR;=8YDJL#DmI>~oXQlgXF3v>$&Pk&Yf8}`?ShguK>oN$$WFDjl z<=Cb2?iU}6;Ep+|@!DC>Uq+F?xAoMH)1I;aVkc^@{FGsn;jd5;&>Qg~&+Nu5u$f|;`BDGkrx^%;A?W`CjBd-K z5_k}*rX41-Lq1|kG)&2F8Ys+v5+X`~pUbV!L|z)*)iCckuwOV#7R41XCbLRE4~eCJ zqO`zV&2(+>7UcNT7BKT}6VdT0>Qr>!IF!J|WR+3GqXq zs)%=Xs($~kO~!xPwxwsk)_nDb-@^_@Ikxjp0PS<0B_j6bjhQi(O>;@=E_JQk`+o0H zFE_K{?Q3mUpRpkl9HP~=qa!@~sx_B8T|a#yo%zGqFVs=};l@;0Y|~^jdTPpmVytzU{THEA;7yU&p zmQQnP@+P5E$=E1&-TKKXt7^uVn(|M+CRt8Vk-%hJTqu(yWUY^lj@@R1I49EX96l@tdWY>|n973x;kaV1DxP?SWirncr9TO#lM0@xT5BnUsd?hyZ8#@mEYoBM$&* zj6v<+1UM=a7)}%9Lpf&X6Xwe`k=^*T!=EMq((nz4xq*?!jF>XXe4$VnUo-9Bt}XVz zFYUj6YUYAG(Y!8#79yAh?s)Z!+wsS-cgHsr@A4H>k82Xr9e;YO-Fc}=N`fG)igg*T 
z&vZR>TvY1Rs$|5!quyN-YYP~p1{;0lNN{hWKwm7fW_*s#ziI4Z#Qa_gc?>;C zO$YMgPp<7JBE7SHQuPyRQ_U6|a&LI*nN@Lm`ISAqqBU|9}jW;H|uYXwSh!s#?r2%$5oAQLkEZ7QESS;HOHZj4vlMB zMT24j4+Mv>wM1Sno}v9sz58O3&iS&)HL>_-u%>6R(pF8ljWzT#aQQyAl!vj*u_LV&T!%SyCc zf^Dn~bEC4UuxQ>lhX!sZT;Wu7-{{xE-(}qmUa3J@gyfiq8A&ng<9Dr(TUh(#srro) zHoA2xqPk{>_Clt#xaO9w81+X71>!i=!yJR9{)7d;zy-T+0vHds%-Pmu7byNl8*fR5 zTjz%CNt?!ggj^^hwT$B%nR|&lYr$|lr`R%1Q{hHbq4E3ib%k`vP&MVnSBr7?C#M`X zwuGbpD$YeM^oTykzpU}^hC5pz@ZL>TGuk5sy65}aj?{{k+hnwqnN7p?i{e50E z@Q=|}O2D}5{?YUd+Kf5YDfwc~LdGu9#Et2pUFc;ZSN`&`0P{lA6!tL7mLJpcP2h)D z>s6PEr%u}NZIgIU$nw|u7l!7o4wU&40JU#f0mQ(O|Lf4-zX|+z?D#({;tGypej-02 zALXe|R-)bMh8|Wj9OC^~0U;R*&)}3(DX4@H$4N?*MTFsEv0q1M!R~Bd4^r9no%*Gg zMBx{a%;{*oYqt5VC6m>oBS{fEMe@JD>!# z6`^HvrDWNqqv8Gd?$L%ge+{MVuw9bFZpi0tcaQkrJXCLT*2N+X7O%LzH=IikmJ z4yLDGt+&aHD7URO-fX{t9!+IAg!}cZxL-Q*UpI1D#CK_^aV2gYrd5(@qRgdd^}sMU z);=`linaq5jPj9psWWH5k~Dg`EM!Mps#~uUate`gFr6dnWrY@&{hY zmGbe`DyNRwDuz_B2BLl-W}9q~Go6_qDFV{;8wZgJES@IimP9CQ&^%==VGDSq&++I6 zlj~g5755)iV5=@+IX^UwZ?2Qp9(=SII2=Fs^ytm@*H!YVS9$~m>MTzI=8b^QZgX8a z&s4I{awI8(AvVK(HHc~NU9*Ykyd;CdOve9r@thC4&C-z8NenEf07?l&LqqD^_6 z?}8{i;Aob>Q-=s(hPMGl3{Qmw<69I{MuFBf029ju(4aq}VP^mjH8so^AUtnf0_1>z z{%*&B0|B@)e;}Y___9glB?g3d3bF|L?@RrE^i<{Oti9j7Up-3bZabUpsC$pjl1-CO zjTS%0tO5P&`2Tzzng8o`6v{SDjFjgT0BWmO6UCK@RmoMUMI*!Ujt&A9YCqml`|;St z-*5eV-XY#q)|`cf@;@=B;{E@b7s+iyK)cw&io@4*|8V}9HA8)_#P`J?$uZaBQm=t) zIcXmufVl2?bBEr!Mc^~TV$Mj*5fVm%!ChH$(odYybW>8_@b?*d3OV*QrL=zX_~1 zuoT=OblYzN=P^4aK8_rF$l7PXdM`VS{4@D$GEeTCzz5GS;6vk(Tw4x~ue4u1#<|2l z#=p0-n*QAeU<(rO;>bnid80%dI{8h&pXZif&mSAEr81I+&ie!96cuK`jns5G zcs#~3Q&6ekbt303=6zik`4+P@j<}Cu8E(t><&r9*s#5@sS{85t;TFMW(XZkC{k-a= z)ewg)TD-?-@IuGdR-Zw6&gNSG^vA=7sJXGceZ2Bv#^mF4UC#)wSCGw`QL&RR-=I&-gmz?wL;{tSAg2KheM zfv50&u2UVW*u~#|!5X=AC5nWA$PE z?#{=LkLvzaVWAx|=04WM9mys00{^K zK=@-;NxM{zG0EnbE&}AKI5qxFyOm!uxQ4*H%%rxr4kdn72*fvH`lWd^l#( zKD1pk;?65>gKtkp&OUr*x*Zm9I+;ImaJ9nE;lL=T%x^EMtse(e1nB68ZEUF%zBc9r z&?_2Bo=61$kpK9%0Tpe@Mxe`K%uBbYN%_S`T1wuwK3*qZ-`ebs(V8*{Cb%J_2gW}j 
zv#qL~UP@K&^gEz?e0(I1y_7!Utnf*pG4#6SklJ;}B%6|Qvp%bl5c3a4g(;E`o;^S7 zq6v3(HX@LXW;C)B6rA(^)BFZx!XF}^2{n}Itn(0QYs_7`H6_e}8=1#XLeo zr+TZX`FHKsuBL~w&czJ^dXT*7ID+zkZ2&9#B}Q!psZ;~IKJfPyR>4h&L}-cl49w5a zc4Kmd?Z7~m@~^=)DuGtwVf|kUBziF3oEoO1gPwOQv4_@gC>3G_Fi{g}DFou<5N<&2 z7c1a>w7b(7Kgj&e(rhhWY5K7QOHtI>C?`sC6EUqN43!|9-+T1lDlNdbhoMzGzV&o# zA)iXhQXB5!FCtfw!bj-chkKr zTzdzj3k&q3&TchaRFu>Z(m=0Ci+|vuuC{|Jj%>^p<9RX@KnTD?cM!(k$UBg;)&&$ z%m@D)|5{%8;oDjfjy>!o*(=3|FK&b{1)L!;A8*$V08I;f+GmD1&U$R(VKW^`F}LB* z015EeBQEj<8h!}umyc6lkid07W`1mvNLZ6T`iWfK5{J^|W_k8YhuhCKHWm}YHGMQf z3-&48YZ+<3{OuE&=`*CqMj8LTvSqeKE0Q%nsaf_<^tocoH7($efS3<3=Q-EuIqjh1 zcbF3MUood3zDK6`C#k6G-7vwScmw|;RJ*&J5M)lb<`uoO^ZTZ-@lX z@Ev@zjD``u<2gSIc6+%~;Mg%I-;@ayiX{wL0u3T&EmtaBvsQ{RUb~UnDm!?wB zJqGa_(BTBNxDwpoScA|B1y+S`>(?Fly5k_o7e!@=6ISP>53wY4v-jXX#QwTv9Ckh#dB>ZI4@b!-20n(k|YT|_Iwn?llR!~sh>YuIg*WQ?A7?M_kGWnG5?xGa-D^OYL;b1 zlWZI7@|+sduN6vXIa>Oa6q$I8*kz^%;gkojJ_{Qui*nHkbLn%iY_CWm8atG(+^$lT z?HYZ4zF2y=;O3xzuzOd}r$dJFsUFdm^{(*K0ml zC~tc2Uo@KqdvQmIk6y-eF~`d(FfZANfvqu%ZkwQV#22%usq@tv(4dlK)tJcw)lfgZ z^w9I~RJTJ`Y?BHcGK(bJH8odD@<{62ybD}AptEsx30jE6iTINCgYTRQhoHnz!gJEs zdUQPWNF%&Up_EBQn&DwZ+LqQ$i{VLDuYr-BK&N*4i_m;hwNjMYU6k4{1hcz1j$bS^G$ufO`)+X@(X8KBZ@iRmcF>oXRvUXa38XQ)vp{@ZctSzD3cuf~hGT%H0rzj<&4e9+eA!Cn5z(i5T!MSQDq!)KE{Mb+y zAizFunVxfPamhNl;B&y+1u0`h2r1Ly`V7Q#JsBb(%;pVNNZi}1m z21O%->MOmru&IjJPHYXV6LcQsI8SD11Hdj#Cpqm|9+aioT*lL5QJ-lN(Z*k10}$|; zkV91&SESTxE(`#^5bu^{uebSIZo4w;UubFEwg`65O0A9>6A3?a44QhOyYl9yWcly8 z)>yJd^Ollwt+JYO-!-@D>8Im+%q+hekB#bnFkpJUU zDDv#8bahlS0ZcQEH&Wu@=?-~f!5s=2r~vy|#&NAW9oySiI&C_GZA;6ccJR@siD~Me zuZkunx_DHUndEH_Y&*Wpy07pAq~J1&J4RQXN`a=LcP8#Ds6Yc;8iwQGlP)X0xXH9i zVDV7q0Nj)g!;{tjU>t2$HpDCh^2V#gcI5HaKlO}LGrowt@VLmat>To+{7WKwFNuew ziAb%oDX~2=ZXA#Kj=-)Z*IOqS{&vI*pZggMeWQv=i=WP9)88w=oHiA6%ptGqept*x$<2&cEUHZt+nDaDNN5H zFDlC5#unn1N3A>|+(ykIeS$CpSD39!Uk#?`Iz71){-M}h2a z`xEQ=5BS1A|C!3nSfd!_y@{~J_(u3M?2B|HS4_exjHl_o#%lJlOJveS`{$7~3f#18 zTla~*?jRC-2FgL-N1m6x9N_j30<-R3?7$xk=AO>Pg?dgwXst$qnBf$zFNO~6_rAOZ 
z9SUa=tFe(Vj#0ul0re{fU62I;5PbeAjc*5H&S8HdFPj{aAS?ikU|NgtLLAI+`F~-h@FYdV4R> z;)E4x!-Sbo&~*BcRSnm>C<2LOS8$tM^)c56ni`({9N|y|g+2|itHc58AI9|Y7-ox^ zm{p`2oS5SMSw8)sPR@ZoI`?XfrwP20)0|*hP6ts00TIFY3mbagW-2`o@pl;FUjSC_ zBgY!Jnm8N>V^!T&$2BIdcNkXkjmOUdgV9RsEQ5ODgGt{4G&8?syv(_7YIAw-n*cF3 z4yA0YJK2;BfH&Tq0RVG{Hw_2MSy!0){O8D-JHP@=27dYUH-Tk0+`7+P4qv3`n?S$N z|2L1?l-bXo;an&IGWzYN_ONg-=S%U!nVlt=tZ+EJy9D1V9ILWI;r>^5c-8$1ouW+g|77X zQ&eHTb9JiuMe%N&#QZ;EJRD;?Z`m9kMnvgCJh^{y=SGHLk>7VmhLgL^fV$a*{9KMS!i@8HXgY7wGLTAWD? zm__xtMwP}RmTHClJJM*$WQul4;efmLVxCpeu4Fjc@FNBv0j#NL1RR!2*?1XdnK=S@VR`DWqC4uG}-7K&cF~9WLLU&(=O4{jX%)N zXnTD47BO|Fvq)vI>T;-2+q@OvopaCeS%-<3pLw?I(2Qz|f~-Sph6QeLmefH`HDVdT#PYepazs zqj5c|fpj33Tvcp>a|vlJRLx&9zHJj!I&b3i;&w%(Rc^k*fUdI%+*n0LMbQ^itJH}U z=D2>Lc9K)uB$+z#{0qE)F?Eu8UK&HONc5d@Kafa(R$AN*h-8a4Z8qw`I4u@BNz?`( z+j3Wu6_iEe`Q07Kf$;rEJLs3Bs@pu&nyoqfV)b_cCwph7G1y03S!^S2{O~XAu zukNqZYn7L7*C~n>L0k2*AYn^NX5ERMx6cNQWQ5RMUsu@=NOtSA&Vc|t=n3)snzU?= zXB9{A&$$zvJR0^We;9afD>T5%Zwq+}LV>$w zuW(>U?hU`f3*<3Ej3Q5;>6o&P@g=^@rFP`W+)GPqQ4Rg15hA>K!f(L<@urJTJcX#X zfQQ2Ukr6)S%jXDRXalBm`IqeJVi_T;mZ#!$$C00NG`^O!_6GIoQB6t~yu~8|h0_b5 znK7kbMhVaY<0OKrN(}pa@2z%NOB(6<4qOp;WVuZdFhIQRLGDaT$Tdo0rgOW1oj{w| z3}=Y$QLI=@KNK{XBJxn!)3A?oXE~@rZ8gR@3y3ijh3z!bVX48KFW4MVE`U2sAdH`i zJK+yJ28%=OR42Zc0+s^rqKqQOr=c4$>vBF`m33`3gv0=Xa(8TjkPyy$_MV9)v6daO zg0{SoqCZ%&Q8k*Zv(So1<#bA0@kGlzx~0Qv-cdLe_YV^pX!D13$&E$5*7_IfZATT$ z5S6&?n!!-XP|4B#fLV%X&|B|E`wF^U^PiaQx?d{kz5hr$d#hO1%nY~Lrv^9EEo*Lbl%F4*y5_gMKTivNZgiCuR?kd=0ZyZ@ zn8hAsrkl}qOgvQ_?dl1BxpC?@`UM^6WDA1Hows_%yEQLkVQN;>JQ+d|>^3?(f)2#I z`IAD857jpmzU#V}V>egvHQBFQ5?XEhm7V!!KYF>Y$R>IIXr5W$*{3$6X1z0(P!+o} zRnfAvA|)$Il=Osz{z9gwYG$?AlC>^gxf`cD5$HAyR425~_m`ke>keN?^mu?cELIhj z9^Ofo=E*a~Vy;dZUGkf|n9<%#hf#>_pi@iZJblkH!bJ9K6km2+vAQK&A8x!*nQ_O! 
zrlcyRcBREf{+)HmN_*wLoP}v#j<8;KhJ$~wT2=S?S_gNA+{;d`--NOaJG}zXL(juU z7oTN~!{lnl?3=u2_ZUhn5WT)jWLf~_V>iwdFNQZ^pBEn%8(hVIKIWN6Pe`M)sPR)Y zkMh1PDHyM`P{ib)L1sWtcBA^AzX>et+B?X0d)VP_*I#RH{u&9#Oyq8hJW>8hg6#mX zqH!E9J*=4pvz}=Ci9fuJHsHXCuDsJsYzO8io>uygoB;~u`~v&=>U%-F@N{K{+3C~ks+&GJDQS`;RSl-TSB?Z zoNIlkmw${R>b2FG#DD^O$*HnqA8q&E&g+EhM9Xgw#NVbA%w!klarFUWs(s`z1M_4p zjbmTm257^c56w?8Qh~}Ev=xwM#36zS7+((7If)M>95SoeCrHknFXQ3SNzP<$ApdVt zE+S3$$zT04GGm!HJsmh9jS-EW`W>|3(cX3HU1YZ9#LDs{9zZ*4Xcr zfk1X>4szYbPlvOFxmNshM~jLguq-nwr*Nsg&<*mb?Y{Rp1JY`hvDIwVzFi|$Dx`KND`v1P_(gd+byF*+M;vd;^=y)f6zU!_T#b9>D{~}4G`DG z{I4DNOX{T+8O1ia0fX1jtqXml`#bK$#lAO1=Pjf^Kw0^sIrc}iC|mBZD7E#pK_O;Z zd(x&*>W6C}kD_&9#uM`ny<kOzlbVabJmo9EPzhayx~!>W16Vm{=;I=lf5yVrnU(O8XIjq5b09IlhW+m9+Xf;} zXd390PG71fjb5(Fc9Ji9q}oe;t$-%8z5Gh-v1GK zN;Cg_#05v;@iGMvQhAV&xg?*tAg)+vvlwXB=h)qRn8_FP;CvYCmN1f-^w_gxeFcf_ z5KbCIvxG!`^rfDqOjD0X!>#x%>S%b)tu7CM>f=`)K zXWg1qc~qg?5Ui#frK1T&Zxl2}DA?u{bZMLo3M2Uk+@p91pI9u{g|2?*sa(o45h}r4 zr%_wBbzauzk~jJJ_<@*+?QpCZUxXt%eVG zv`puu2knIwA%o}y>4 z8Qqo-%G4cH?caP&&ryIogg%r~uJSeUJ6qp+E=<~I{g8=;a-s-4aBN3-PRv=B5wO2M zE|?f!O(#mMwJ89#krs2WBLT7}5glB)HZ`ZiI#;)~+;rEfd`?kb*s^q&Glye zC|%lA`KoGuFH|l2MzWk__drm$km{LKqOn-j%Z6cffOxNG0@Q%g>ybxz0EzJY32N8fP;tTCB6u$wxU=(2HUQy){;agC! 
zYuPttlcGw@EfLGbJ7g1PrYFC$tKnL?Ir5=?s}$|KpwK%?#>JpSTXr2|L&P|v%iUnSs1fT-;0XFz2v7NALv>Qi>i1DNXOY|AZ5G#(U zkp@a1P|`f#*A~$eVWTw*-8)0eba1=1>NNP0ZxuoVHEWUfI>UT&u~^ zX!QHIO|8HOgn~%({o~B1qhIr>wIRra&YstID8a$QWgstvk-ZDQcZly^c5^o9^LJ<2 ze@|YD$Z@>h0BjrzKq{7a7jF~_1Lw=3f7RP#chx5XwUBvqZ=u0tbycj9hLw%O z5NlB@^7Yg7jBDHrH@r$iqkcp8mDHCmR~!;YG;$m(R;LT}9+Z?6$=cr>EU69HE>luz zb*=q6^WlT5{Hw40bBEbR$5A(S=6u_vCv#;mC)Wx^nEa$lAeR++!Mpm!N*I-`73)6S zCXI3hXrXbQA#=)V-2ZZd1*%23bor$J+ZpHOb6`Y~(=@#9z+BYDZ!~=p8ssN7uhIuO zQ4YOo^KNX48q^$Mx#j=Iog?w3o08^VvUI!c{RYK~3>Ac~WPVmIO?N0M^2Le!mgLye ziN8UPw;-8o9hl=h$1iIZNZQ_WZ>Amp0&{nPU$d|>iZ*N*+b~c{G0`wF^jOsgt@k5b zKOsYf{|IGV-6W-*HZ6xomg0TFuM!=^6&$YnmF(}Aq)$;`dFIX@j0$k*boYQFsZcr& zm^pfWc5hZ1J~qPFbDvJ~20xc+&Dc05@4uylgkq*=zI3;~<9SW&!hZbp*$+dKh~GgT zNB^tU!qly{ft4LcW1QXY$Te&k@+to%B&u2C?Ebo_BVaskQmr0Fu^qywQ#8{wlAIDp z>}>LCkKY`1Ncx(Yn=$K`=SZrmjr7~R*8cD->ui?Roa!08`<&T$@*|qq`jnb=*kYPG zDTUfB6XasLp2XBKYn$427&Y*(TAfp`W}0B>t*$`&CTRpl_1mztAScmYptKlJf7KkS zxJQpfiKE8y?tl2atdm|(c@nZVYu>SnlFu zrOPw-9?j{@xU73#%MI6WW0PBoSD7MO4}BVElk)Rdp&R+TNs%R=jgHa}$SPYBoZMD{;;NxqfoTzTVKh}$!+yXkXu)vcQVeO{H^6nHlarWBZyRW*`%=pTJ z>n1~HoN9;8IM29tdC{#-yZQxMwz`;SpL90QH5h)r5A^E)vLvMo?Ts3Sw>h=W%ecE~ z2yHEvNxAo=yJe}`+N;dgT9)H=%imktkW^KSgDn)`y+$2;@tkK6niH_m$>Rg$W)czz zRUn=km}3ed)nfL~0@X`EU~uJ%a$$^b0yEStKpplAf0RGYpZX?{L##inIlNp<0rtGE z{B0~H*cpxfJXPBmqv4MTsZB9Vfq3r5z&>-qp1lzmk&KL-Tpm#2w79qywTj{wa4(`o zsz-Lgr`#z#JCIEbFQv4p z_^Nhq2f?gyX=Z`rzp%0i zbXS5T$`5iDZOp*}y{e~PgiuciH-LxYNrp&zT2c$FY+3KbUR}0h{egE{uotod=HvwP z>yM7`Lhvp$C66Z}VPd^JbVjN?%#)AJk2FsfrtadZd5NI@UwxG{;T zfLi!vxoml$BiISlyJu^_N0=>wN5g*+l zzbw4S6nWNiW*#Q`Nj|%tFF)EMfqWVwY#8nTHFeA}@0UEAiHoD!GHdW%HDR^h9jOmwZ0W%=`p$qF%|O;8It*I+b|jGJv*^P~?wr-<+rAm1f@ zG&1LxCwx^*!1vm|uq*(}n|i>q;$39R8gRoCwUaQMyPGYZO8|G%4)&?q$4*b_ZDK{( zZaO02i%MAy29F~Ey9(9(Pmf#kaxNFpn2m>78jVc>>P>Ec{9#3DI^WO(ep1A6SF^LC zsD`08^xsWyy8t5mAOHQT{eKrAi40&8=PqQlQ$Xv{CB4jp8CnvACuZt8X&UE`wwwbe zI`J$BF{5n?NnH$rR*iTgC+%D%_6$ACzNLVvq8p^3? 
z42JOwVH9n)TO**rH;N=FFx>s0zGA1!4t?n`ksZphP~PH-gX`Yl*=eJRKcmSkVRm3G z>nJXqz%jS38>u3eL?sb44{ky}%&nSes880o%4ayMSKdz(sB3(2&$V~UQ|)Y^eY;9; zU!FD6QKFVx;lCV-LU#ccA5M+$mT?lB&QEe9p|cyp=LsO8%%c5T35@L_>+&w%i3kZ!h@Y zhyEY>1Y+k8bI@V#cblf3r(f%c>WQd?uD14dQ_e%z^;NQd;4_O??J>DnDwuy4QmY&i z2jd+Da#_cOrRG{ixKP5(-|~iZ#FGjY`#xvVH;SX`*e!)QQIuYVnC5i!W9E9H zB9j_l>B))qXDL+1sPiOPXGQ{AEa$|M?xX=&ka)`_x>U?X!a@J>@qid<<5F^3CGTou zPFGIMh1iZ1vHr3@DCIr5SKSInC*aV}pX(ONq=Ko$ntHu7O5nn(PXbnc@vx#eQu2>r zU|NQrj!vK#CV#4ELo*AoYpAT|HDWL6H-rzIFgj@75 z)(Nth94rnZj5dh5IOinwSoU=m$X;vcIda(GOSyCA(Z$A8<7_QYnU}>*>B@wJ6n|vDHts-s zn%J953(V-4(+D05;MLb8ZAw%f+E?z4x$@S5dW6lipRQQ=w5z|?cis%meAJqBakxat z>gk1F`K&cJ?7syArQ*Y~JO368VllLw7R)z+TNpurckgidCa@TjR%Xxl!=TCg0{rog zZvqNP-cf)lhy3?}|EE8mU2+1brYk3ISIq*`Ndb)D@eftqoIW{s$w#{5KHryBD^tkV z=-PST;H=Sc2O(zjE?G`81tl68WOKlsDwP?g8n7L zn&m+BD?HuA3hMs}QKQI0Z&0_0a>0aibL^+tH@_k|WHzWo949=t>Z=r8lT!r|khy_cl}TF%%fglo zOWVDi8f3O=pjl2*f!<)2*<^)YV%6Dv*%Yq^$7d47qmP)NprT zf5N}S(yVIxX&il3zNV)XfB^fE=j(6cU1t~ZW**m;YT^CaFfWrGxgiO5$^Q9B6O#|A z;!&2+PcL1b0`8RK^FxA2xN3IS(Db0*)@`E+EAw%D^hq_~O0IFjxkwtZ{iI(RgyF_Fg&qlV9Ls31jHyjD#li)l_zV zbcqjcwf(@T!nTBmJ7TPtZE zle~~RMql^)CNSM5Sp5p9!vD+Fnu84ccx123`~`_Dy3#iYXr2VMZ~yif>}oYT%9laz zjg1oTfj=WGcFRg~cn1&r<0dDnLEYAATy4XrfNf$0&;T>6vgv`_aCG1q5e6}>(9M)D zPHY#^;;A=GELm~f`PaARIh1nhV@|XfmN}GH%5lE#F9DJ)RKuL3lzFFp@tS^6=~2() z;&}Ss1f((DDK;jyP=dS?`C&+W(1m+;CG91kAxw8m}tRMxA1zzAWDzcpu zihx|7_ep-c&DW|k8u=nzKo(Hsg4;QbTQOi`_K^6{Z{#m=Z78rSKYEmZVQLf4gMH;6 z1xg!NHa-CIsZ0HFqr26hoTc{|f34Akb_P)9dJn1#3)`(AQg!=2`2~#Y*`;x97Ap^i(eK@_p70v1WwAUb z2CyBl=c4^0zwWe}-ZIAmA%c`>*ACbp zmV?vJta#jtBB0T!kkc3RftVm%=HGJR?HtD*9HYY-vPyI2{HE9Q51Nas>!Y07sZN-7!d)fSqey%2ud$mRs;kDM35E*=_1lJ3M4j&((*}F zT2z`eBfS$L6a|q^q=p0$X_8Rl3n`rG-QT`@tzGUI=Z2 zXiqGY|1v!!w5wgVPuF92s+vCSUb{*+j>#lzpX z&ocxJT_Jv}anbMFXVEPOa`nf#IYZPbg$DhDTWV|(dZoE*j zC{_D*yITaNJG1hLb|SY*O(K2$2ytOtpH5qJH`JX%xaQAyDkl#x_f;^Lhr>dFc>Ugj z68+vo69@bwx3wU_@j#1UMvQ+;o@&31UMr^#ue7$)8me$de5)8q6kG~DyHPV<61^6U zzk^lpDi!e^ 
zTfr<%%yLwBcJUw=S5`M2;#V}nDr5FdSUz_@>?`DrpQJG|)|O&rjx)wGHRINS z7NK3!ZqZo&cp^-7A7d$3wojA!Y>JLkZ8Ou)6 z?4Ze|AIZ@fsK}j6Q%j%iwaU;N%(4y+I603f>FTjR1 z=%Z}mG65R~HCStZo_L$1ZEkft=Ef3SIP7Lg!t_YBJ@qHvwB$NsqxHV*iBL{DET$I~p>y6kL_vA+ z{6SaZqEya#mDG`r0|8I2G!CX-Y9@;~WJ>NTsZKW!G0l{Oo!MS6)Z(W6>9medaK5Xj zwW+nw$vg{9<$(l~>;0ZWd7MC|YWxqk%h=(#F9LW8Uo9iK1WRrnaZM?hfcuA%kqpxU zC)nO3EpKpU%G`=k+x>tz!)U|3RzkQlWm|6CG>AUgG#lpO(7?*M*_=>r^f{%vCU>K0 z`R!`H><_{=re#BCGsk5lPfUQ(=qBUSJ@q8@`?oxJnXOWXv)2b_?Wx>)DkHA40lDxg z*EopYaI3quJD1m=Qd`A>Kb$nd(j=H-xN}QQe!Ii0=nW3n{bi~qyUaQ8Y;?GMvKdSU z{qpsNcY3#N0YFwi`2L0X^^)p>()`ZICRa?w(Vf0d291?eUEQAXty`G z++pN#^ho8?-tXnJr}g7ihmkDaTBj@msqp-q}|A#n<6 zbEDbT&hsER$V;oypL0YnCJi&*1e|y&c9xUy)h@FyDYr8PZuV{ zP9NTPrgn5~viC?8dD+lNNdO>;_YSl0NjG2x|zkqq@z zEpQ%({1z}czy;o*I4RJ;W6-1*6`aTKh`2BtKe?&q``Qr_zgS_6StbFu_BMH#bB6Bh z9LE0^ay9Y9n==T}$rHM_&d!I{56r5xIkCWubS}J|cyf=<8z9KyFMJIAZ>#aYt;YX@ zt;R3`_DOUo-Lmy}9DgO8jFsZZLFVt!@OJniq|YL}<-Q5%^22{I;VKkMhe`QJcs|95 zcizk5?0-M7N{ZC5cN#nA&rC5Jm}1tIshnd3S)|ivwk1jwH<#(Lm^e@Lj_j8WE}N{G3}^n5bi|9X(#Z=ewJ_G{K$suAIx!@5|> z*bxiC$UL7fw&y>YwFO>EU|uDHqK)SG7We9r>bLaw?@|l$X`a@*2XZXR?|GzJ|585B z&7hpwaC~U=YyttrHor6$XAHp(7sh+uZoT|rpv^P!_=}gu{PvI7|WfjQo$r(<|xjQqrT*0(yBww2?(yqA0>q7eOyR-DE zn1xJXI;o^xgQ~=uY-y)FtJVH!VRYROAc|j4_I>hM**)pC)M=;EHzk+9WjTzMM&$XP zSn1@!&#;BCJwbFNPZF>eyo*GT%d`~@Hr$`7h6e z=c!vslewtUnMDvW3W)h#jS_R2Cu^AaXRn7m`)ET3@VnWwKU4QTd+WP_%LqUA`Ppt~ z{fo6;56^t>IB@vUBwqL_M}BIbXotG)grD)2y*m$FmUXzS-cxLAU~wc_BITIo_UZc- z*D`C*FTJJ*h&|z;n%B=O=y!Qd9)8$t`oz~XQct8n;c1IC*?KznO*-*Pk#JeB^Qs|& z^@>~BxP$l622vZHItHfxW)5>zkHeFZfio7{>gDic%dGYS$+GRe;faO5l`^D1-y2e^+9mG}tef%R=4=?Y=PuotCpiufQ2hKNiSANwyvg^*iCZ;6~f`RqQ^rCY}|>$WCD?)lmGim%OtLyfLR>P@D9$+XHO!z{(^_|4$EXeB}| zHX98-6+fD4pPMKJ(F!KCYE6vth!5InD-s&S$e`dU>&pZKC%A!|xS{lF`BeVuj0ie$V8w)9+zzKY!|Y$k3s4LFNI z9^*$i>n-TNkkUlr%0n5bk0evX6vK{tj}GG^jv6gJ;z_krcCWj_e&mu8Evx^otBlmR zV2J$e3qaI<@CM);C2I2yb!1)x5Z}IwUO-ckfC18nFZY$b_Mg=d`Io{XP#p{G{(sCY zcmp0Zi!lm{by8b9hLQ)d^8=*Sw=ae!@LUXzmms?+@$u_lJ%4-ncu4GI4iK&|xHM_u 
z@FbCYC37!v8nqv2kJ2xs_Q!yAyt5Y++2TD#Eyra_V!tB5pXO05{LLXu(2lzdK4S|2ZMva1R6x4g763|r8>wsbeQZ{RlymzX<)E_b611+FeN##MNN{AOpzBLi-EHeHvk ziRx}2{JZl+Mx1Q{1vxRX3)4(WbEbIztq_6`8c);4mz%>(%4~fbEUyf3PUK(xYCo!? zp<)>lquH;M$jI$N+v)|@;Zkv48I%T{k_DjEAmM?jLV{Pezc5m_-8%H7;7==x~ zmocdQybth7_o0M<9i#;&bI(=hSIW=a-K!TfwQhPq!Iw-}BJA$oJk@pvJ|-PO2v8Hv zwM>}&5wJbrYNWa%?MJ5wc{o>WU+(0L+#FrSI!E#xdnIdlooKX64wyC zBPlr2s_iri(g1o(83`7Pu80OJeC`U^Jdc&Z&V8eVoy>q&viZbVH=($DfFhQJS+aN8 zSxB~`_R(~818!L-#8%|ng%US{v3lY1P}*Vo&%K{+OKGNFFN^VjKVv{0U&EwwloJ`f z9c4Zm#qv5$fyx!Lr|PJw*};E+qb86yhFtZ-9zZe#fZmvny#prkDr1Tl#Q|g53FLFy zn2BJ?_=B?wSUe2y^$A`_Wt>|YrcU{PkUqhBikm`^L3V>dN~<;~5|_M#5LxD zj&Ttquk#bVR)w9uoC8MWh}$~#2Lq@MudQ+A=vx-SDcVPfUB22?C;jK}W6Hb$qw2p) zJWv*jKv}>SWq&IR_9g6GOI@+g5^Q4)Je-O8>k8Bc7tF2ce}qJT4I$b_UmAt+&JarS zxS4peKi3W`rsCiT=7MMqg#L}2<9+1X27Ke`JC=N23vEi0CYNL$rZ8tFl zYk9%gL_n<@Cj*Q5M&y^@Jyk-i>}Y%nfY88xa+OXI;nuZy6HeQB^X*l)t*hn4P(48mZ?ZX@vdO6!1v#Lfo($6pUYcvo@ei`$b0#? zDv)z9P8ymof1V!FiVo9GzC0;&@>8Ibv%QB1TKnnys}@Ha#q};TD|E2OBllGJ2o>J5 zi9BL&0?J5*_Que23OuJ@5@TyzG%ByJM#UMwI3xerhhFcB+h-<(yO(27rIBx>9%-&> zjk=7Uv%h(|=}RrjE`&ZOzc7M~K|tqjA!mNV*|d4KEbqvKGf47}@dpX*JmU^fPuL+$ zA4r->8vzRaxXHIJy!x28``9aUH#V9hT@(5=+kSQF*{g!Wt#>)cm{EI$+jo?q2v8PX zRKRF5eN-hY#r6G~x`@W?`C`-ZoLy|G&fU)A#uH&3X;D>Ih_449abFuqPPHA85u28g z3D7!&r;F&72VHDG*-@fnp!eyz>d%5gV?hxIiwf1+V5Q|SP37~}zig0J*vT^F))4=g z4Z^~T+DoGMh$3SV-@5C&7>lC4^V?jC%oeZDC{NjI(QLzCOY8~P#UH-w1jP-@Z%AWX zm$;DTb&^oyvEVscPSVtIwC$6t3I@sr^P&WOBDc`IpM`~$60}=|XR1e>N;`uah7c=c z_){u_>;ja?>v=Lrb~`pef(lqzdTzSFCg1t?Xe47P0Vo)S=1@yS3ojly`J5o&&lAFY zc9CO%zTw03!eg8pOpDU0E0?;qtB6n{40e0HPfSnIkhRxOhASOVlzNOZOogaPX!bL_ z#J6>s>tx7D`VeE`{ncby!DlTNl{;ikRl1Cd{w~GzMmcDRLL-rvB*RWMloG7V7?u$g)Pv*RtWsmauC>O9+%X^P zkc%H$Z}qk$|4 z65z8Zx9@T#zW`WA&IL;6ZC(^lsBohLdaoc@tG-WR)ORRV&S$qbArwpS1Wre1^?4hfB8)Op!- z(hq$Y&{cErQ=5FkHO^(KJq7 za_-5Eor?R}*V9tC_xj`HI=j8-=HqkcFwXSG5VPi(=q;Jw4=Q^Ml&m+X<{msA7`rEN z?sC&qKva2-3-h*Blj%N{iSOwyp9rs@pw+aG_2<8+zcdnvJaMx!7j`WFrD>&#_~XLe 
z&6z3!5y4p!CMM>cBH~9^%O%FMMoBbUqu?gr8&I5sBQcBOoWlTIBaCv>_!SgVbHy+ zi)7$eW;m9nmy@n~Cg*exys*~PrOzrwyMmQmr8dO6_3J18)%lILQRlQTZi!(a_R}eD=tR~elaESAgfok{%4;-Rj?ZC9o%l`u|L0RNq zdB)WVI}ISQHGMdc%#0-MX{d-=~y;mrJL&&V9YMG<5`*Fxi4;t@!ki+rg9rW ztePCG7Y@l=atMfKIGdjXL}k#8zOnvO2`rh+368x#*YpkcOgI1+-=N!g?qj9`uS~aw z?FTBBfcNm!C#1FcDSxgc&0&+zz$k&xKH0soMgS1@o-XfYKznO476-5}zRQ$nXS9oZ z7&;na30)`PymOef)~hI&pZQ zJb&8kWBHQ6Q-gCyS{C0-%x*+EqDHZ|xst`aEZ3h2sQL3obLMhjqahW0tKN_@Gggu2 zG@wh}Tvf+V6qOOH=){zt$2Z=#Px$$}6a6eRjv>9MQ4`x3cV1s-gsUEA__eoo?*usw zB$;9({jjr89m9s4DpSZH(Nm(-hz4R)_z~F;66;V9y^V=_QqNCYdZwpV25+&9Y?P^G zHsXAN+Y|H`sLVN^376#uB+(Vtfu&@-8Wxqu1AX z2#veKptLo>ri@#td}zxlZG5nwS?m4tDfGpih<(FQmhJogavhJ7G+OY6Hme;^X_dJ9 ztX^NUzTnc(r%xEx^7o;A>bKPI+_?)th7T*Ys3XKjUhHTVV(gu*=#-U(l}-G2Q&Y+v zlq)4;;JIfj03)RKQ*Xw-QG0{snIQLm7wDh%&L1NOormLiM>FS}CXPdXOcD+M2qW5A zY=3n&_MUVLc3=IuDWudaMz~_UAk3=oBm#7@GY)s{bvc-SnzvH!nAh>M!um;iF>ys+ za#3_ms?cI$?t6UfEwAMUDTPCA8z|Z>6aSM$X-zHO!7voJs8I=f?Zhh3T@9RP@n)`yoP8BR>>$uh^ivOa)_&xWdIY1h>-?+Rd? z6;F0YIpkKkPl+#lV=CYlSMhNZ@=REBCqXnf$8VCbP5UvjBeWS=;#iUn>w+*kh;y*s+51dN~~jDH>7U%Z8GrDJ1jr6u-Ij zcY_JN?PfVb`PNSFERF@WUrtMu%(MM6pCsQgU>#kbG?Fy_q6{-c5SNFJgCW{0UW%9CAGa46=Ts3e`B;pR`q&h8#dh4?FPYAdr?naSqtt8D!?zZ#QecF8T3 zr0V=(c`+qKgp!%m+e~+E6W=^neYpx&6NG5-8Eh0R++k7KJkv5$UW0c!1>~wI${iMp9MJS6J(0W zty^lep}{4OvM_`D^F-x4;oYCZ0s;XMjG%wMv0j7T79F2pqLq0nI%&M9JkV!WK9_=g z83kw(*g8nF00``31(vr);3bbzrd>3F;O<7d048x!AOrk^2A=Qm!l zd;Ugx0i*M47Vl4NIUX3DivW}r9{5>NSdW|^L~`ZYA_3jChTkM3Xm3Tv8R;{BzMgUpHLYXrXnQ6%Bzhl1(5$F=&-fCZ^9M*jGHt%FV0~r0 z_nSLhQ3Ns;;|&3pCCd(TgQ;c~N54lapmZMB^{{)|eBH(I z5&1^XWnWjhLUY@kl+)!kLrKb}D&rRzc<8R@Baj301_DrEhXa7vA}DR}t&fS~#aQA9 z&~ql>fK-f7JS7xp6A^+YgIt(l1Xxi_0mqDCSOhP*8)GCTFdtc3$xRP|?|hf-hF>)I z^JOfSyEmxA#&?e>wzM4&D>GT#tmT_~gTnlG((CP#|16}-IPN7& zCz?ggB(@)*3}x=gJW|(uzRqR5H!98P7cwspwkvRKMdef2?!t68FOV2XQe#x!Thluz z|DykcrJ!-7jIY#5!eFNQC6vQuN0!m(80j#l4NI1T?(!mkcShyAiG>Py0QyGdFn@6+ z8UaU1{t^Nwd)6*=4eD3~|2qDA? 
z9lPb{=Nfq`bP{>MzQprsisS)WM!;a=wkFLD++&obks7nE#UO#~M0rphB>Q1zpvwG^~Fk|dr!OIkBbIcN*vh}*om)vuzSCB%F zcwZvj4k;hZA7Bw2l6Q2bjTq`05oH|xKQEQ)GR6d-Ov&rImVAv7V})8hdTo(p>AeOr zxI)cqFhog5jA!_BCg!j*y_67Mg>^T{fvnPFj1SA6@27Tz)^B*YJ%k zix+B!Fb7gQkAwfK4LMR0SByR_Y+W#}usxq$pMS~Ra7kepp0~Df>fuAWzW@j~)4SAb zV3{*IFJV2`2&e2wqP+YTYu8-Mdr2q2TzGRA#yWGL$LISs&2Bq(YKsQ!w!RSE)}@s( zgtz@wh-`Y6J(ybC%K;_(Q!i4pkN=xG(jh)G+EW2NXRzC-FrvR_|4oh`&F^kA#UY|7 zpexAD|J`hr#LcQJ%!_BO&9^$8e4hP8R`0=>unSetGX(AXwAASE>(iM>D~(epPdM1$ zFHKak%YV?)-_o39LZ-$jo2JI(!0#O$8CZXJf>t2q*=%6mY+q#VBnp34s@i{q82x(J zWnx55>b1gab9Om<2z42zRz)+ry2lgWB=izeP>df~9CFo;1BBzzF@a6KIJ-4tjs^Sx zwpSYHYZ?am+Y*cfY-%P42!=828Hqtj0&4rbM+sQLV92k?du4VR&>JmEx=&vUpX#}q z((&@>A;F`%id(u}eZNMuHy4zD>mOF3P3vkGjy zXLafBh=!l9TiKS?ZM3~Rl{1xP{s@pav9U2d(5cZvFp%fJlM{aMT>z3XwG=^Eb!z+r zOVg)y^K?-$*mqxv8(V-nn(V;!U|XIus<@&YpzHuMH2&WM^2gGRIWJ8}{4kb|2 zl%Iv;xSIJfT3t`MqT7XUb>(#$g=Y@C(uzg4W*%R-{}7deCdLD`cjUv3s}c8Knyc>g zH`9s;lX7#(F->Hk$0V{8Ddm@?0!q{3f7QGeDf9ecB*hiiKO@d$WC4;$6UkLmPr zFU^W-nw&koKR>FOo?+jS5Ex|L+@@rBOzYPL))I6#{A)Ga(JxZ?kTl$<9<$ZGwSmdKh=W;2DTC>8}I|kN~g{YYj<_8 z`^X>8-=l3O~l*hPrE;Qs6cQ$ad8rZTw^tg(wf9LUzCb{C* zxA6Lqz*kvs$U3c4HTqVdGwTKW7 zvqv&0oLTsXu-yy~EWH_HpRm3_Og7^1h@?s2L-WFbdz3P#;M~(ZDZ5d|4EqW2D;~|` zrDbF6+DOaVK<8dNiCty=!Hpt@t^d7oz&7EQ6k!K6*0Hwo`RnScKH~RBmuXX=w41cF zN%!ZXEm9#O>+LbRm2Urltmx-M66kZ&>+fuuq%2=dSI-G5Lk72C3au2ft5=QExyxA& z`}23w`x0&Y4GofGRnPk;Un<^V9eWcan&A)#d0K?L7C-M|9qqjiR|63SLG+~!7;2M` z>J4_Z9Rvy)3u{hMxzaKKJ~To8JP#y*t+v&nya*jj&Q28P`7}wKrxbRo$4J^Y^<6Bz z!4A}}nC`nGP$vH`@{2p68|YDdZQC2H2~lRi(M6GIxC&|mK)2?3ukp2v z(TC_WT2oTQEiZ2z+Uv2e`~<%p%t1K#`>gxdg7UHWRLtUq^S;h>JPu3CX*9t0S~8Km z?T$c;8G_SESbu7XE3KyZuj6}5qVGZ5Z1SDe0u1CMU+nVKchm@h)ymBa6QA@Guwszf z@ZvvGC`KxkrJ3J5<_2#h53a^~+<6#1qVv_`k@zk?>0Y8H7qxntnbm5f%iQ)f@Qd11 z*u2q)>u7xuJed|S==CK@^~Q;!1eny>%Zg3x3%xCNxoLiZ1If6liIb4q2VH)_>FWBl zh|>BxVLSaST)rdBBHL5%zz4TEJ-^y`r*}EZfurcGNx(&;?=AvGY%I+b1HHnEum_pD#67Ds0zUe=GE>um_o-&eeni&kL|A}^ZG55k? 
zxlve9=^EpZQ!TEP+zd($sA}ztiZ@>|Hx2D*A8_ZO(x+eO=}4(E<|1}znp-^aRI$jO z>saRj#6!vI{w1yRas% zXt6(9Z|~)36352D?2F&fH*Ofpt*;b#YdvsAcj?LVijL}O-$et%4JF8!h8K0O_inB> zU?ty6IwNhl+eR{j(sn&ds(s6|&H$r+m|j54DA{#hQHvrs=27u%#mRAs1ZP&BiJ{f6 z2qg`4sxLQp3rT4?TEec@A61Uiz#sGaVxd)hIxQ)s;o#K3caVZa{8~4OTl7OdzGZa$ zD-^$MxuunOhPLtw9T~p&e5PEu^cu2a*Y%V|`Lr|dm(Bc^Pj~DLBjbNu^l*-B*Yn?> z%Z-l*<<&7pj#P8ZLCp2!8J)45EB3lJQSWAJGy6aVQv&kXyH#i!vvJ4hDsUHTV!&Fj zZik|QFM<0DY-5Vp1}O!53~=(mB3VOH_RjM#KtVAwk>A5>*m{^Q(q61PaVsiJ7qav% z8bSMBVuHOBt#*W>c`u}pT;$_`Nyv59bMBJ5SoxiDu4Ff#^V@lqM;D*KjCViDdX)X6 z$NExGP*~shu6p$;l%T0aL#S$MnX#5g|Amk7vC|KnAStkNg1+KE6)&#&4!oxeJA z`Cjb{Z6z$LKserqkrZCM87cu{0{hRVmB7{x1H&v(4VVWgfP>e-vc<2F){eYnwqm?K|cz)@VE^-e~O$(gI|EJ%PkYwieA{csuM`fd9DTWx#bTE zZs$H5-hslb2iG`vxjS8f*t`Mo@Tk90>?~Shz|UTByG=d~ zY0|tlu%pgoVrirBO}?%j`cxbe7NR&LZTtbVmlPpo`rHLVovn?P+w)QGa%S5(yFEW& zf7mPX!gKGbpC1_1ebXWCDAkV|PPOH;wKl~MJKpP8Hmk?rFb%m!AP00-HqZy~ z>3G^;&q-%(W3k9F(_>vd{l&!*O%E)OKk2G(Ihx`@RbL6n+l7kXAMtBCDt57AA!)(W zy4azTRpE1anuI7%&+{E&eZNHk{ClaMa@k+(WWc@s@1^bjDT4bvTLO=p**{#rnsY~{ z=%EPqkHz&KewBa4AW|?g{@KKCsAMu$g^qX_f?xFC<$J=1wSg%NaDVG96Fjo7?k(hd z*|==z*iHXeY@>>7>Tsc@zr@%*6~j6+|C1V@tfOPn9R?lEf7#7``xG;!lixa_#tOZ; zNa>RP+<(SjJeh9)>ZyiOy!Qb`@`d?>KF%F)z2$;qx9EDhpX)rn(^US|J|ip~eR{Ng z!>)ODtq#ozFNTmPCM$(jCOBMM&%i~j5e1>UsG+H9$DnJK8@DRxfS~zmwl~H5&a%(@ zdeLRFbjv>)?K->sLo`J>BYub)WOb(3iXv(vAkd7RpH2N#@hc{z=SO&y=NzVdOK;DAS}cISm0DuV0S2u%oNWjn0mkeV`Eg%6G?r3;-7tw2 z{Eg1$NE=qH*c&(hjtT$zN=A_VXV}hihz&5q&e>z2lC(l|co2Be;w(VNJuwJjG#N=- z(!=2`Jb1t6_>rDyL=*`IX#1le!#R`O;*h9)CpGW!QnYBK5aRR)=##;S1UA53aFpyA z47~u_^`gSDJSWT2e2|N+Nn%I=cH{((qcPJC-!S1{1XA=wn5bP z&*NcW7{5xTLi>OgcuD4o2TkF*a*tU6s5eXUc2hU`dc=&&aU3Y;7X`>;(?S8Z$K0R8 z4=l~k;DPtnFxz?Yr$C1f8XzvWF9DWTAZz3dFU*=f4@{UzmM!wf(JJ2WxYO^q|BJ!* zE*=IFt`xooXwBQ9%7j&6VEz0iO;&WUWVJAW;IjYmyV1uPY?D5yZ?yFX>mT66IM}!W z&hu)+B+HH6#UR!W3}!tYR5LNs0ijzLkbW@EVbWyACLec8@V`xtOh+Df`w(b*3dX|u z0%vp*q_DGc@JbwSKj8I&d*VT@VK#E6710RhNe$+1!vi+fqz;zn34`3b 
z@Y1=2=0aDs9Rv6IAOVKLWQ^VJqO`g@1Nf*TOu6S;DQ;Z+(fCpy-?uH`W?%sUQYg}cwrEnUEEmy==xJ^S$keD zX>0J3D5OtI`9K=IHEPdTY9YX8UE~>&u|Ni%BFnp*o!vgTIWmTd5Ub8+c7yd>U@>b5 znDpu94HbRlqQFO^E==-rp3~g0ZSahzQg2ywSwO6A{?%gJoEwX?C1i_;agi$-14=`+ z@fM(O6Tf6$FcopaZpDi+E=+qDLGzEImV&xL-iTpb8ytR;u!YwKlAw`@2Wnv=e$33J zrv6b*D5D~e%x-X`HP#trr!Kx?L?mKFdJX<$ZDdafnLAoKKFm?;4GdQ$uVSze$3jbcC0|-eu&~w%{<`X zFacccYm59%>gbUbhg!x-y{g9{ghiz2sQ0J5x%+W}x8LSw>9qffvP-E={WhK0S9SI1 zIz~k^_xpQWCDQ|m3W%|e-Nq9EnEurL2@OfF2JR6%(G7lX9hiMQ(1Y+n{dFMnW6YU0 z!dKpCyWY>oY@r|Eov?dvN5kY9cUzpXG|zTF$resKhnUm9ja6g92PQW{2MrzjR97nKz2Rr8Er z7VJqU$lah>={dbKU+hRMR!hD^MU6*I={NsN+6&BpNon$j9Pzm(AiBkK%99LV5UDdK z>h5ITf;{%SUDa!0oMFRy z8wHZ@4E@zh{WyuG#gOLaNq%sS5rHRQ8;aVv3vdbmwU3NTK$cGhiQ&lsuOc;oH}4&{ z9Lw0iLdT3Nz@LIi3>b8H0Xw_s&k+Y7LH_MZR*Tr#aRg@@Z&xUMBNV8;o!AdB$PKYk zyEJON$p^~=cNqA~-DKK%nLvFt5g6NehdO!h>R7$NNi++s`qdT#B}^`#G9W=a<=OZT~lV z=fA9D^w;?Ookwdtere6h{aH4e?DxPuRT~7j>B^htw+EyLJ@`|nfbU=F>TO%QLX@m2omV!G7+b9^`d=O6Arg=YVJbBD=I)$e}Gj%PpLuk6{q6mKVPG;{QUEU&X4 z*pfqtk03~q4?cV!%h_mMhcjPoVMVxg%Rb%})!LR)1#L|`U>wmreXAoQf z=->6V=w;;8pZqKr0suE97-NnX-+%QCyIo7mA6B*cAJF#LBLI7?E6X_Y;Mo7#S^fKr z0~E}#SX$R04lnUqYDjB;4EZ{MfNBEqM+^W)O?g0zYlUZBmHg|5{QDLD7rxrkjkamV zh%*qlV8Sj)|8yVc1TA*Yz*Vme(O7T6$d|bzxO-vIW*0<`MC>g!QQ6^CA8!z2R(D`D p(68#wFISUu@(1G`&ipk(;-3c7fB*P5XYc>g>EYWP{vUC&KH&fW literal 0 HcmV?d00001 diff --git a/PyTorch/build-in/Classification/Sequencer2D/sequencer/img/RIKKYOAI_main.png b/PyTorch/build-in/Classification/Sequencer2D/sequencer/img/RIKKYOAI_main.png new file mode 100644 index 0000000000000000000000000000000000000000..11379407080366c527bd61d2bf362e3dc62c8238 GIT binary patch literal 99021 zcmeFZ`9IYA`#=6f!c?@FC~Kt>LaQxnNmBN;ETfJj$-XZ$krbhnRI+5r&a@~?7!*a> zx5%D-&(4hbT+gS@d4GTU{spgdyPeZHWuDK+<67?5{kmV*ocr3EYFlu-aR`EJQ9pM^ z7eQFX5QOC_>qhvUxJh|u_=U~koC*9Yz7PGI!6RAG4M7AE^)t%)9xul`OK)8sdcLto z;;n&gruvtrROa_e2|EPMIp1fox2tv>%0xy)WKuV~M1H?uHOF{kieoEFPuM0oBfwgyb zp9TJBf&W?He-`+k1^#D&|5@OF7Wkh9{(mhXJ+C~v{sWXhRqw9I?Y(5=aYJM?4d!PYou*Vk&j_UZn;%Cysn 
z*s&cuu}2cr2Tb1JQvw6tCHuci4)7G_p}glkYofnY+Z8{1z;P!jIfcCb?kz6Yn#;bm zYW2N;Nq&3)FH|3XPhf9fywvcVUJygild^SkFBX5SahoQNjU&jyJtEM5*^zr&x7z3E zu6XBXCvgaujxqKyl1!xamv49OJk~8sl#Y3UymaRrCli7Cv?UQMTrRMW2&%y%83k>m& zfB&!t_b2nZs>ykeLhTHy(-yVIVAH&s8Yt6NIh?KT!+d6kNgUH-nt_=cz9_?gdfiDJSx_itgU z9+Ip|DD&&%MTFwlKTOg6_AUWG5FGS4LPgX@&{$J#+l;S$ooB2x3&M5?x;*%+&Z9h{ z?AFVtoFtcpeSIeObu#DqHjv)`eZ}?6Q=)GB`f^?+A+zxURyw_cu3M48fWJo$jj;Zr z2_Mb$Ce4NI?JESoA4UH>UF6GT1QW zZ4&wMNw(>IYC1ddT}kYW6sqpJrwOk%ryQcnSKY@MDp06L3>Zk~|GhXXTpS{;O`VsS z`-eU=av7^{SeY<9v!@BCh|KA+Fc6OXwbR3w35;SdY4<#?F>gRx|9VI1 zXfTfK5SAb&GM)k_@B8;ngPeH69=@}muZjvH&vphfqfbY!oz-F3*`oZ!iW&Xg-G6`g zMwGRx=-$|~pS$-ZL`t(uK94F=9x{QHJRbhnin)-u5_`?>!21&p7AadRy0RiK)c+p& z(r=SDJFIjpht>N7GQ-5Q#VZ8-*7x z#^Sa1^>vWf7%r3D~$K7aqz^l~h|Lr2p9wQ^no-28PWLdpE=JBP!bhNr@v z(izEl3xFYyoAw~a;{Sf|n?rh8#=SPp80Ng4R?J9*u{s}S0n(RaFVm+>US%R7lh5BW zB5BXo-KJ!P2DyxZ25U|4(rz^$Uea%&o)Wvu^_3kVDpqD`o$RZq$jW>3W~ZZ%&q7=2G#8?D_^zm+ zc~IHW zs0Q}R5N|P%Ms_{Lcb(05!(G2igIpGZK;s?=XoBQ8m(e|`z;V^w@_+~)zc)P+UnB5s za;;;EJtuZEBDl9UctW@um(^rKO7m>wap6y4XF#})fRza9V5JGjrKKo6?RPvpwlg|@ z_Ohyh_XMKwd#e;HgLbemA>0t)2ltu>y=6SEuDJJtAwtMTk1Y1euWo79m;aO}yop5J2vXFc_g>_jLsRk*rykU7R+YE=|C$wH{J)jk{c z3D;eR$Vi-TE;253x2BekaDt%?6xa~Sq)a4qJ`XEk2uVcY9K<(i1q%p|ZH7upSuK;D z_gjn>uUOe#kfN@P35XZWCKl4yyy^ZpBvq(3ct|JvKGTZjOcyLfs9N*3*+@v zzrqgG_2%fIGor_4-jz^AZr#i@^SNGtzFrGnkoN>EY*->Sd`cbOc_10?TBlV0wyZ-~ zk+R{2(`xCp_r(S7Gq-KH@D;y2d1oBTq7l;m$JT)=$BsAZobid3-GG<|LJTXFgJCD` zdGKw6XjHmtR=}9BHqVimt*y<7h(EFZVs4CQxi}9{1hs?_$yPxxr+n8c@Z+}n^D)dX zMk^{VQO6}pf}4NLFP6xZCzRc@1!N~Zf*&!0d*ynx@VKm?p=;p#P!^D*vEJU@J>g|$ zd?s?jq|-E9uKU!z$lPt6Q#LuoRC9gl)t($FdjDcq<_4ro$B=Z^``;LBeG`FxzO~if zeX;Bx=G101NoPxF)vH#4{r%4)@nLFa&TGng@N)Mj`1pGEeSPWxy$)uEDiD8pmL{dq z5496R6@7P)mCy#3S*-KcEWe)#6!4ZV&0s*bJhlGfwj{&wV%+iHt7@QOcl(g2677;j zXGjOfNiBhAmO8yT(#URQ^yih~=Zg$mZdJtC^8*B} zj07~A(cf4&f8M(PAT4c{kFWt2@AUW^FujZH#7oP@?W0j_H_tY{01Zx4NfwGzRt>|*U!W-@4Jvy;Cf)*C7Q4y8jwUqSgvSi zKv9IGXGK!WS<;vgMAE#Zvop+z%TCmF=k)}LvAo%Nnz2b?QmuSoLfKc_*GBr%-a~59 
z-o)v<2aw~Yv3NP1wM^uAD^Sf@Y4kbO=TS0jdclNY)F3U>f%kDFuHu40p00`I_n52Z zpPw)z*Yi$^e%RONI?Ah*mFbVf&!Eg?5b~=j!^z}Tx_SuCpc)rxLH|XZ&?oLi$k$Oq zn18gU7FTz(+k}}xsnP7{EGI(ZT$#8f3b#BN^%VbFUqA0pkr7q~Xp9Jfp$NAlGJb^J zE1;$t%3=I1v>ZC0g+rbAkSU2>TUI1{nS>gtDzbjn7 z#&cU0PCFzrH@bf1PuF}EuCj7pHFITR_O9g~W_LqBTCn@s&yKvSg&UCF;EV}jBW2`= z8|Sv}WC*f%C?S(P2!IAWKqH3f#dWKRYrmFP{Gd|CDM*(F(R1-=)Ey8N`RwTFkaQLAnhQ4(LRb;*aT{S5?_c&nc#67YJCKAz%lD#9ujdD>txPP-IOq~0JHKN#a92~~ zUwE|_^ZH6H+^Poe%-F$dGBLi#c3YbRjRARB zkwLcLzwrhN-}_=}W_vHd<+Fe0PdCeT^JN+9OVgu+9y2o{gUbJR|FAL_X5{HJhyuz3 zC}&)~Kh#pcj1GF`zAbzI-}Pe%y5+0Z_(X(I3ha|_n*?O-wZTV+ObvricrZ)5C``yjg z;P9H46jEh?*AcgpuKF1W^*WjkZx7vdxC-MIW+} zQ`~p)KFs(`OJ)n@Kd?>P`v-xGFMJ$tq_73o=a@V)F|L7U`I4s6-M~oRJa%r ziioAq26#poJfk>?MzKP8M($s3)nB1%>}ciUrxONjsR6E6qh|Z1cNsoRYVTgFrk`u& zf~O&gf+Q7hQ5PWInY$TLQA1`#|MCfSHMrKv`s9gfFS?fD!E6ameKKZ*yOx2xwQ-w8 zgzBxJF==)XOyezJMKs*~8svSr;olI$EvWHGye{ck5lJ8S&+Pb4VyUHm{y5gK@a0Tl zni>AB2To+}Y2ilv(U!v4I9kEzLMQw@#{+)^H(@ypmM~V0dKU!;q4XG2pK$hk`5}-w zToq)0{)H1=VXB^^MLtL)D^Y;=WQ|m+WZTq^1xNy=E^cN9&aUWJZ~+2-8`^{$Yp1jq zh}uZF@KFwU{YkcVnJA2nR!kMl55)gr7umeHnNy#;^YNpt2y*9(Fl&5X`@zogcXg_O z0>Rt>Xb734nQIW`!dbvh*kRZC%W$O9{Ja2ds{{4)FM7DIR#fiYWXGFR>5>JR)Q`kz zfnArG5G4e?#8g%t7=WCPj<%8SX?k{%LPdP&Q_R|nGZ$%}D>?1LPXun_)*n9XzD4Ap zOm}%=alFcQGk_1)0G+K$jxljA90GU684;3!DgjPH@Uz$GONnIQxJ4zXYW*s3b1Y5H z@Q=KzUCL`n-Z{ptFByctmeHXFZ}q&c|6=hoT<0r)pg367z)i-R4USRanx5s|S0T{z zMGs;tMZqs)F?9ZQab>(edXtJ|@tD0g6)Y8@wGd0!v1t+FyOYhNq-}o}d&XJx5EPx1 z_Vkbvmkpmh`dUtOE|pfjV8%a)mU}57^MFXU7e2Bb&TDm*bA$FxYp|r|@Lq6%)q^|m za)-LK59dci{gIy1AGIY(8b+J*N2S?$47oLZS&y%k%I@vI(pYLVP(q_sK6ncpm2G$A zpxL&a8-gd-Ejfx=GEZjW{4aRg`YyoJ&p#9>0X)Qq|;$+?%juj*9o6 z_tDU{^eEQxA}3CgA-$iSPHYeh$-aps1!3{gjKzoKsQzX5{S$%NOl@bhisv3=`?fm2 zs|8;mw{DFM4q3b?rZf%*bvZ?aZIp3Wd`NZC|N5by4BHG2O{vEh&(!Tg`UQcn>;ck( zJbd+Ebm&DY&?Cg{QmFNx_ooWVbhg#Xr}lXHbtwmSy>5wgfrzF&KAUW^UD+Cu=Qs_S zOOYjDDgWWB`T&G&^zh0e(b(u#_e&p2ZiavO3`a|-Zzz%M#2s00acL#G+uPJl8vNMS zi6^=tg7UH>{INl3jJYadF?LroH&>!PpV9o3xWAGH&}IbEm$o;JkZZtAUQZFs?Qo+t)*q5+P89MXcA7t#2>vLM!Tal~0- 
zq+#Kyonx07{;WT)V!mZ|mR)4-JB-5Uey z9UrqtD!Mbin?G?^3+#;*N3C`CQk&Ed;>dU)8a)IC@R@iTf8J&!Bk=#;Er{aNem*n{ z^raLR>qj^WelWD=xYrGk1t1{&SY?2#2hj!Pm{An=56c0TN>EchP$!H4{qG8XLY3u=fiIjoAC8m`O>%uFvVv z^Ruzs`&kH_s3V>+p%p{hBj(0f?DYYh#|%8oYvtX?g8~kDZ|rd*n-^TzM2u@cZouCi z;*r^~>AQxR>;tPMegUnBS~oy z>2^>BCVl%Ge3hJrQwhI*&HD)O;s2OVw(IQS;{yvh)TJJ;j#yxdVj#?;5lND%hmITJ z{CPXigvG;gVBwfE~``YTU<3g6rO8nEHjc-Hot)k09s z9JQhdw>{Hx$YsBa(ZWHv3)j$*Ju_5#bCv`1}n}uD3l}%lSIK^6^h(LyCv85 zgkRDi&UU0&^+RfK)|;s(!%rKl|3#O5XqpB8Hm0iX>-mzYv-Os8U?~@R=?!?X)t1mg z21Hw!Hi?r6%4VWec^$;akQ~b8NGikO{%xDgSjcIwL;WRL@MlXTv!%pxH{wly%deGM z%?13n)&FwvS1{y*WZ0SET}lPBk0&kVT($w7l0)efq5lYuaeB>l?tP??<42bX@xaQOM0%&H2pdtL5gd&A7<5^`Sb{&n!Cwdn*;Qw?xtLL`x zX)+-3!eIcLgk&roc}+0~k+gt(Pz#n5xe^7x_X}iN2KHX!nO$Ou(!LGFK>Stk{*9t* zD<3t-*i#q~a{cbeYi3j#M#{}~|FB{H2?ZuZ$r+0)%7Ut@_lsV8VV^SdRWe05tbd&B zKNL-WUd>m|QIorSvBtQ{7?MVu_l`J1Iq&V#Ab*O(46rF0JA2zdaF$a)3-Yrv;`~~= zP&6ahDG>mOYo)5D2I<2pv&(-P7|QZoDm{)O2v1hG*((g0CLGZqYvWu`zaFMUh}Bzb z!)kat`yb|w#K7JbxVtF)--(G^uKz0+HXU1Z&(IwK#E)xMqPIkCyNGKXgPWr z+c-Z%sEW^uDOS{g{?v`mYRnvFxQ*4$ozA&u4W7_fr0Dhq4G4nE(y^Hg*(yN#Df=H0 zJcJqLBJe57W3-^nSzDYV){0E(jEg1Mvd!RC(C2(%&X>Eqml*Q9gWN%uj$n6QBLq2}p*mAjFpn^*yc)qn{ zIqE?mj5!GX=gYQj%?pi6SI6@GldGp-}x5AMxQ9z&LyL^ zc>n&`U0)gisI`S$j1a=SAr^pV&=VlGA*j{CXp4Y#Gf4o?jELHL9^J=6#?=jLiwJsV-M%{Y3oUcZ4~WycV+7A)Z`qpkdn3^Q3vU2Z zlV#=pZQu@IRG>u{ST^cwDb&|`#Kiz8$X%=K-1^*NwXRx~ty7n52c6&-^fz+kc)>vJ zum11tX@8l4CYpW#1p?|sikfz5V0gTLT~xT+li3hK$lElU%zo02FNXF zr3@)C-SFvW!J91GIIo31|MH~(>9Bw>qNK$9iM2g*skk8%zGDK1Y4i5f{jZo2%IgF; zIoAX*YEP6=x9MXJFOb(yPDSJPKhrgcxHRc7W=hcIu<2Y><8WOgb!dV zDfF~EGvZ7@IT1frA#D@&>?SnQS;U#zP$~?`(mlkBwnyX9vWHIu;lD?>M|q7QmJ&(} zJGm>8xrU~sIYEA^8Vka+Dls`Uv!j(R`fsHZL)-rboncwhMYmZsg+6FiKGeM;eUcNA zp3FDQ9Ym|$G8!j09{_+qp8JsG)T4`JM!kaqa1bp3H;-aC-vGh*wI2V06knmQc;~N z5`AN=;sXTq&+o4J;eNw*FQ#-;#F-N^%AU;bFtY8Fuz{zfD>C-k9Wi95Q29}|NV=$o zvSxC_N}^yGT7Td2%)d>T$Vv;Bb2T9Uf!OvbZb@Ofqw%ICS+7RR)2q`Bup}cHKv8`( znD~^z_kt1rM$4^~a^w66`X>IxhS0$)g^1@-W^Gm^mD8<2d8Ab 
z(y`%BTO49VjL!@ud3*4q`oKv(l`vBWqcUqj`j>n+b|lM`4&A_^!Uh>xAJAF4{F)ZD-`n{=ndPA zzCZ4q-zd$T)tZX4fO@DE_sMu=Fo(u+YX#14p4jj!3N!Loj($N%21d+RkwCE~gFS)p zp-|bL63gdTyTnqV_+9&P(~(#lLeV_|Ergfy5RH^!~3#|?T;d^saLLACW;rFP5WOur%1&eHJsBFeHB!q?X9Wx_PmZ)@Un!m4!iR4d=hkEf1sDqP#fCKS z_E!rz6f!xPG5Z9E31OX3x~Rl23<#9b-YI`F|5l=iX#j51OGQ^|z?Q6B21GOkT&+)a z#*qZpi$((07#bf!8yi0r-1KPsqI?{mp+o+Z_%fS_bj;M-r!yQ2vscyAeP@4^2p3KN z;fRqtwF5yUxEUA->g{Ml7cac-=O0jzNf#HI+|w+2iabhfoMYAEOtX7S$I3f$0DKEs){gY2S)Y{ zg>==9%a1uccBc?99=^!P`?Me|eR`X|gQu4ty>n7}d;_9%Yh|Bc7~1JGD_NexGD=ki z?RhhUuVOUuZ#qrQ!jo{KZPDmI?_{5tiZ6RGE6;-8ILa=^h&&8gW4-rL)rc9<0J7u5 zSpaQJ@EtTZ1@oYFK6vXcgZae={sd}3+$FQ%T=5tuztKkDPewHPlI02u$(@V{`S=48 zvi01$leeI(MM)ctml0u9qAL7Tw#AP4+qcNRB~7DLHZyRqb=?H;47x--|YwE z4Bi)=@P5>vdiPhQVrEBbFetdkYAnthm(pk{)r^Rs)ctK+S8r=DAnIPcs018*4<*jV z1)8}hmJW#^u{S&0T5_j+{TB9CqSV#Qeyy;}M{3f}e3V!JtIVAv`%)!m?rD%ID;vKr zu1!gwf|8O3v7-6q>mDQ{k#GHxn2YZ)9f{sKePdAgUfn)CH=RKA%A4V6T*#C=#eHz^(-`Q?G?b1)bpw}w!saS^e zd0Deq2VJBh^CslkW#CX6UYDt}8z`eT7<4CGL4_XUlW0ExA?Co_OekKS`m=+^Unufh zA-ya&ql|G#i;qrF=$>wO@}rk{&P|Q6LsKKD4r+s|`V{K&<~fXzapEy_IUaMv*5dsR zmQP-zatf&cF?o4gR|bh)Qr%`S2EaS9TjoKFBYSyiIGx~KGRh8xI{s|3r&dE6_a+vC zY77XJf!Uz{{ykbS+;ThU15#2yTi(=JgGdm1OMq0$F+4cgzFKHUe#hV7E&gQ%3=fwNFEhhWG9yqun4-$p1exR*UAahNe>>5YJEp@Z2+)^0B}G z1sW<|f-@nuS#e!>Vr z4@EZhlq+*Qr1?b*Vi7b|dMdFBxCj+c3HXKGV&u@$H}BO;T4wlh=C7t?*EG;Jr`TCz zyXeYU&SAuJdaD%@beR|zBwh*Z|Hg}+T|1;my0{)k57!C$&8M2Vu13`N_#S|`b zhk|NYB>_bijDKnMAuzk2AwHo*hw+w&^d*)`6ShsUCE99%rCrZ$c^6(LUOqdx5JIz0 zz6EL=ISi#Bx<(LQH=km)R*3_>`NI^Zdj6f$B2cWG+k;+9pt|_@1o`-)4nAA?zx4TQ zM}?2^D#Th&xrHsztiA@;sNrQql~W-7n8i$~#DNwMC2@@Op-34$*=`-HHI?1t(r@vA z2vu`7RBO7bD|mA&63j=@$=1k| z*%XiiR*U|eSbJ5HGc?1IRy-@UtM$?QWn=fFFk{M$C4it7^~vF$&fMBl zie9ClpjGNHU_5Kn%MxvEOS1dm?jZRg?&hs~Qy;cPKOaQMdr;k^Xxmnw4@_xelV5(rF<1P&|LzChtjpMlW@_|<%WXKHfn?lNok6LU*6gJ%<+%Jw z8*@1ynjCvABa)d4;A&JQPsNHjc||h}cCe?FgJ$Nx8SL{^-tW)U6*6yW}s64Rjlnx&n_k!(V$@@+2F_ z-%W@4K~=k$)znX53zQcyJw2g5b;DJ$ynCx%2gC0vO#7h-W4;lGcs}5_ebe--uX(o1*rP3P|Th$!)0(QC`7raHAF`&Ft#!h 
zX5V8zI*YZ3vmi|%+8eY0L#-tURHl9bl`vesv60l|W`q@L6TgNmyICbCpL!t#UeI3N zErP`VW?7WfYLpivV=2&L-jCKJxoi22GZj#8XN+*H?3O2(P1?={poK~=0H-19ORVZ%N+uc=(`psn2R(MfrXoaGe9L%C*@78h*lHKF}hSU2zi zGfadu7Qk?505V)vvcYW6(~gV2{adcj>ITWj{C`L>fMsd%^N!j#DRx=&^R?zt(r<5_ z+6CK?D3i+IG4v1`dE9S__Ix|(7Z@IvKuf(ob)yKOtQX-@yY%DaTgusds59Q3cpuuC z5+Dj;xRwF&^;)sCHl(mtT0K@fo+`O^abnDZ=AO(!pBt*tivuS00v;SO_rY+nf=+`c z7aa=c6MMsse3`-IL&5c8q>h%ecF@qtGbo3keeRx-UEAZFucyZR&68RVjB6qtV_c>M zoSPSp=S)OB%#Ev^;dq|Y{Yz)h1hwGw30NUm@9qp|k1d-vOui?pC|-Z_@w-35S%FT! z5RwZjd61gZKSVH}tZLTsIaeQfxZ?`6tKUz->`tK*ASpwpQt_Wj8wCr$3dMhTpF6IF zz1fS90MnecipXd&I3x>{;d-s{yMwX?4Abkh^?&v2`-1+C$(O($uN@u@e=E4OkrK2X@GcnrKL!$q?|P3Y@vhMSs!<^H zF{m|$ym!+H<{4z{zaA8?g%Axt=4?d75(EsTn{V93*8r=WsmqH6>C?v(>jfk+nL*M3 zB~I^mBDJsGj3iSycmr??Cfkm7XSuP7B)wET0TW)svfZE~*M9yt9#3oUNqndJawXKW zyI^J)LSmki+wPM4!i=qbG!QTQ^b4)$OKfW}!r1}^SO+aml=ZW}`xU8=gTctihg&zb z+>9K?Qy)&Le9KP+Zk}R3Ur(c0t1(wED1Zydx4Ob`hgK)0p?Q!DI-8uIV@~$K)6Hn1 z5R6fLaqm)^eGVfn>v90$BnkzBa1W7?51ABQ)799avxTC+p`XWzoXZcwAN6}deAMqO zKqGV>d*cq3c`bG!S0>!=>kze|oE<^b6wPh;zZ4N>(q*FNtb(>}Rj1}NktDD&*B1;6 z8hP)p}bwu%XK88n40(NhLuO3Ej-d*?S%_vl(K@7DZ0@d3&%viKHv=*=Ccm1&V z3kTjGPw_G$&|I##1oi*OsjiH@?|bZH+*PfPOJgvAxb^#-;*>#W-agm@*%WzAM%PEg zMk`n)>R9%^k;k)l@SAfzWx*`i{}*5d)JRG+Hg*B_K{$yu#>dqFu7e3 z4i1U&jQf>(Tq9z^DU$_>z#3=dC)PNhg=V#2>6-9 zJh_lN_?3mv+_K2pryXej`7ji1X}R4YrsG+~AkQikpLs$q_!i{Ma2NvL{Q9fWra45$GKg5uL6IDO)%!%L@Vg}zcXl#<*;&&JIU*TU#LdD$Nx{6NbQ054 zUZJD^G~T?;@msIC8cBBVTllGCd3#nbXJy#0=S`x&jdvU$-pTkQ@JFAf=4e}X#9=zl z$o%k^qWH3|;iaH+kNWDa3d%+ldw~9a*(DLs_3@d!EhGs{9y5d0(-Ro=5LAbii~X3= z5KJ(1XNFOXce*kf!WD{QPM`7<{d2?_-dL{66dZ4>m2%T3hY_b`MD}%P#zr$JZKHAx z7*$7THuFRZ+W0J;Zla$!7Zn&3;Wo=EI#+MTK+?kGXoLsz{_R50mdTtpo%lZjG(k@` zk-Sr#AcTM6RSx{MFg9glQFz1*AK~3H+s%obZ0(jPxfGJzcuqvUD11hEW_2ky>AkH? 
zA+bUJQ&j6Uv}HLX%!Hisz<`Bt8lzFe*%U2l1`WwBvr=mLN=?6j(nc%&#PcX=OJ_#vN4 zFfV)rdSZ4N)Gn2a@UrywA(28!NGL5nW&D zN{BI$9{=aYAww4-8;f90)tS$gBXctx{o%j)jfPXX45e$G^-%G_O5LT0p{Eg(c#!pp zY5){IRmb_lxJ^8U(F~dVOL@!q6URU{|nzyb`O01@|LjNue5)m{^4q< z7YAywUw&cocD8)}xLq-a`XWV-`>!8s*=QmCAV5h3b7%gry6vRBMDo@HdI@5$IS zVj1|&wmt}SBd;QaV< zJA0#BJODG1x3aA8B~jr zvgIo|4L6DELx}JP4E#tum{A**LEui{b~A$ZmMmV&El>VBGdRQHIVttQoeTuV-U5jB zvclrKPxK=3O&mk18bBsDIqO@Sr;zXHwnoam^?& zunc!>A=dX?#lJ=4rvVg=TJvKw3%`AfGd^|GLwwx_53?aH^?u8_FOu}sbAQhZzcA%V zrUnRF{rp;ReSER2HJ}DJ(-z6*{*=!U*qrN(^;TpJn@S_U(2^gogw3|9LGum1(q>lr zxwobm6*5Y#GVg?Enj~q&U=rrq@27xNoy+f+X873v7sf3Ai@@r*gXX9R#<(#!IaZmU%m`J&^-A^qF zxzsBIrdZ6j9W7_pQ#4lF$Es6RAUsg`PgG#BvwGp4G8`bakPY$Y5TkI?x{KHIW}JNJg7;Yv-1ByhyT$k6o#Yo*d51<#uk_u2sEnuKx^23zRksPKPLod zzDQCekN)Ysa{RMLRG@I|X8AMcOIlj*CuspWmaB-0_*kR_FMRr+9z*F|Pnu*IP((?| zUnjF4I%_MxWx?}?yW^1GMb=lkLC${GddZ@se<|owt$S}j?c+q@zL8fn2gI0zp6a9> zjE_5?u1X^rC53e3H5`w-UHs-6p!WJchO;-@-<@3y*{^DMXr)iUUEt2A`)b#BQ;x2; zVS}eZXao%s6#e?Uc;gqKldy`kweJ3SFtx|J)(1vNKSrTjCdgA0tCTN-j&VX=w`q(- zSo!CI5c^%ryMB4E+z%W6nxbsAZ7HaKc&4wtTyat%?;WN3vN7n=w$oh*$zr{a94tqb z$>3f=Z#L4YGaZ-P#lxkJIS$k9&191&(AftV85Lvf$sn+{PT9cvbL!nsi|>o+--<%L z&x_ETd*hzi1tOHu)yl|#OG5LqqDh-c+`C%0rkL`WM145zlzIC2k=<8(O6qOvY3>C< z%H=p;Xo$&TSfM8^qjif=4sh`T_@CI1<|(RN@Q07vjnG2eKRByfAd>gX=i?;6Vr#tr z+mjkU0grbB_rj9scX127=s|kNCQ!!b!$nzEfrRN7ux>FG!s(W?91@7usVWm55D*%q zd6jf}Hu*Y83>?}s4m-6Y?RJHKfadGMK5g2ou#C zHF2Z07(IhzLg|^f&nLS0swR;p_aOlxxY8CQTPIb6!;|L(l^f>RS?}A{LoC#)-Vs%huiC7CuWyE_L(?EfG=I^0$Ee{vFK2(Ky;vwE z-rvTI5{4^YxG)~=4ReN;l@-1KoM+rrS>@`~TlRGJ)Xh=yKWw*55uGd)6IDjpYY|e86B$S`)L^*Qol7qHp0#5P-fjnF5u~b_2`-%x=#y-L3!KFY_!2w zwH(c+EU|Pf!1~8K>u2c5=+~pUy5672*6(yvnba@P!P_LrsU$F!O~0s6;NOwB=UR5O zljEhRz_qtQ^r4>ta^gAG_hHju`>2UZc8hwJGOdT4fNBrChdMTy7}G6a_xV|Aec%|b|OoR=&7t5f4aXJz9bPZx!whZpaq+9 z4dyJcJY!U{`g4wDC#y)3LT6>_#yF|&(;M*{y)U;c!LAPma_&FnN?I+}GFT2;1;xPN zdrf_rVA0?x6xmQAyE#SS0W-(h){6pL9{JqTy>ZUcsWXaxadAF!Gsf;I4ImnHyt0Lk zX#K5sVe5r#s^!?m$501IBiY!TY8+4&= 
zwk3b@DYmICC=Wx(`t&3p2eA?9<)2YIg}94ke_B~SogtDxqZp9|C#UiCi6msg%xid#;GTTJ`;Obm3t3n&txZY!_56cSz{c)bNGi!z8 zVeCL8@o5IUd0gRgT&OK^x9q+N46Tfyo;n z{Rb51wO|L{)jm8&aWX4MPM!@p<(==O9pSQDEj`r5aF_*`G>4Qsm{sMnT@AT5`Pf4; zqUNg2qJ_JF%L9pnNmzcrTCXNMtta$hZQ9Tj#0T}!^CB=o7Y?>wvvhUhJ@ev0J<~gh z?c+z~#G#{952^8b$KOYaUK^$<+4nI2qWm%`dzR5#P`eQ5B$- z{#&g@ko=IB>-ZmUdQ^z3PSr|?i%C2Gw_|r(y}DhljMUKm7FMc)@Vg@q5cwI%`mks* zX%90k266Oz4ao1o?4@dDf=#T)@}A7BBQhu#irm>Ox*l@^<753q z(F(7AY0XWd=U{i8%vfxykg6^lx?)zf(Y-jrostlu$&wZxp2L&>)M?LrzD?_1SD}gQ z)5fptwWyvOD1X*X@kbdgtXt})vZqn$d~g1OS#6EEQ%m0S%TR-? z>Sb6cPU}qak22Z_xA3CuN@F5hvj1Rh)OJIMQ z0PNf6=ubxO9nlY7NIWBKDL659oyst;vN3I};#qd6d7iV+2k7`wsRY)kSx73L7`h88 zqG@ex&w|Pq$1k_$41crtF6ltmKz93d`Ecxl5uBY#|2{O!0>c*-^FLaa#Pf_6(N~(g)c2;h;QE{lv_j>X?S^frKo`Hh z2!bqQNd98f-KY%&Z*5_tlChu9l>?i|{*eO}A8MngX#LKcut7uaC%>S0|L|F}0ovNZ z#oW6fa&=gg97-i9mbkdl!;jeuGE!hO-zTSKEVFKt=o};Cqiaw{`o2xFHOfkUH{}9y zcX-P5(Ihovo?Y}D_4x*h6Z(CpU57Rzl#SRiR@Ms&DB<$US+y5T<3OMFrQUW_jFM+r zy>#mdd~?j_uimvzsJy&rn##-{XU8gYMD3|B;DO z7~SJ}BDcCMKdF=W(`<^Yf&waEHzH<)^}2!4PgEUaM67!MM?B@&E%NCjbVLcySdl=-<#^RPgD7PfD^S>U5Z{kyr zqtdp37B_*CXM(r@$N>=PC&7mwcQf6|q@?@KCr1ca(c#0#CHE`EOP?p+JaaTx z(&N_gg>77mIiYc{lqq{g&&^Cuxmk2SLqka{1X$nG^%17h8oClo@j@xeKSqkQn^T)0 zeN7i$Ue!N6xFGKB1*idIs<5i*Hz@-^$ao~hdhDnH7h%|b;Pe#Vxj zPnKM#l~3l2>vGya`g#O;+-xd;?|LX)tzn;PJ1yqj4>r-cZHCBJmGu}VVllh}i{bOj z*K!K|$3oj5DyDUnXz;^6*NIGmd_(23_#ms>Z+`vhtv4NUA|5hC7n{YNn1UHjh(RrkH;GN*b@`dy08CsJ~~rZ-aXkx@0+^W!hP?y#DC7- zwFgaLbcva=0;6#9KWH z19v|(^LBFh!w!ID=Py3I>5b{!xZK2b8njr?Q8QHQ1A@#y2(znpeo2nBLRj-A)(6D* zLOH6m+F@m`K#yk?!CtLb4ux&3#oBD^=C6JQZ4&%)r$(ptkM!Ex0soys-z}Vk?f@Z( z-+CcZLZdo1y{pRn$)Z-;WuO2twrb2)c2D1evXGltc4{aH7;Jd0FEwD<2Rg$~FYPZ~ za+l^)Qs&d2?0)Q$DSUpyA>K1P*+=1d>&39U9{o~Ma+!kgt*%Xo_PKRQTM=EZBln?{ zv%6(pv_Q95##gx~y7t=)({t|^wNd{nr%({Ot-AX{iRaAPd?&U^l_D2hbDZeX=h^;T zW8QJL^2Fxikd(}jYvFgFwb}pE8PhFYNFPX(ATXg5<0POp2xrZ*`XYG;y1&-Lf^KXW z`=YlE9vn@HyGSI>aO7uRPEAzlLi<=0n9O11SLEYVAC{!1$->a)kz+I&)z?+BV=DB) z?R$Gw9ba-UOZuKV({3E}$aNp|HpBKO-)1lVh_O&Cq}R12s$Q7RFBJq 
z4PpbX!*6dV2WQ{3S-l+h+(&TwhXrKj`i|Vxf30Ny82SxKpEZ?`Q7E^ZWn+=6`_)q( z={wVg&u=+!E;uNBu7Tz8*j`ngOleIH+0A8J?%S%e0SWYD+PXe{MTl6Q-iW^Lxrvek zUy8Mz^G{AG(vYM5H?jFY@oi~$WbfM)&#YcFVIB1EH7>D83`y|9m#Hr;=Bn*)yp5df z3aFwv3B{aG-;#atf#tn~CdV!3^G_Dsayov7^I)slgS*dO)2CxB#&+(@ghj7Kp_yG< z1n;a1RqJJ62aRfpg17Jm-L=v#soVAn9Fu>B0HzCmQCIk2+0XYsHdz82gw@OJ%FG?H zu8pP0$#!`63z#T0A5g*dS41$|jQu~p-aH=a{(B!ENo2`VmdKh)2!#~c*LKP>_9dbe zl`UCD_CivjD3Ow~j3vcb#*#hBz9-4PuY)l&zw;X1_xt_%e80c%KlkHt|I^EJIp{qQrU;42tN)m6$au~A!TjO-w z4ct0TQmYlptHMK0*p^aiuxu@FtG23xH$c+Aat|7p@p?mlRpkH55&MRx6g-UP@0HoE zTcn<`JAG8)Yrr;Q!+ANCwj(Ez>}sTUcl=jYqtcDd`Fuls?{S7&>b6^jQi zcu&?q*MW%SeVMIlyM}bD13D13wLWRH)6somBX)ubu+-dOSCen(Qj_(R*~MYOMU}zo zeW}Op2qG>@>e(u}gI@=$g;sxJI9s;i*;7UnaDE}Sf#g@M7`H)lKUc0oBvl~II&nHr zhP;;CSecw(iA@@qdwb){1a#Php8faAkHT0F@qQzUajfCXv+rpt<5Q!>D>^5K1*Z{@ z)=u%ecM$|5J?6tlNj|LIbu$r0psYXit3KJV4XM!8kSp%1(5emAiVx9yDjXs~Ej7R!p0d>g{ls(A3*l~0Y8?WN7Bdd2ul~OP~+7G*Rf0j{A#`Uyfn~=<(d(s0Qo0>_)74W3UgM;)@BhlBy8&An50!T|s z4(wW58g#p{TqWs2Oha{cNXJ=ApH{q-NxGXEtFh|)~KTx#;A^=01QaPOP)wdRM9?sazeD?TYcF%G8oJVE7OcF0Y3U)^QcG_SF%WYx1 ztgFGyZSQ$5$4V*MZss$Nz{qC|GbRBrX@ewwG9sYcXv5bSM#=LT-D})-kr;Gia^PV} z4SnG*fF_FGYm|Zcth{fw|ehm z4p=KnxB%Hy1@AxO)bv00;a^l~&6pm+YfACUVhg0{xKyzJ{BKbOZCU{YXx)@U`$NU{cNS+&yJD8>u^bSn z)EsQP#p&XL5R@blKt0mVpFSg}7W{lB&vfyPm=4pA;?|uloxA&>{WDBG)q1}yTbiZy zBpjfKLE%gr6-VW&LI~c5kT**J-<{`JaP? 
zK0Y+RbFyE3ST5LRxVrI=7vMD|6*qGWl`$LNh@CsFXmpCp7!AQ$r*aq%jaIFvx=|K< z?#AJ72Ow5exkcf52-x;4Dz?bkhoC2hNM!A-&U2-A;_j4ok-fk3ji_(+KjD~Dk~JPL zus-@XbaVq5*WDxZwkuA47C~j^Ss%uiNl(mt`p!6^9_Vy6KEyOtIPV*}%K(n~M6oV< z2pK#NzT*jfawbc})4dC+dBMPDg(4L$V{2~rd2a@6qx&!2>}YK|GfdErrZZ1neW2WQ zmIWY0k}F!JE1e@OS);S@wQscNbTvGgLCUFIsHCtaoo$GmG*AJx6y+_engFu*+cjYCpCei09EmB1 zyKEB#kAZTY*g}IOa}#*(g${;YjK$Jzc1iFMKE0Y--9Q2m86Fi)e|+%^gIjV?E?c>o zcj{)cyP^NBeW!_~#thui=iN=iYqQ_EoASi(V~ODk)|>e(^v=U{T(4JIG*bqI)K1Y@ z7{W@2CYP@woz`~0s)Wx^+!FoV4Q~yl0^S-U_BZz$RNz3feoY*w zA6r@)z<%*xi@1HWbK(0~*FX|V=$S$z>zP~#xB_%(j@qd;`MBRsw{99PnspBE?|L&m zmiFN7g>z`zIBK+~{Ec>mw4k;l&lkLD5@)L{x-p~eBYPR1jgY~v$dQHTmMp&=72i)l z@~CnYX8ulDSWY1LD(Hkd%M-`eqX+{L`vc4%q zTtS8`K|PH;YZ1tpPw3Be-DdN`zf{mz&(m0sg+=Rd*Nz$rUN9=_Eb)e(RpOHiJQ?ry zUnBSv0&*&ESR*6I8dD%tufN=ZO17b*c3-H%m3NU<%7(4XJdns%Shnn=n0KOs5TZS@ zW_6Md)C_AFk%aL=+YgY|U<5BbUvmMO(_ zEmim+PVy_av!jQpQ0A{-Fm7mE-%_Zlg(EJUy4sW3g0EgB%r$kjK&B3f++UhBXZJmu zv1kT{mroXpqr0y1lc+1 z$b@|N0}K)Wk%g(AZB#D%1Q|$bvEyVI%3S>^!8+#aqU2V+sbD>3CVQK50s;gEy6sr0 z4n?r}jQB!;tZ!Cq_|~cxzH+rp+}P1R6Gk&X^reCIQ9G-|H%1P7x?R4WrsJx3*h-^H z#;eq=+(H(%)OMmbGDE(n-JI!^*9NnZ;0#5!^W1jMPI!07g?Nl?`yC#3DKM=CKco!N zBerZcAA?qy-7e&aw}kYwQj_>DjI&ir(bH6qNm1*tkM6^R*TZ)-7rni(XSeZl+E>3& z!N*C>Rsdj9sW4_IlKod|wl&|pXtBVFSio(ecLDY8L>U>p9u69v ziX5n&lRJxjBlNXTR^R0cRdnOT8g>arT1U2$v9IqOIpck5#O%-$&qT*y&e8G{45+`E zrjlCR?;xOQ*cNWdXxX*f#Mk7y7jJ(qmG=9t!0S>;bCvR&zd-BT7i&8pnfu=R!_dK! zt|gYnvAdL4fsCfo@E(uMsX<)l1fC*&Vw%}T9dCY8l5}BYUT!jsbM%8?F~w!>kwDl@DWZ(0Qedj?uYN_3zT2;SWc(gczR>KlcK<-52|=Dij=<=|<9XIz|x zk2&R@dmNduJ2ozJ=SXSeHrtSD$arc_pH+dJ1w>KDjiSNhVcCzS(eRdd@7F6U=<$z) z3FeB5!uK=2U%CXrri4zKN`joTHy%je^6};?7i%<70wn*DoP!gF04ntNoS(^)k$U0! 
zoLA+?v=md#qg`VL``C@qYKVS102gXYVBA`oKR>jG#nP!L{M(h(KFYMN;*(a_;9zEI z6Oh**3~cs~8gg9Xy4yu@>Zy7CsZqd3DF`G+*VZ zq7OvvHp25Yi$b)!)HQNsme{N7C)Doq(5<7bml2eh9382lKRq zHP|qnM<5@q`_`yh1aoFO=RdOUSztYEUNb@=yrbrV8xx zLL+vF36+{<**A6?VvRcsmRqDaibGoXI+#QB7}Xbi~M$iec@&J3GWN`{y(wY=ZyZ;kG*Wjo_&$M zHoAmDZ6e7jK*_{U5QVT>J?>qa)?E*HbyhHQQ&Asx)(Z<`xfjwh?X?pN%6 zaL|_Syvg>F!W|DpBBYq=<=bBGT*3=r~>3>o~ljnTK*EPRN1tr zJl={ho!k@mCe`4ae#M>5puDt%ku`ceQG&Z@A4MNms;#eXDr;n6(tMS8qsd3|!uI6y z_p^$ys|N zKL|7d@~e7nba>LQu#ID7r|qupHhf%t@r)e5VNTXHK0~L*HTpwD^I*<);-RTT76XX-{l~426saff6#tZyp>f4yFGc_MW{~I2 zp4BY)V%~ZO!Pi#TgeBngTy|=y6Nhnl8s+^yip_YQjafY5Zi{?d_1pYP8rzVXX&3%k zWT9(bjRYu@0aR($NdSf#S(R21UL1qby97A4!`ipKR>ux;J{~i>nK^h6rZ}l zdSUg;_{Zd*s-}a}YxKpW0_Q}^3buvj4Y9)ub00IdjmzbR-}pUy9?P^{9QH|0{+}Q& zpACH>gA}nAMDk)ccIuz)v^CLEENiB*nW-*gg}AZw3K>NP2w*)Ql(jnzAg3%^yl1)qjoYWY*~X6zez?+kuvO7O&o zwF9Z$oP*FSH{DkR|Lx>L=GC0OCqcoRf7hj~R5#=QBe_WMqmcmR*T>Yrk>d8O8db5a z!|woXb7t^X(}d{}ocJ>r>!P#w)9$9q^9dN;jxJ-%Nm40YNjcB%ZTRLk0F_JpYzOi9 z)~?o6;fFZMbJ!7g)4I&Y;^W7%`!Hj_XiF|TM9upu0X$KP*2-&D*<{i3jkOz&{iebk6Wc^bUM za_ATqzm&muW&Z~#K{hXKP zf+@xhI_e9X{&YH@m>eh;FF7`f*)N~t%AcFZ z?Y<@BX)5NODfW%+!Ku3|Q^A}TS3r*n__}rRsm=ss24bykDaR4-uH6QzaKdMGI__ca z_OtvjM}(fV-wZkr_V+dV6*Gll!R3e}*T-JRdNI-NeqFqzIp8E3{jl!pog+IBYFz?2 z{;bq^&pDY+naI$(r;<@x_we&8tf!LwMJ5sPKZI^Fq9No)V-*J2nc? 
z3pe{Tv(^@|?r1kk$;Cxt(z}h+i$CA+sZn+MzR?k+D*7p)fKa6L{uSr()Vx9Kr>}Q3 zd6?GuKJyZ$ayvMt^-|A{Y5`#*l zr`$^{g7oZQi)>*3vLF2OY+T5(8N#v6hSw#HU4f_0LTgK1&#Nx(HDt>bq$H=0LFpSL z#YR{>!iWw0{5^`#yZW&(JE;*WZsp|Cg-0b<4EWK9@q^w@(i}x9Pn=|5af(gCcZI)Y zI|S|6pka+DNbQ$m+8xXp7o990p#S&akVhy2CRN6XsB&ID_vT#}PR$A`O?^zZQYw0% zk(E(un)P$gz=ZDqR`Z121JG;aIsXxEN=|A&9eOY3)|jSGi(a<0mGo4e45?{%lj=bZ z*xctCjx7!;_7+;_&=pjEQ!&51nLbBkgNTqsxK(;G-mm2F<`35S3HH~Cs&mS!`xMM_ zey@eqOdcbr{9dD<>CKxV4Q>*kC9#-!Qo`$5F^$B~lGh_8iSTbA#ET|euz$`+5z2RN zi`KOp&wKo?G|tj~vPZ~|y(rULF`)|LgXRIXJOxwHZ%6+jk;F1qn#|m!Iafx0q24Y&q#XudZp0?zG)S%+cPakvV(u65BN(I}AN-*edkBP`;oFqM6X#!!h56QyT6t)^w zv_Hkwf5h_jph40$r+3o*>pxcX>|Ku@%;_Ey;x}Wfe-v7$*ZGJ`TC7&}PiCUUlx{F* zNzZ`dbhus@;Ur2;D-?>f-(VPA^dTgU0!mrqq-lQX;e9;WzOoi2c~PR5wY4xy@2E$v z$cwk(G`y9EpITt;g=#>=ug^j5h;Z4vQbzISK5E>_t;lK_uebW7d4JU$$7Puc9#J%e zm>Ex09vI+XaYIq!(a~g173;~uzg*Dl00BO$|3VwhjV$6&9 zAExJNRm)$e81h&Bxz`44+1n9Qe)j;UaUz}= z>*be}dmciq-P>s?T9K6$^|q36X(vt99NoxVvpZaf-uzpda$OF#E~W>6$q+79GSX1x z*1zFSC1vl(s|%0jzn?FE571iZQ(SBA!u60ud=vn*gh|rP8fi3mP4e*CCUh(0->~x} zaza59Wh3m^$2To=nBLb{v^Q^$QQ?3V?nMlf0j{xfno#XNpx5OfOe!jB)G)4C3;U|C zB~=!ZqrtGY#HJ>OT!2j+dKGtuN(9Y7Durz7+rNp zHa_rLG`k_g&?r9Jo+nkDmqdQuQKpOMVYsZA`Pf2&r}VMJ84||emAEC>Iqrk6fr+s( zlgZouIp$l`kEG!J`eWQBtWb?yUd_99l(EL__v8ZP!E4APO(?}w$=?iQd}F~;*MzY= z*_95ajf%2*#e%e7MGn^twQA{Lk? 
z-#vnelJtOlb1*%bS)zLeYuUGn#W19b>!j9zE&n@iJ=V)VDbd~OU80v?VKI-f7_1l< zH>hoUTP|47Q%-BNsJ^$QzvCFuRV0(`2|rY*fM?wZ#8)ds2!;L|B%hX;@1DQbt*kV@ zuA+6n&|UjuhQ@mDXdgwEtbWOs4oRtdDKiVgN7M3GyAQzkYLUS|^{C)-M4zl9pem{}C z2_joLFHEbb#f+5_bcrIzZr%8k1`NkC~h3U{zQ*;;Kk zb+xk(0d+givJcYhhs&BIlo>d?Et#9U;o1r025qTY)n_h(HBA`5(~bFx48~|biUR+( zy-_&pqE-4}mKMhM6FC|p58X9+v>M*azb!u0oqZNr`E%nJ1neWe7X1z>UoN7B z4c%YR{Ll4Hf26KAbdFZ6dSaFEj)(FKbQ**ol3mG4AQina4~>|kxX)adpKEd5COL@5 zO;yE(axBw~`G~#o4aw{XEuGduh#zV*^V_fGO($3k6y_U9)v9Yj zaAZvOoEysd#t&ts6AOL}QPgAIACt0i_2vZcZH_&EqiHOVvB_P6VFY6K<*)&#L&0VB zkeV_I_x1SBMr=Kc()Ou~vm5hC;hVW{Yt^3b@jCGwAGaZNY;*L5hw0-YDJJi!QAL@z z#mB;zwckX9jD=oQLZId0t;9W#TrVL(;1Kv;80lx{R=IwA$)xF6y8gW@eG}w|Be6f> z6;4Aj;v#L)I##-8zKLRLOIoA9X=*Yo`0%FxNUtE{achvvI;pdL%o}~R{6nu^1h^^> z(&yNdZC{=zVvfKUUQWBV7^#A*^yfQ)N9HGdYer~jq@tEpWbxw`$^iGEg&z}AQ)A*2-aEr|s*it-Jd0UC_TRI`ptyA_y z6k4@Tze=U2sMk9N@`H8{hNaufJuFbb9Jy{LY1H5CiNo7@zw`PD1tQsM>gr~Yf3Eh; zheI zgu&@2-kThTji|G7DJ&NcX4~6!B>PLz=M%gdqO!bpl=t5U1^F6;WQxthEQQ@!ge3oa zjc5J?L5cx_wzKhJ|Ajs9aLjhjmr)<}W3$_jq*f#x02A!~l3^aJb6{L{ZG+T&Nsba+ zsirX*R5#C;GjDN)h!rfsJ9??z)f(qj)7yYrOG^b?Gio+~?!BoBo!cSU&--(lh8|-? z%nc!tpb0u%_4^BVFzO-p-fi+(NSbuLy=1+mrCon>zbYTcLrMN=ZQAY$$t5G^hKk<` zp?KrBnjhx40Bvr3Ssb5K_j8Pe!lZhU|HkcS#ZvDZD}#bL&BAVR1w7uu8@<$4{%&C2 z{Om)aJARoY)~buzv0o#P65Kz42*Y)D0AA^45ErXrM54yb-AF%vd634geQ|;Hqmsfw+UlFFFjv3zF2L^8dZ+M~Ph*w#1NIArjg{$ygbE_z zCf@U|t{9V=(-yg$p}N?$-U47@pJCBKWoJT1n7xU0O!B9JONT*41-!rk4~xu|5THSK z$568k9*x;+G<0J#C@rZN=0tH9tx2Ru{+G+YRe68Z9T=9|A2=oMYi zvfqC!+uXu+KgWZe5}!ItshO43wv?JOrwMR;mD(BUHiEqpg3RVr`6^8HI9rVRY+zck zcwynXOMd&tsM{i@$@k`WS8{yEXJ3*gkC82abWqbWHrB#UDQz^-lu}$li#(TF{U&5w zt%dO6W`mIUraRe8w5^QMV3MuvN0r`&$eo_W?F{9hoayp7)BhV22q{4Oz%@A!e$`)n ze{&tnLs=6G52GJAGEJoz!% z()@ENC6RIgdQba|TJGu#e;BZD3M{C}SS8Ml@7gxLS=_@Do?UlhM-C4MPZ*H)ZJ~1m z?uUY_e`uss_of{PrN3;+1v2mYYu{Hn8WENY!0pjnGT7F4!?z@6h3O>zmuo6oRTpl! 
zk~jXZbR$LOPTPQZW)D&`44U{sm{v3Cj;C=%m*5M~Kh>Pwb=4 z7cb4%h8}atwLIf-rLVe)(B9gLl+?lgwJ?1b^3>VmzDpQmv?^KMyt8AC9w~W?N4d5Y zoUdYxIhHB2u4?(X{1oq-ThZ-K;g^<}q{}v@<}QgJx0Ab!X1C}eWCyU{8 zv?n;Ym7V0t>W%OZ{)cC94GXp(sNyjif@e|eHZ}8!b7JB&MZdZI-b#pI_J8my$k3n@ zd9lE9Ogwz=9?%^vWFuqr-5t6q^;rDt91S9S+w0fpLH>(?qFvxpqdLxQ-v0SSh7nbv%X&$Ypy$ja zG`E-NQtWNA^DD33Ij_8<@{>z;qsD5{c~4U`dXV^_f+(ObuzP5@`4}qYsONK#zQqtf zPfqr#=!&Kz$_lP{fOBe#nAOY{lb0n|tY%sTF|vT+5RY*=|1*}k0Dredw>U;?#5Uw* z3tIwQ;31;i8B7kFnN0Rse$hiGT6{)+`4LCMeq7o#-616Rw}>LGC$tGLu1%k>1e&;C zSBFxwSj5M5IG?#9Sl_Z9wln>ViD2xHb4^c@1CM6@7$=X&zb3CY-%e$HNB*s(qBxV8 z4C6s%S9w@sh!z9sVc&v2NHIaqxO$h@?7G*~{1dja9c<4bM|KmyA*7~+AkfTL2z3s3 zN3=#SL`EKjU7gw-R2AS8ymec%@9MR8e7(JU0cw*{+*qP}2=<$a(cQ07Q(+wA-+m}@RmaldS{}2VB7|CG` z6~xfc)a24NS>}HW24X19d4%Y#UqD#rSm<|@a&hXDEGSF4$!@40$~nE-m0bY+4XFt{ z?bq~?VZrP4oc*O_rG^f)CdQkfE3JLnkxsKVaCyuFCt0WSxltqg*k??8e3HnxWHCcYe+afu-V(cT-9zp62f^*u$_2HWHgBXJ`TAQOjV^Q&Oz_%}FjiIq@QCdalLJuW9pr zyzjKi>FE11pESgn_r6t;*}-~dPsj17S7H-+Kccmyj8E+74O7iP1&b+dzzbaE97qH! zb}vEoJqGAw*;lYKzp^}ZE8j;XYXGwHKnib)_>7&QMi62Eg8~>U-&PPS&-rdHw|nmq z!fUI=+$oX8g>z0q$HG_}bd~L)rvYMXmqWx^`?b;YBL@)hp^IPrnKJYotsI#{!}ex3 z0>Du-NLMc@oTM}$BdLj5kGFWJGVRU`^Kjk|-CUcsUX7tYQhdT=pE>=K$R*szJH zo@y?R>-vfj2;&5?cTAC6E7Up7gK3Ie(DhG3_HdPqNM?@9T0Wzi(~kde9_p7`fvNi- z@v{&02XvCPPVZko?}szDcD;mF)Xx9GH*TzrQZjQQ_(q@zI*m+QE2aM`qOEnZCvA8o z-K%wc!G!-!ZWPVbKT&C0Wmm3wIzHsvovP}7RF#HqC_K-laqIW*Xy1`izv|Pu+u*o& zN}w2^?+9zQkw?5#nK%hRBWfE1`Lr${BdbLkfm8v^R2~+*(L5=i{^Q3QJ%K3U+}oZ- z*_ac%7|bQkC}EE~BpFJ#*!iH38bh03rpc_kjnN8-!uV#3*%%|gS5)3G2^l$rk4=rC zv#eOdPVR}hRjn#m3g=KMnIFV&Dt}bZulan{U%!IK*vS_|YOS%R{7%@u&!inQpi(Af z@F~8`xLvnG?vXI&wtt5Xoa8q4mJ^vj+5|Zy0>PH40qzJ`2$}`af-uj<)m6dolRJ6oC3zT??iHsiq*$Fa1AY(&L-~hR|fXkAz{Q$`;fTzHBQ*$P{@_HG0 z)2oW{bR}b(OnJ@pD8@xi$h$vncV%VJWDM&8<|~KpsjhVpW=+IGR#R!2|H)Lw^vFJc z?A`tI!S>*Q+ia##YGZ3vp(!lM^lKCkw$wPso0~e`mx}8X=$xZW5En&nL&7d=%+;^h z@N14C+I02z^Q4%x4~)|8!ch@UOXgi`t?a#LY?ay4w*7maTi*e^{A)1T$5a2*Hd}DJ 
z7Mh$){{>Fxs>Eh$kmT^H0wXs7Hs<56pK{5Bj(dnHvA5BO(TfySL8Spt1)E=|!8r5`yN$0uAP{M|*%F1f< z;)T(;jl`MZCQ6y#X6><^5`RKz-Yo87$frLNdJ_H+eYD*To(C4>+KYo1(YAgIjvN7O zD;Jxi*Svr+J24}BEspyQ(W9mH2lQM9v<1#i^l2g%+EMy$@|V;tTPbT{F%b*2$JJ@L z5#0@&dF`@72XuN9KH%tPTR1iJHl!)@)K;^AJqHx3HolKdFzGwE>4h{0&4XYD;q=Tw z=eX^hkucnfth~#((=gU^X=BM{H10(xr^v*^=Rw=^?l!^0R`yOIkBj|XzH7}$F%ebg zmr10+RNHFUdzXiicr1;O%sM6>?sYoyWVB7kG{R2VQ;IcSOczG}AHo_fRak>o(GE`D zL2LXc2P|gn!xz6$zThJ*m}=>pXP|qoXChl>D|MFbh=pucqE)#20FQlcqsH-1Ip{+V znY(Ii>1xF9qc{Wfw@R=v6ZK-J+!SZq;qqxu_s0A9jbknZ<*R&#V#NeL=!*E&hq|gX zGfu&6Z^Qv+jm^i5z%xZ;qy&1Fk(F>Pzb;cahrcsExkrDml_e#;u~JU%@fj|jpzRaU zEJLA{D9zFqHdYL!jX+>No8v!=Fh7~`*GYmbjmW~ssW2xJ5H*-y)>P=>M?aJJ=QT6$ zVio;|r_3VF>Iwl`r7)(#{Xuvc^aFAu;`}1!u*qML53y%NB}x zVt?3BQ>8n`p~?NGLsNh?W%jZw@tiLHaNGyVEBvXcOq-1dp(%|TSL-W#OIBV0U4eGj zx>NY?-rb}*%m&$S_$A zKiXYtyS^E45943}xT-e`?zJ^85MYMCn`9$5ekopIe~j*dhiO-Yj(xaacPCiW@F$J! zs?AVp=b3oDXVM@}lh%J|r_Bnr{5;G+%8wQTuzqlvQ>GWyw06|cTLRU@ zS~e}PrpnZ^rgwGj5})7bYyWfA)~0OD5`T)1HYQ$6ra+;N-9bek{2!avyJ5<^rvP-@ zvbA1W^KC2?{)tdlmMwSfmo( zy3=r{;?wf$2=#L+gYA#)TAZpKUpxlf66ucWYIS?Lvz`hGCuFH0W^xs zQZ>?K10fQhd5d|;X?`?*mhWQRb*t&q_xV5VVMM|y4~(?Wbto;UKsf-akd>g{kV)NB z<0*u2d8utnfyPU~=bjD6_%M-6YqPyOl1mYmTz9(aC4cGLYG}_Ty`LSF zKC!ElPyH||1vf$kO&&;A_%r|@1NjR;$@yXDrn<@WUu`9Yh7N_s4x+?Evq9hHEm8n> znDaB8rDQJirt7j?cJ*b;tSTzAnL>Hj_O0t*lhayFfxe}|s##L{yn*KJiHJIR($k#K zHCcMK^uGs&YI3yPQo#hN zDwyBWz_IinYvhf08S5M7H{MY;u;Z*KbL$OVR<8q3bgN}{RKko;)vc^_zMzz3NFdb6 zT6lT#E&yKUwW=Z=E?uX2kwmsmFg*mg>IS#s* zq2A4V4z=6sL%9|^&)?(`^qcp2efJ`E{8~VG9Z0YTVs1FmqqfPMP*&w#Q$c3#sDDsa z8>q0m{gdf~e*@M*3+K;=r6xB2tW`xPbxcJqhJq;_l$5qKmkcY)WLpj#ho@ z>>#1;2D8>1(f9hFvTyCv+89P+tAEjfg^x=$iB)U4jPQVPNazuk@vCjkkHb0!;JrfQXk>DW zJ-Rt#9znnQf37+ase(2i;R0m*$qutR$rj^3^k&qhEbYvOAVubS(;CfEZ?8I?6*!Qc z={c9Nhvx#aukjj(zqzrte_*woP133CZ%|Dbw2sx8Djbaz>7fuWmN-5iV6Q6}hSc>q z6?4yDZr%^xEy&rRrqQd-Tj31ti~zo4O%YKhebB#I{6FgASeL~m$fMrTi+g3(7OSmi zbb61nfsKn1o+3L-O3i5u%)3%)m*uMoVxV$#Y4M4>JM5{gg@3L$D{{TFpjTfeTF+WU 
zv*y`GpfNV|)X4-h+a>eT&RLzJrwgMOSwW0aHLL&~@0sA^0MSi?nD7)QLD)!i9t9LD5c}k^ z+fuy(Nh*czY+FvHJ+szZ&|_!v9$T5q@!GkxYh91Jrc~7xvsd;gezw<3b#*pr=$hIK zsw}x}D}SvZ{mmbY)co~bM`+KXk+b?ePJ0ymmgIv0+H^(=-r8gMYXxhR@2Q_s)U|Bo za3H)co`SWWI`a%3j+uUzTymAvUm_ZK_|N104Zbo2+qq&vA41hTZImqe91Yt?^29p( zeiRG_UqZ0dr)s1sJ-6EzY+jAURyUbAs)yXZ?a@@@`s`3aVy5Z;E{N+Z*7x*NWa}*ju0$s>3zf#pF_5laU?Bc>3mwS#{Jvd(@qkraLR-hi^I+2A6^uW8uNc z!gbfn#U5CEwec>UcW8gCdy?-k%V<;l+9K=bMv~sg{6OKm`ii`E8_(~w<&AN9SmeNP z4H=9N@v-k|kNpL&rdtw*AF}T4SiGO_X{ys*O^b(qsNYhM{GcrSTQAn^r*FY7@j@yR z0{r^Fj=56+`p5V?^t&{k(UomRWBe{^Wk-Y;(FPhTezjrXLVKBtojm(&`(aT`&$5_- z+@6@m!IkdfYq3#^NWDOVq(e3K2dp3_-yb_RWmp+%0lcGk58H%0=vN^93%+W&W8M8? zUhwO~_5a|^v93%`L#GY-ChNxHH*U~1e>bq}nI+d59ZjCDBcWWa6tU^NWY>AN2Ns*z zwLg_*PHpe*{_kobja+SYz+SEo>yjXqu*EE#2rrw{%mmhO2Z*3{hHv5m?f-vel)rwp ztJj}YWVYmD9wAV?$d=&= z2-MY}KZzQ`fhGv2UyAH~unUqz&M5;Yb{3T~2V*4LwKAi!rl`fYY4>^}I}FZYmJxE9Ooq zjXNmNXSRHxQ>)toLQ{xDD&%egvVVCy;E>FSVwJ_+wCweM^jHsl=tR?G@QomG4NBR` zbQ>xF`f>z|X-WY7SqdkP&g73;n|rDSFzBJ8xZwmy4-jH6MFSTym+lREd(Nx<3<@}) z^OJ_50}h!;$#=GRu%pz`%f(g+`epm~dV9d>Pj9Cij{l3A$Il z59)DTgdX96LjQRB>rZdK`~OiGt5p+iz#ZBXLVH6=`mJiar)l>OI{$ zpO?s#FJX2bXeCYSV%#or)u_N!PV+IH*~-`2)QLQ5!viRB6iSdE$wVsqd|H`Ed9RUS)d(= z_rI2bD3tBrSj3&Gz@$6iZX|}jr|t8}NR~xHbbzA)$8RsN$1Pc{F(=F;J|r(cQczGw zga|MARpiuMyRVI?hsSDA@T1`G5G25WbOxr4n=L*NI45@z!r%kowhx*Yu^ zQ^a+|%KVgxD2Mi)^|@F4GPQhBKr{OHX!M{e<^KbKsE^I0nDu!b*+zrhcjwH)0~x7& zBdrO1BkRsVOhDvf<@qqAr@iMpCD%5h;z3yZMC_B~pTip~2)u_`!`bc7E!6J1Bpwx; zzR|yYh3`czq1aj_oTYikBsHAHwi@r~i%2+EPToP8-bU87aRW7G&(? z#UkzoI7*{;rFinXyj!t&d-@AYXW~j99kkD;R;_!u<}f~Uu5;QkDfxjJF7zJ`X{X5q zPV|NjC7iA(8~zcO>pjxQv zgnjNH$#t%Q;>MMJ&1))WzlbH`W;%tKH#)$|MI9v&E)xCbL8no1B0R^wYX5PChq_6w z&x%2DvBx-^PsC;2BQ5=VzMNj_AGA(?;zVt9=yG*;iPc5K*Do_7fX7GR;w)8i? 
zECMrFQ4q^0PlG=BH#H)xknIUIH9kyKuzf*U$tf|g!u`J(LesQaA9heGT_0Cs1(1hB zVLxT{5P3Z*+``1`sMzEUw!PUeI&D5E+d42 z^|-TTB*v-cYG17eRQv~GE6t+2llfQl-(S|)c)usDg-yG;K*@UZA!YNs--v8L1613+ zKJ-HF|Gj2L7>gLK- zGKitAsbBY!CFdD$yg?HFbCY_=Y6*l2wah74wlz4lH&Eidr;RAS(q{W;W5SsQyT(2B zKU@y{(5TUIlylfwlf4I1ZH;$x>fU=RrNH(e%2}AI=Ahs!s5!W^0NodSnNhO9?@>Cp zM<0TyFlE4!ApdBK4}7>u1ykp@sv8U#<)CqYz*a`z?Q1u8FGGVVn)GLG`t%;l=2JF( z4#IOq@CN>G7y6oPhRKCp2diswn-B5+hb)70&iyTJRHGI0%Y|wT$oxaV;TrYrlk2sr zK)ZUdQH^{fH}_JVg|Z(q_$eTw`E7<4T?LQGB*_FMJF9`tlBXNFCY0|yi0OhO4v;=2WbTLejhyb?)>wo0=XEJ^N?$L4sc2 zQU7uS{fecvFxb^m1f`tEr^Pwb!dQ>l!k`>54-LnlRtxpm9SotfJHZWM$SZMlb-s}@ zX(mlc4%nTwCQDP^%L@}{?8cDxej@tCn&)Y#n}XZQ_Y4bdJI-5=Fn|cjBk7NeW2ua5 zOC&kJG&OfLJ$Yd8>W7Tvz*$t{MT?@J9{Jv~0zOTLRz(v$jO%XoIHcSN4*rmIxGvdm zcDlD7KHFO`ZC75@>267r6SsxQ*pN~(KZno&ARk)4tmeZgC#*DykC{zj_C-zBs>^1xm-N3MlzbW7|b7)oT&piXjpl?p$-*7#(@;H7& zpKOqeHrh~u>MYtmrWeDG(nky7PXtu9?2Mdf!K)Tl&-BQ<7?^AGFtefd?ejH!)`_B&?7 z>#P4JJ7_>ju1{g9aKFG(A`5JM1Tzl}KBk4WDHU&Jj-rcNqe=(rDr`X}fv6cJ~DEOfX z-b^KJr0E*TEw2CpxZdxpfZZx^6(I?!$GDONzD70FT2tz$IO?qyy+5s~ix$1&IA+PU zSj;nyeHY=Zd+8g$*n6wh-YRKIM22!POTZ|2L3S)S!uu43R4Ep<6UiUef!<@nD0cRf zz>v^~DJyxPtjD@(ez^m`DnmVYaBTcz8cP7-to~^Su^0wRz+uO3oPJWgYHhhCeQE6? 
zreO*lUW=K$3=JcZYJMih?2$*IXbbB+$+?qlXPe+TlG07-Y^yc_`di3LGJ&4={J#!T zKpK@nj>NYIF)mz=403wC>7~`vFXrmB8APPIBGPc;a>lYoUZ(QSbesUqZ+1N#OAcH` zX=5?R(;t_OA9;C_+Zb)b%;nU!BW-pk9}Cb<-0?2<%ED}Ekzh`?!2ePdP;Zg&mbAnW z*PevmfC+9Sb$$BAZNrA2oJM_91cWZ9L&*v{1jop$+81!1@g-0JKx0n7bj~> z-04ye;e|*1aN?3uORXH_h1VCnjB}>aZE$grI~)9Oyj)B!F`g)@-&;%Jd01B+KZsM5 zJN5rj_SR8RuTk4DAt5=agdk}n0tTRTDjow=Is_C1rIeN$8k7(d6eR=^B}GDp4oMXR z1f-G9p&4rC-S^;$_gT;PuJxTibRGZT#~pj`YhU}?`)1U8*^{nvrjL@IcTfNptw~;V zyqYkEqtiF9WvcyK9f1-lBM>vTmO(YKz9f1^`{XP-dr-b-=}!VV%qU@!+8ys#Fw$J3 zEK5AmUSk&c<~yy>j$ba4S)Vx$w_hyWpA=`6=e9fb+@)A4>FOHaI9SfkJi2|ql}2HHuXG|h$FY-naN z{=u77axlXv4j{(i=}VZS-lk*G;w!htYw3DQz>{%g6i@vy8EfyLvDz^Jbt9hGSpGTu zg}=XNG01yXQICu6htA$FGKt*KL08ZObb9kvRkz{8uMyOfIsQ`Tc8N~)yU9k}Q_{@6 z>-KweLO7&3E=YKNtFnj+d}A%dT7teZ&a(Y;F3R>Ey>-!{q&UCjN<_$t!MwM{Ef^n$ zX+^^cn$r0RP=tiBcJ^-MLKH&PBLC#x+5rOHpFl`X^Nc^i;3?eRaarP3yBg(vW;D*0 z{!q?8{dxy1O9Tw5D`(aGlyf@KzL#?cN8a4bq{bNC)=op_TBVD(52webN?CB`x zR~@~cF61!;P(08D!qc!#TbBlDYFpP&5EBL<@;bi`l!)uV8%G`LJa2NU0GUR&zS6#yFQ%e7nytSS9sFoI zb9@i$XC3R3I>G%0bdiXi2^CwGC0=}+=5~C> ztpd)C`c#dVxK*2?33!X3Rj18H7XGr!zWF13zNk~;HuvP|JtW}vB~Rv__XEz^&z-c< zDfI1OU^L?qt!EQ@pwqjY*ig~!_*Wad(UGxiMLkL-D4Xm{18QRHJfl%)5K)7ebVTTq z-^^0SZwRO+=|s^)wIlx+%d?*pn$6mnS-707Kq~OgJhNv}8y)zl`3ACzU1WIeIYrEic`4b=gfaoSM%c6s-CNP#4H?u{-z zu`f!POw&yGgfw|X#}mdJ5B%6H+E0$Yx;JZqU3M(4VE0jgVFef*nX#w1K)ybK{QZ<> zW_zk8#7PF7VC$u;AM{fHBYSB0pbyg}7Oig=>d4Zu=g=K;-<;eADwMa@$0cMQmJ*-j zN?(L-{u}L-$4o+nzTn;*!|<#&9{tW9a+=A^NZ0duYkQZ+uei-_YKtx^c_au_OBw{> zlx92-+Eu6agPD?_f*nOjvVwIMxrOV2Mg4;$bJRu>NY>YCR%??>f$xQjU&SKl3m{rUDo-}$pn8RPy8dl&0*#*={A_`ph-tj| zP#DBr`y^0tQ1U-Jm**llBz=tT-~;n^ z=0tVT(y6`;5j=xhEn%I-r)ewYj3jFab^|vV0Fu7cnKZS!CIztHj;PnWt*NB@X@M{~Rjn+$I-+3$#(l&{Yu6d| zV49P!nN{~4Z#yD)dsV;LVhSf>)J<%O<2|H7A<}~+{2{H}t5g&jum2uk(A-jk&h zzZQX&hsda>B<4ntI4E>{Wo8hVGEm2e!0Xh&9GxU4TruX%d_Qe4w@0gqFMez(b0_FE zqcYwv7QgZ4Tt-D(RH&VxEzNFFW)C{U5Dq1Ok4ts9skO~%h0R~y-|zcsV!qR2q^Tx> z%d$d1JELb3FwzK;_FG}iSgUU&o0sBG@ak~6ifBd{9`fjh63Lq0xzkH(l{bFkM1Uii 
zUydufq33yAfNR5xPH}82b0Ev{`>V|NY~$td$pW)o`V#mMSFkFbn*LtO;S^EnY9nwp z7mJfkS{4mBjoFO=u-$-HLvyB-552B2S-)TK5u;JO`RT=o%@Cm>7y>{gGB;%;VCwBVS6ho1{qKfoA^qdYL* zxpOPh!GIzK^NCBb4iW_$tz8N+yws0SK9tf`-o1Z#x3E#@NanPCT*+&!MuNmv{Rt(p zhg72Ez$3|$zu+)>f_m1EUn||0Q-1!XUiHzRmQ-9Malk?jSgAjaZD`z(T0Ta%RQEtG zXRO0H)uH#Up36AZ_gNL!v&!z+dK<%CWN%Xo<=++d?9gN7ASN zALkZNF88F=ouXfmp`|2L$XK`4GGdo^BR@_%g7 z**cRe^Y;T7X*4;Z@paaY(aL;Lw%_LynYAFrvuEVi5ofei*F=^FFzGA=+-unh^vZvE}XC!)NhQIyUCRZM56X~ zwrDPEOTG(U26^A#TwCiLdIY*O5oxie9c2FLkGW)#uMui%Hxo?D>a8!dhK`NDJ4Ch| zU+CK-r=ik<^`176_-VZ~$iIWLWcWq(G0cI$hoZEM^Aj+~Ddk110Y{MDhJXRVtyfd1 zfY6Mcd!Dd4;ByBdrGN+rii)l2Y7{m@i*L74l(B%V2!LlO{!;KzuV~%KOQZcu`0pS; zhG&zsJG4NC%_CuQWUz&L^V;DL)-WfJtzgL%K4?UWRXsptiWrd{4{bOFKO20wBVJFSDW z338@$qN|r1U>b{ymKD@Vp?g%cf&rz65e$^xnY(`G?j}Wkwq1CUJvU`(9>erB>@sjN zX_>d;p3G!t-8qUcc=YkwNWH(G!yuee$zbsE4NxT+0QnC<+L5C_`6ktmQ4+$MhAXs^ zre4<#Y<@|bZ!3RZGS+K-O4-uO7&QbYQPNOxI&IG_31 zb4dyry%q^;l=Ybwg(9`k>|Z*xV4vK=s#CY+;?aPVkK|;kgDh~QO26!z^`{W3D1LoV z8`8t)o(ly!({&GPziEWMkS`n73kC7MTCdgX3c`#_>HkXz8%Xuc{!x^a z7G*NOD#gn5jk~vlc+dj?`t8=Q8M&Kl zF!JbagE1ZZ?XddVX*4RY)Mf6#elV1?$A#gIfK!9Eeaj-h+%CB@sm?bpGH=v2NhF-2 z*X-adZ!K4n`JlA^`Mr{ge!;Kcb+Oe=sV`qreO;guIE0s_==AyM1JgpDq4Z_~UYWwg zLcW8E;3Te1UT514&HSo<#j8g5HdF##dkxc_p|+1Wt>@CvYzz~EnqTg>ThFtstNwJ* zjONQ&pw${Sna&~OFpO3YvR;^klKO?5^59pX%~i+mkMp>iX-XHLPLN^sku((n2xhe( zIX_(VFhhU^jIS?{7Yc`jRWfkKIu@>u zU@t`lPDumtAS~$j#=3Jf34PvNb-7?JRAD7~^Eb0^+izH^veZ8{Pepd*0H2ZZ16fMr z=%&kj{LIVcEXqF5401Dd`Ejj&2?YD<2*F0I^)Y|?_^w>AV$nLW+CIEtW#H=eH#lRrm7dAMTyutC{abtNPcnb`Z zC{FzUIAYVKVo>nQjxHB81VUoW!4d8$PQzG#Mn%gst(Xv>(NKZzF=3_ml+B2_;vE;b zNbo`&bv?iaUv!C62FaB534os$t}XIp@;p#~buS+BH0|5B^FcmYeiO#wu!^>NrvH1i z27O$0wxq#N5W?;ASvjI{B6^36OcEQE)2Y9jbYbT#`)W%)vg#oTi)Y?~MJW%EGuQ!s zUg-_aBL2R|24Gjou^qeXPNmIWbXC`~^sMZzl*({KDC3-} zl2-1an7xO#GpW`)x8y!>{!nufAQ8{&BEkuKxA@P8n(V;gZM>#ag(`qklp_Ts@^Du$1W)_U~6i^RfkH5U(^?PFE z6CBpW=57)^CTj7`{%hP40GI;!xihsUnYPz8cW{Zjn2Dzy0d39C3(#(%`eT?AyT+Uo zTP?j+jz0$VuL-a=pf|~KSM4Tlc6wG@#RM~=y#8-U3&=-CFC0BNA^8EuP!z%}mBhPx 
zf)?C5;!cM_M(cMU@D0pfXnd8(@oI8D6zhtO_oQtyAeX1f9RyDWK0_7UfUGxUfaMIJ zp>G2oKkQ57IUcRLs)@~we45K{{J(=z^5n&bx2>#@FTMN=VkMsag_Cx4=|))TLU+M2 z%+-2~(hZR5w7+@dY_a}KyzRT}p>@~3j)&8!)af~>fEj;>EKR4FKoW+4!VI1_@aCRaxjuOYax=G>ZkK%j`giXt_QYqW2ukm%4|kc$-I$r zDa&-m!2I>mLq_%!{qa-Tnk9Ez3k&o6S`;X)1@k_|MZBUHz2}9=1vAZta z0>YjgKpKTXejO;K38yrHExh{FOLQ$ghj5goS%BYN#!D$GEHv#Gmj23ow(V&3nyX1) zmy|4z-noCGESeTjId1`mY=)#FS!87#Ix8bVC4@TQp^#o8^_5_c*0rH!dHfW-HJu;?g@EMRpcj7Hma@ zl41kYXp2(DmmqYDhv|S;4>>p6CWEi94cy#nM?8ZnEirf(7`ix|;m;s(#$w1tTN58= zQt0*sCYR;z-Te%-Pixm6RmA&+)J~Hcr0N3g3vOhr`vBdR#7vdqPZLC~Q;>H@C*}u+ zv4;CW?Qmfxes1Ps8V}mlekkd~ick82KPjU%qY;q(Dx${ziQf*L-aH|A>-5MxJDi^L zhW|gOmq?g=D5~nPPFnlsFkl$_0%v8QxB2qpeWf}*+G(zk3QTA}!u2)2gi~pW zTU=HpxwTvnzW5T!7)||MxxukO<;Cjl-6j~p_5YP#0kLyMDew%;Yk4Js!_-YTuIoT# zsNH`m`2him?HdPf!mB++8`Ym<>U_WqWczeL%c zsXq(jR>C7KQq~#jJ-I}^JKPEXQgk;-9GlSl&2_=x?_3&O^=(A+ytRg=g9Eudtin&j zu$UKt?2@tf3@?S0E$$nZXgMAX7%KDGjOAxZM~9Vn8M2z6sq~Cj5HEfRQVwWtU_#0J zGve|ww8_Bd#{c|Xah6O3EAQ4-qM0U2z6IO?bq(_2>RenhfE!mIb%y6r_ubL_`asK` zn{G|U@DdTPWC)Y-&IgT}(K6!BcMg4!B3hcLE{E1r9f>Gi`Nx@&R~i2W>{7NUmDaht z)~jlZsr_aEn`vaap|W7iUXq|-ox@hgYuG0_lP12R_;k~4=E@H)no3+zC&!yFnk_Qq%h@E7(5V3%S3g= z{;!dqLsG_eKfujEF(->kiNO;6uU`9bd(@yWM#p<(GSxea}D z_v3zqhf?b7gme)^ka8Hhh_)1@DQ>NB4kn)C%5kK;rRWoW~gP*>Z5G*C8$7_CQW zY+SHO*_O5wS5Il>zm=Ufk@??n4}^Wsc*|fFt@lZxfkEv~GQ{%hx2!>y7B>S5Ih`*V z49J}shGEsea~pR=TEfV0@Mz7Vj2CHW8S=yx^3xbqmS2AK_t)81j3>N~^X?$tFPrE< z0LWo)>KU zo*ivr({B2b&2(kogDBxP=4iTIA1GdRVW}-|#5vAx%Ic;0hZmDfFDEmM29x+NKiA_#=hqv_70!c*bHyMnMG(74!fPjuGP^a(>upHeMwKri%1l6q_cZc=sn_H zFs*6HRHd27y-}?=C6`>3%nr?tP1eLw3ALfYhs1IFW7p~9Q}_5d!5m2Fh}eVB?;+1- zdiB@`lT?w1Exqiw7cvQ3HA?BMDXkFjN@8juwx2r-r z%025S0DFqe!$&+vjplK%)O$>+%evDd_eZCNUm{wOA#<#ctoOmYm!rzviQG0c>@F)2 zaw(?L#PYKydP*{VN=Iyl{J)7Z_RcZib=FUjofnf|x)sM>wr_BhkK)#yUf4Pc3Q4q& z0r#5GKDs?&klaRR20sZmIoRFlqG=XzdABcARYvLXyx!_nu@kwrx6Z=rIcuMUMw9|0 zvnmqKdjG{j2=nDS9~H=!5$K{%Z!p#sSnhFjv@}ChsC34WO8n!^J-EXORH;SlhTiP09L;7*%t2y z539*31?tr(ROwA#(;JxC{LauBL8B6Wo1SMCbBtSvLj|VOe_noZwq!mgH#(InyZ>-I 
ztf)dC{)7jgP~gKrJyvlZ3VD*R^IXws^`A_MKPmtb{(l~ppz@4jhC*6X*GE%4=jGST zez2VKV74lFRVr%XIS*S-$L3GcMy;;*!qbxLrpMuOj;bXM!R6FU`78>bD>yOqQCMz}z`1<#w%)Iu;19)xkbOTH{^{!V5Y@>DDOlR*%8Lk0YX@EK#ypZ^xse zNok73TO3iN!Wp58BihRR7Kot8@yMGhB6i!JKD}7(T5s#xH{d0kA<9_T?%+)sP_p28 zyESNkjxn!irka!4n2GMrM;b>`sSdCi4^t#5ec8H@WEGTI#F0o@_I4UmA%g3iK#HPf zxzmRK?R1B}9%*DvD*W#6@LVBsW<4w$ZOoOU1qnSnZCvzc)9vxP7ui-9D@};uH=C|d zz^+6xICTQ{dV!RO{lQW*K0^Z+h4og(NNa|cNsdoGRh~R#3e}3-+7Uue5Og2A8goq~M7{j(Zi*t|492pml#q~nn&oG7X?QNc z9QZi9)KtH=NMm-PmDX6>TN^66GiA08E5RNzM;1XQ-{2>$YT98TX!RN&%>Xa|UTe75 z(1YDL!HOM^i=E@x7EedsK#N-zzINxpSG6GjPy54|Qa#q-?!Al|9Q}n5mvxar{-+x7 zRLD|$oQzvc_vS$9(fl5-8t-=hV$%KOX+81Ql%T9ftlnn#Q+{9)$NFYkD@``unS`uR z2s|Ma&?#~^-GWlJIUiVWS^Pm`PPhrbaTFJxj%ir%1Th?!B9>RD&>AWQ_7vY>71jf9 zo6D(l%JXca+JdRlcl+1&mej*mA)mi=StJ}N=(hYV%2xaG#rsviCdh=P|9A!-^s{6C z@eH0HXm2cxm2OyG+|GEEiFO9PF9&lC2>(VcG zP4Zur4e5Y*6-2*rn~V6;<&~Q2fXj{Pua%9&C%8riifbALV5+;|;Nd=S(UCtz_C#>y zqt(}WIl$%G1h8L#*Qpz-!6O|bCLBz`OcH{Rl@KrK4H!Fg4aj)$?E&znm+v20EZ4YQ zf5i22mz*G{#Jpfm$)>eoxWq60Vu`|yhoz5K-eVLW!p0V1T4^I$$d`X7$~)q9>;5?T z8!Ko}op0q^Cj&wZwQ@sSn+`2MpEictRc+BScGhFsK3vcj2%fW*G3X8(N>l#a)c{z#I3S2bo zRkA7KzbEZOL-rJ1WYH)+&$6dP#Y3#mL^X=0?*}HK-KhaAzIN4UGQ-+_rnb#c$~|9Z z<|?m`w+6rg*zEI17%#S~!G?#Sc%#S}carMyyK6?VsSwY1PG0EnKLg-L9Mqy-xb!(X z1CZU9LJbD8J#HGb5Tpmtz}01R7;X~>-pb8O$QLX zZNe+sPTFHZ>@1}2!u_sy`p;fnvZ7fq^|gofey1%r^M7SRAt-7#yz(k{b?rmWW~sfC zJ_XgkVC7FdFf~HPpCnlv&H3mJ_-MCRuErDxu9;X>--5EaVikd|G4S(Qi4F$gsTte- zDW=DXN}t{dr3mc6=!7tEY%^K_B`g=S*|$@`1Uh6d4e~VOa(T6A;`r$N*qUi0X_wwU z3w${#1p8V|#;yTUdA_Q`qK!YyC^KGj)XjoGbG6*#Va_{u+pmNCo!9xaLcpK3`&PQv z4%GC0;@1iJ{GyZ;-Ir_BucB1lY@uXaI%BXB^7^M?_9yp-(R0n4*VvzvYBFd0TznLW zJrt3&8+)Md@c;t?4I#Bpz=rzEW3=wf{J=V=wb^;pD7%Rh1!3lm!60_it1E}Df`M&t z*9Dx&qJrS>)1A&r9>F3Dg(=gqk)iaOIc<67liwzLi;Mb>$g8n1MZb6{w&j)MmEJ+^ zSNt;CJXJC*DLNOJM9`fjYHf&Ed&=0 zG@@uMQfWw8zJ1c0DYW*+^6OTGB1~5H)3t3psq(A@(KusbZst&;E6o801k(fGKBK9a z1Ss?d;Fr&KT?h^j=#p84rpRjGiT%jF1<;ji6qs@?@x70%nM_It2*Gat4A5kVr9jmY11UcWFUqG&Cz-dFA6K zUi 
zqPt>3NlvyVU`g1<&j4I}>Z%Wh`dflu%3m}6zMG-cD|qS}Db?<;x(|ai=Z$3| zL>UF!4JNy?m#TBe;{DV+R&GmE7{ywqB zlk&d=FVd2SAA_%1Al*J!52-YatX%>=)=M>*dXPv5iGn6A#bXQQY$Uo0QJm z;z238w=3 zqA6YfG{_{}!9{A?sTJb9Q5kK5Z6yR}P_OhJUh!U}1P~`KStxSoDhtt!y3~1OP{i+y zyxaGV;=I-B4>@i5>novrr2f>N=kK%_eW!l+QBvG$GBVJQuY{N#tqv)f>Az_%#luf< zlc<c3A^69WbP8G43=92UP|7i$84e4x4#4$|S~ctAYxpDx<$Az_H>KDq#~e&cA% z8tF8?OMcmx6JPL=`~QJYP;wziTggC_nmPM9hM!%8t4IshI8%z?}NVt+wRH9dXF_Lrw^?!uSZL4LpI zd{=EL%voV9;@>em1G%U=Z3kltiiZoZ!9v@DZ~qUZvQD7;X)s)Jj1EfbGj5Gu4tO#7 zLj_%~*rKpk_f7ePW($)ivYX%)CsoWI#gL2!kGiNvN5_I!Ij)`AtDPTm@(u6OvEvxT zh=~d#)khDonehTPGyFUCE<~3iJr^i>!^v=<; zS9wCB&+QKkyarprU@<^7m zuv5Q4D|U?sdO(WfE8J>K++a%BAl?)9z-`TVlXGv(k*T+wMUD5lnFsQXonAaJ@XYwq zYEhoQ;Tsy0Y4YUtWXx-^-_sw&Kf5h|!4igx=65S{U?9$NgwcXi*@=c3UPyinuEuTl z0DJRqCXn8NPLo|L1WK5$eKLKHaeIgbu4kPzX2-L|AKZoc#1o5I`F0^}oQ0*m-X6wz z+)}o5?CzOgh2yrHQyQy5p5+K>0{JSDmsQc4%*m*1ttD{(d!Z;rQwJ|28!`E|jz2@8 zM~w_Ip=Ub)ZQYq$djS=$yk;#@LGZ-Cy#q=yGuJUL8AJns_8P_oky)ymdWpGt`(rXQ zJGn`i9Kz>v7RAnIDC1{9dV;blME5^=Hw2qlf?Ph+tL0*DhlQ?{-djLlgvq;SbbqqM zyR;JM665`*T(rgY_;cI#r3m<&j4jUuu=fu3$qCLUm-JPuI31iV7~v72|2zT^zl|su z@f{2HvqCqWdv&NC=ixIQ!EHA)TF>0zlQ>6mR+8Q!dKbj|bq zOl*AdO?g(KWl_e0S2i2p*l*cSsXk}B0g;*4m_wWm<<163L_u)@$-Bhw{ROK=*hT#r zYwVhnG46(7w)8(pzm%8&v3|;9)PEItqH(>T+DJuZhXKpW#moSb4s6^3@7P#&GMKKPwq<*-?d^wbAYdOO? 
z?i>E$mzneY(9!Z~iG$GRSIZgAghJPcGW=f~D^Nz2rvIx(FBF{@fp-zR?sZU;KDtJm$`q=LR zKp^FulxnjL;CElPL+lZQFr>a_Rqgm+*Q;XsT;-#qJRacZG9;$6G3E-z{az6!S!MPj z{^fUZU@XI5rB4yqTMW7$hwEl+w;&^&&pV-%an zeCW!k|GSkM4wyjr~l~>NKZL?J5oJ8{ENw^w9HqSpvyW%zw?d z95H*%2A^ZGxgO=2pUa^|Z=&Nlr(0^I70ZzH?J8Pa;97INa*s2N`lL9AvYG?u$>|ij z$w5Eu8pc0D3il7^_caqc27Z3McZnGH*h7B)+ONXB{H|q+8x_oR2+i$aoQEKU-84 z0?E^)r;SmOP#@*5Y*o=*Y?Htp=hlB&Ki0dx)}Np}c6wvygYuh+9at-}ReRx44@f zKI(GMPURX!f}9j5K@-!L3%q9@kCa8~T}S-OTwuF5Jw^g!3Tye!^i!()k2Kd)A)1%M zg$%07V_eDnI7L`mk9@k}!CyS2ruu4t5grQjKHaM&o2=cVYxp%fh08ClhFqW_ga>iC ziHtgai%**!t^R3Ywqh_jlHBdS`XEG)NjR)z^w=%Ad6oP+?))&LGV^gAfLD2x8fJ z-QuxNk5|Vl>yE=_zV&mE5*SCuK(W7?3QlnmWraKAfj)CErlv4KA5L6rDqWq%g|vF| zhsi)gA9%q3KzyY_fc^}xvnts73TI1zYZCP907Y(?A*8)0J}4a#wi?J6&F9y#vdvwz zM1jb$+f-NJbb>`T1;v&Ype`20?L$!BhpH}`0~yUkULquH+D7dZRY_QpfOEQ(>4JsA zh-2+)BLs)ohOJD^q%t4zT!f=c*xO6jnh?-$s?;ZzJNbJDhe!zYhBo00GmW0T=83_{ zRYRQy+xPsduq*B32r@*<%gJtXLXcSTF&R3Py$kmldJmW|nJEDP zs8lfNbyi4*W6@cEwqP7D!+o8;_l}CJF~?$i5SQh#JGKjF+s!8rN_k51M~1~KyW%%O z;|R@Z^ScBdfm=T>^w`yyj^fa(h-;V5U!Q4!m+ z&;+=w8|U`3&_%NwN{F#NUMv0##Syz-(e7iTw57Op8Im?wZ8KW!)DZ*<#YUbBS^JSL z#B5_%{R$3qkUz|iA-hYv+|;Yp;7Pn6llQUCUT?ad^-wu88jBjgSN8e-vFB9+z(`kv zcxW4^H;;etP#y#7!TNNg%4G*3NWza=_gM#sogDuVZ!y2Q!Yb_a?Ak_E6a(!+>T_g^ z{g=!VK*L=Wm;b>ld+fCYtv9;5UedA$agi_u@0M*0!7t_=>-|)iX_j8l#ajEw#4Bm1 zPLm5}thkkwRDxv^X|>^e&Cmp(SWXpdG`3UO`(dZdQGL=imCmlCB(KNEiGuEpgY_-G zsxKgiexP3N%u5lkXvGV@3SG0>5;A5%v|iP;3TfTGUDdr>xr6&p8sHyJX4zF-Qfqs> z73bf^J@Q{w@jb82%cz_&oU@ld2j~`HcQjj3NY8e>(LyTUw~c*~SMjFF27z;-{x6?> zhY_Xrl#3ApUcWz}B=fB_Pp#dVVzxhA*!<^iYb6>snLj+1?KXCtM{4_Dck~2@ZhC3e zP&r4@@I#3yAnX&@2U?4!>w{<8Ef&&wdUIc5S#7*X-~1Vj?0dHa1=Q^oI1w8u`?~bfdxfMoprt%&$g8*kj~~1zV}2^Y{0ZcI!zT%G$iJ3R z%IS9UPm(U_Io3zKMAD;I_?xkB&F4wq4t(BRW5cS3V9hdYoB#jnGgxGd3K~6YQbrhIoJ(myoBTsm8 zg_ivoAb~ykx1}!i8M5nyn6_D%^~YUrvJe#36Ngz+@y=#~%zf6q89UK;z$-1o?AW&Z zqA8}?@1}~A62+dDMC&SSzhi#$w!E0oSz|z>47osKC6j5a$4KO5P9{e^wc);YEGA&r zWt`c@!{m+?U*;Jl1m{K5q1fKU4YFuNWJL$E!vzIwvKBYgW7(e&GOf%bv8ZtP^9dEx 
zrt2Yt8#ZZ1&(f=zSB%-O)3*hP3?C40euHqY<~&}J7A_GRVqEmMj*~rV^mc|dMVfWP zt#^pTDqWR%4)n@8-&g*}drQ}9XJ4}ijq70?j&dHG7cBDdYs6iyYO*j}Z@yTtRR3)3 z)0LA%LG!~m1S4sogXy1Wwj0@?D9Dt^jFR|)A1Bw*yH>FMY2DM9$lgpD_^8f)dp&?7gfK#X?3YWfwq`sUs6Sk^gC z-XXBU=sW1j_#-$3fMix1{IB?#Uj1S@V|L};bnGHd+94g)ZjCb~ciTESP7@*u_mSqf zDv%L&Sf@lT$i5>r)0b0~27AR#LQa@cVb2uMh|9%qs=8#@6F4nAHdMkqxFt--L}I-L z>MRQc!;;WrBRN}ZMO?c1)&C-*8#$y~$HAl8Y-2Zih!s!C-Kj%Rlzq*vx?$hX$|gc@ z+hQvH+${Q&yFW)lFk$L=mfdE>{$IDd7v;-cGzF{YSd&}M(sDF%%kJbl2uJoG_#=cu zij6zDI*X8I^|;D#-~020#-Da*-GNA&)Hc&Qkz2oMF#Vk?M5BsJCj|5JV|-~tMVW#{ z6$sk)?xSs#DB#%9+~K+K(x#|$3Y(FRFAa?MtD6(_;Fs#4NtVZN6PuH&>SWR(oAL7>@*Wmpz28%1P{F*Eru` zp*%+0)+W-v(yRcxS87dnW!k08H|e6*I6Kye*62u&?&1IVdA8z72?4k@hRjzN-g#%2BPLYICE=($(aMQsDQB!sMB#bz<=QIVT;i;65(~oPYghMe)LUeFT4xkDJ;!=%W;di{?Z<{A%0ACp?)a0EDUy#-O(;D-iV)dQD^HObYZj@-< za+kGB2?>nyKn)MRhh(|*x%{xt#jryFXf45k!puMpw4om4NOd{%8kKq5?u9v4+R3v; ztCPR3%SvJW+X+S92h9QzqTIMjtIYUd!S5gRaoy?5$is}f>U!-sHA)+2ijQ|ZnR371 zo}?1l`-)lMC}?B>Poft;k3*$FZx^{5Os+KaKfpsoN}uc7SORN^_(1Qr`5j6}gojR= zw2Tg$8VsjZa*dtd(UtzdP(K{@%y6DI>Wa+8O9`?3dX_xLIu|ZnS30{A_xh*(wkmt% zuTcq?1$k?frc+QflU)ttf|0NJ2Y{4!rhb*3#SOBa1V&1-TA|8yAs^f5(~br5Wd;fX zCwUoV+fiXSF)g#W=kesv_N3yU48JZk1HCSj)> zKI9N<%}-Py-MflQ2n^v_;zKOc7q10Z{1Tm5(aH_{6R?1Gom2A3yzD|5OV0XS^lXcp zR|-Y5=`&**OZUsPOZstk;8yYO8G6|f<5d&wD|Eij1V0-bCpP>rogitj9PJ1AW+wz=^!OLRF)% zZMsH}{_~wy#G@t*!O}wnb(%| zyZ6q-kgmg}JC}YJzq`nOAh2|u>FBx{BQ}pB>C5=oelTNtB%Er_L_nD@7g+WdXEwz* zxm~`tjx|uEF0{A)JjeQi^$1h_s^@zq4npvbinH>n;Xw%6qsgtI2FvUEG^6x z9z;KxZE96`@4Vf{(!&jYr+?4pSSKM^;O!{s`w5!i2%q||t)AD$_R$IOPJq~8V*|=y z;)nr_^e%ctx3BRYeC*uH0Eyrs!S+6s(EY^OeL**@MuA}K(cZf0&?igngMzs~^?v)~~Ip4CMc57_vO_749)N(oyu|@#8q2baPBc&DuUV)=jg+XvEipUKm zLDu_GK)k2o^Zk~FW;vK=alGF>-9V+DtcogRJa+wY$1w()uUrQ+`y!`D(up-V zW12Vn;Q5bpn&qG%yq$hmGhuRU0Xw>z7_;jxS-22CG|r8Zl@sXnd&oHAhlpq+-1c3a zLho>;ci_S6-O?$FdI@L0TPFES+BB7xjV1I>+qHR%#3W@jY$C8+L_$%> zS78k*<>?52sNMOTv^tI$DN`r%&*lS#0tIVt7S&U}^@pXpQF-O1CnW8Nd0&^A8Wi`$ zdbgXy>lcIz9%J_eVqvW9QmVtGy{KzR7Dj7KG(Y>)%p7awHuk4xf%G-c4pLpFKLX4! 
zcufk5D)jtCk|Dx)TmJIJm?W+Z-lNNV+A~=Byl>?^{z+vE{)%1nWkHx7?>*@*_UG;v zyXdqJk2u=9l%R8fF*NWT?Av(1c|yg4hNrjAJ!#Acx=KFHTarI>N@SY(|4<%DU*DIV zO`~B4=~zGkKC1U10LaWP$18^_X*;dGj5(HZ;zLPr+U+E~?%U#mmI8hL{oBd;0}jaY z-^{{`qVhL2X)pmSj8Jm?n|tu#LLT$!hl^s(pzJzuMwx<^Z(XCb{b{)iE+~C1y?Y}Z z%B*U7W{S0hppqXLdCCCd4-Bl#-DrI|HQUkaL>r_U*o{{t{qTa+ijd6G#f_bDqotRR zv8K zLN$Q-f)v+O_MOBvoXzIY83IR;YhSp1YZtr{*VEWcpv&!-d&o0I#k6&=stzfF+eA}V zd=}lw4jLYtV;T+1>AENb5;gXzUXPz5jXmBmdseIHhL&A>6T3YtKo1Eye>T5ChuiRj zZujB8+<1eO0=1wi1EUo(x6~+4sv2di@9|M0XTbm(|5*em0qPctz;B(L=Vcr?>yelz z0Z&hkAnlkATzDuMBMo3 zOWS(8AN{nyw%FI9qnWV2ZOPzAxbWRm#PZI`zRYJIp>Y`j3W~S0Bs<7(eb|s6QO+G# zgSmm*wTxnK?As=uR8z9HZmC+eeokWgn+fQ%9@wrd+vN}UKT?@*Iq{H9N1UzMLU1%c zW95osRN#n7k;wG7*TgS>4g(&C>Jb}3sowz)m$tv`(^p3omRI%tpHQ$NjKR8PG6-TM z?ewMDl4Tt4Z|%+0QldG=A{-6HjouVFx1mTlmdeM4gFf>DZcM)}C#-ZX;foG*>9()) zpI{Q!4>+;%*F+NlIt=E(n~nCx3$K|0c;mO+G|lw4^{o! z?M~UBCN2~=vmNZjL4I)DNPAcZ4#AVyE2l&%Vt2>T*t(T{rxL&iyHQxzU*go`PC|c-GPr9*4ow z(jTi8Xuh05-3ZjPrGK<{+O=3v8jvp(85nK-+;?O%hq_)!&8@yvl`vG$V(LySwx005 zS0e^5h58+>7iCQ6G0b+KdG?)CADZhAr$k1!o46A>Nh$GZ+jc$x?0O{$X!0%Ph+-Sr zuAyFs>^k=5mSR(2@1-TLTuv=<{0B^N8@7*}_QtxYyYe}0;DEOb#jxE1-P=S#Xmf9e zivqD6DtTS+6>}vim1r(zRbVx6A_5G90u)Js*ep~@*-O5ufc(|@;7T34H)UOF5;aqL zRi#@~r$?9N*UHRU5{DU0mxH{?nmK32W;@QtME|Poty709psxOkh6#Fjs|R!8>eL-l z3e5O5ik*8nKhY_EpB$3N385+34aU_VY3LxbxC^)P<8q~mf@NMBvinvxHmK`W%tVqB zEBdWMVsiN#s;w{n%&V4~uyxXInz6jK;r}xHGCOWFwvG3}IfQ*N=w8y1Z;T!p8w5i) z(LCDutA?xNxji14iEt@#`1`>_8SmGfm&}~xe+6cQx4(4ww0v`j<2`6X52u(qZ>SV` z@L`FGPWnD#uypm%EnRLl;{B3X(N%|B{FwXH6#vQnX~TAngkbdtF&_fhs}I4Gd&|!x z3YMpzH62asN#Xn`MO56Jp)bL8UjyR@;L9q^K1}R*Te*ej&5tCk6EX zl$U*&=1UHD!@I&zk_=IwS*CU%9aK=)GwX&7&xn+}JQ9)SJ^3a_%GERO=1Vzo7U7P9 z%b7;}#re7I_EITLlL0ez(Yp!<1?X>3tNTM8l1dQj*c9gxg@Bt4;)|3mx=W)s+wn`` z)LxVeBrDOV=?&Co%8CFKFNjYc^SApVC0Kc2p{f)VDp&Ye`npEd&n{P1Xg>0=Mat zVj8lSi{loJKe;4ctGa|>33%OuLMfPT?F5p8;a|BAJu{NJxb&fOl;SXSLc%ZFu(bop zXb0}m=*@S;Z7N+Pm^hRw8~NMBlfpq+Tz0{{5da@naA z@noK*${E|~r9xKWkeub^k*iA>XsPm9B&$ArED$&`W`L7zLb_2D1ZzKAdS%|WOIz-i 
zC?2%$Q_&MN>x_|c7@kz*jAy;KY@6%hDwa%m+t_edWeX0^PWZ66JyoyAgHCSU|5lx zi+npFo(ADfJ5Mz8SHWem>jzUz0Wnt(?%b&*4hM)Ew3(i39Ci-g>;JE1i_@aLkRt=N zhbYe7Z?cOEzqU7)Cn*@a+!MgQ!)p3VzWH>8#pccn{tWuibnrI)=NXv9Ae9o&^|N`r zc{V45-W-Gc(HDe^K@&kn%6vH&p>96|QUT>xY>#gCC(5s!#e4h~JiYM=pRLOllzq8W zxV>ckrQJsW_s{*HD@eGi;2n5y8KU5#O=D~gs9x>?J-krSC;`=yf1uLVK9uu#C}yL2 zi8F{ApP0Ijv&Q^xe=OC1U4ZN18jk@9SBeLflbc45`?o#;wSu@w@a#POT;D+#r zJm?}=`E-(~^sPFDfd}R`^u+r?HYgC-IT#nCt#zNNgO?6H1%ImyS&q*!R8SfR9fLX_ zgvmkIPWMXHn#=oiNTFMq<-U6 z$Z9yW#)MnFXB_trpLk^VucP4FyR~dRiXF-179DEwTgjva{x-Fw=1m-rPHoTKGX0D6 zd7H~GfvZV{pX|?Y19w?rsyF#p=sr63&e&XN`0DsS?7fLQm0i?7eheoWLK#aU$s9^$ z3{B>-WXhP36f(aTJ-0kj!&Jret=$Yacz&`@FyF`uzv*xvrj` zIQM<;y@t>FthM&ux8=;)XncqbI5I1EGmJ2XJVNjceoe>*3Dok3_*^^JW}?>ur*DJl zW|S8nFnVec-Z7@ra#j<{4r?Fx8c53aU2bVM-Q7LO1Oq;h&Rq5umgzMffMWs=L52aV z%E=_-Jxa&V%x6zxKHs~+jFY!#^dCrzT{=-!_>zB;Ui4duKF?CZwzXGI>!%6l`jc+K ztHk_H;w@^UXlvBism0)D~F{o#im-Z$RAdnNi;Xl08|U-02{$m9t+fCkg)LXxb*#QUf^WWX zQjYl=a@&o2(a`VwSCYT6Y_P`cbM~YX>CXH!HP}Gu8Qo8hxpSS5=bCDg>5)PS; zJ&B)Pn8<&;ADIsynVz{I&*bNUfNt^=3Gr--*&#K-OhP}Nmdw4~I8uv>Q;ajaKEsY{ z&M;jqy(&2A)m8Vt;Ht_&xHR%vCvd8D163MiD&o6E1*FZW6co3nF zRi{PkZ~@u8vfw<@C`f15t)MLpVc{SBo<{oZ+KPB4TRSB_Wk8yw-s!>neG4W(l1^TP zyIGwKsdZeM%4G2cwrPYfPr~7|>Ari|4f7D8ExQrA)tpCNf&pek#@`}Rn4}9>sceX> z@09U=HfXkeic(suSfYr?rrSeK;jzcKQbeGQc4i98R_HiPFXDb!hxa4kJM18IMLP($ z%qH(PqFZ-{&f6;IgN`o2j2HOkL*ST2aV*_*6Su==G>?X(TiW=GKXu!r3m(-l{a`np zfISX@dIv7)&Me=HQB_*rx9hFv%P&m&I`=;bJC;@p5C#4bgAEuY?lS6lD<>MEAX?{S zJQ&ApsP@RQ-{%5oy}R-;ELl2!wFr9Pd1;SA>Ao+ho=?}7Xf{1T@fL>ok9i+P=B?K_ zh28Z4n5!0;&q+BrICuD5lKU`e;|VC}>&huNn{Ma=UUE)>h3vE$9^sFZZsCDOc+7ZszvZ&C%ZU=@mon3rJP z@oYr8*R2o@-}Emh7asTIV>gAD_ZWv~pa4ZPn(%!A>WD&}-7u<1AYM4u*AiJ`lJl_D zjF$ponCl6k2N-|US5_-fDPA?%DLYZZ7*jlN-ya=6$py|r?#}CFvG`}fvQ>6)#7>81 z9tjv36?tU4$sEj-ZfINalvhr}nx7#p2aN4a}{XYNwbr1%K+iQ`vUT zo(!tQgCrVlC%f1_p)luXE0dmaqVt7pT=T4r+U`= 
zFG8lJk7_dQe|gzAdOBmMU71HW0|We#|5!m4Y#F0rkBadaTmVAJXRv*rpZ?J4AZBZ%!;52D9(>B2G?k2Nl7{$aX>qjF#CbnCiS6;-Cbqb>c1?e+i$UZvOrIGCWGx1Uux-RNF;gTKd|k8n&26cGP%&IQ77P{r23QTVH`L zKmwoL7MFO)-NfaZ-PO(X&)l%5lZ@58$z&elvOuHu6oCuBL*$zRzWF$p+am4R0mHYh znOE#CBb-7KJM>C_$7wd$h5h4~w3IkuvwSdWly@A5rdYQ;%x(=ryQLLF?|xg$>R#Eq6TZLNmf9QqAP7^lvIL4XRe6wVWd0X{(e4-7A-HC9WE zNZ0kH@7rHb?RC>V;1pyu6R0!0 zg-t|mgvwTy&=VOb*@p4|k&Al(__WZv;y#}_e2zI{kv{q|IO)a@nw^WmWB%uXTc~RMRF8tq@y8+ zA;Md-cG=LG64<$KNWV%X>rC=`(O$vyj=FER>8`h!qO4>RlK*PjPVD9OUozV_D_~$5xNByhhT?i>nP}Kl;LK`gS_*S-XLb#`~ zA+FuoJhF+$oZd3>(`hR9)C%?1xNk- zOP_4^{>Wzz+rmnFq(^An$%4o+axio3MB&eEk1-!dWjEz%Un92MN-Of2D4zm{cS+QD zjK;2UL{eZN4Kn?SA^aHT_mg?zT^ z^6=AFs6>(ifv7#^LyUPu2q+|BU*Eu~48Xu!?kdX6hM&?w%{@$_{I*a-_Pr_(t?fs2 z^jGAS?(AbNG`yi`))4%O7M5Ib21Y|+R*GC}4v_6-o-$P^@BYFt_MgP#@hPNV+4p(W{HnchD@h=HneIe7pW0MH z_#%&4I2UR95p-6On>~Pj>8m%V5MZ^TJcgRB^&y;h`=7)Eoq^H(UK|F78AFP8b#Vhl zqZkx(6#~auL@KOW{*1=DZghLVn{p1_gfZzt02?&$mWbgWb2&Rw<; zSSWQgqpBt$K~n(G-rWFJp0G!em)@OyS@r*MJlN|7H|9idfKUs)1-G1f4eoQlbqimT z2j1@b_I5QZINxLC)r^e_6gycACNEE&#J{!AN=0c*NZ_Y@mTY>p2dMswIS51ocBi8- zhrv;hH(%|+Nh188C_L6unvM0$3YfHom91?3wXpP0pz4-Ptx^m$>epbc{L4_i~ zjl%%{vCAhYVXfO1B^%rVeqEG!oH_FnR@L#UZSnClFGt?9hE%OGiPx=aMk{n?xf?J) zP)cXcOM|05^6WJO%f1NFOs{-;Xn%*xEC>{CkJp0(jbsY`>CRy8f*byw`u+Rc3v{1A z=qnQ9YoWEqKgeB4am-(BieAg!?LVso{N?x`21ssTk8}mfZEjvZ3#o(5WXXMMUvw5g zk~?DMAWgIw3Wy|}d^3b#0{GdTZUG`#PwV;uy{{#gu8^9!UKJGJrsbMeKz^tXxE<|Q zZ=pwQ?=)=T8;m5chm+g`@dSg}Kn04=Z}hVj5AX@%J9=1>%kvHyk=Ab&9)Bxu$rW}~ zay$D6IK2<*cz?+iHXmLgsaq<}7i6~u`+s0$pU``~9%ImP4N#!K)Pa^4Ksj+PQr9mP zO1~XNMhWZ7THysu1N&Plp3l)AzfBhss9Xs(iCGya;b8P`{+%K>GMH_B{Mc}GO32>~ zb*HebIM&8}y4$0x%w5X=BsOJ^ss2O}EQ;WqdZhk@;|WW4u-Zd%zmWOW|2k|yQjR=N zm5i!`xY69u12jClZ;f)9)rP9i%dUJls*dureWRcCByAjUo6 zhPm>oMsi>S3;=Os=EzC79;}ncg_4)5?1)i-Sky0Ml*r=5svQS0GIgaXI)y_3fbYLa zN>G5RBU-k6_WeX%4!~F8h$@>yHvGcGeETK=$rm9 zioH599QTLa=L#LBPZbEJbd0b>I+hF&+eS6r;YU|<^X%a?iH`}# z<=_Z0nnpGNs6-ea)fP?pn-I*QLUx@VYIqRfE^?yrxu?F|5Ox`<3G_YPVOcWb|1S^V 
zBw^MLLjDl8ZTW>yBcj=@a9`5>4s0ApPZIQ#5F5B2HFMObgU zNE%<<41V>$@F=}=mjU71NUGGHOXuLICssNY+|$`AqlvmtzhA z3!@wK>QIKu{kM(+hn|M?x1pxtB;U1I7lR+`RJ`2HaQ4;K3NG9-By+It^8(cL z-e*&E0Vht}`6P!{`j!A3wRJH&U<2XAFm%C;8AXg?p@4asEKhC@7=&1{I5GdfElwOa z4T1}JXEbVqW9A@h!cI`L{m76UH0A0>Mj8P}d8`UL$TP?EG&21N7$`k`u}i;-U(*Uj zEw#7^z9XY0+qqC^CARF}>e+1dBtoF50BTT$OkGV$MLDUns4}CWBQ>_<<8fGuiGn>4 z&BByhH?!vbtk&s3U}@$Y)^#_4<=|R{5!V6sK}HGgW1U#uUQUW23+VQp?XbLhMxt;x z>TUrtH$XC1)kj#4hkBje9H|>(3Gmef@?ICrlr=xHbxBGUWvkDtnrQLqjo+uPV+j1| zps*!Z&Agl@I2LCi=<+AvEjw32CQl8)UY$ULtrEt6m})MH#25@Wcrvd)jO64yi(@cj_U9HQJG7CYdPWnDl;22vv?*4$4HH3L zSGkqx=AsIN%+tJ%eC6i|VMc7Isn>y16jES-8>4DR#2gbf+v7tm@5OnyZMiBrfs8@h zdOb8C)vkNphck%*7GqgYvvqttf%ziRiY=#2L@VUpzAZ(N4{l>kOk7NGp zBCQkN4@+NiyP-nB*79pi4Jr*{lxYg|2a#v6t0?mlY8;!SCMAx^S@H{{TfsUx&-gJv zUaomQSE9$xS0LntQ_GCXHvt>r@qcfCyASo)E7A{7lpf%%5a^1zz$mYCVp++59{BC6 zU&>alQkMUO-y@qrNt6OD*C0ZTlx#m;KvMEuV&PlzAK$SV|KZu;h$fpkhQdMF4_PNJ z4<>t^lNI}YP%G9y)JEm*c>a#6pEf%Aa1yKnr`XB<=O0^z-|0V7=DEJ(#Ime{RTOPV z>@$DEW6vlTmUG*WH?HbSSZnYXBb{BJtv1DJdT4`D8(G01G|^ZEiK;A374N}@vn|33@_XKA*i%+kMvb7 zf8j&e-nEr``+3~HwJ6QQ|0wOVGs-F|oU@x?uJ)cDx+tp7PG5`RtZ6p1FE^0`Z;VI* zg#R2!5ia|-wAj46x2GcZT40}>;UZeMmX|?r$%X+*CoG** zTB)u4Ip)Aplfudteo{Ya0B)kFG^<_I(OMImdfKnek)z*KJ>OVn8$r?&ETO4&KTJ>o zhJ+gln81Ye!>}~MOAkM4n%x6~>Qq@?j_qHS*A=Eym~&Q=tDF@Z787AuSP8QCDJ?2I zeBD-Nl1}-8-*&eTiGj=cLXvkc;H{OKS7>CM|B9NyOdJX7QC9QKcka;tuGa%gQ1jHF8Caio za7F1-hQ1&Qjq|XwS(!OSF-p#=Okgml z;Wps+_4;rdJ>wYRuq=@>b2<8`Htl@R@A*WViuG4(EWvcuah9C+`c*45obNt@oQuLK zEJ~L$Ht z&WOz4n%L8Y)GER}3aiT?mc~gY!zP{7VS3|Zm_e(1>zj$bYL=#du`LLb}KR>p3$;dYUdcCKh|JcD$Y)>UNbgCwa^!abz10Z7p?a;vxnp$^bG6p~>rh`HT zmZEk1^>nX$HzrkXcI{qUp|Sd2F7-=y_WQ?ohOL(Fm>ADqu(T9pX|HZ0gFS!U%gxso zguybiXVeb>u_U*oC;prL)DEA-t`(XXTjYjChXX^UYu1-B1FsNQ#S--n0aHV@b8BgpPW)XyV>FM<3_cs+=t^2mzZ>tZrGi&!WPRk1uATda zvH2{7aBm_R5R~vW;9CB8=EoOfKjd;4(@d zk95TBTbqyB$1zZb0)sp<4apN^Q{4lm8VrAC1bvh?I6Mc+$}Dm-XQ!j^Pnwl}ui9d* z*fWt?gbm)v2^l<0Q8U;JFo%f|N7^hC{X;2WiIBi9yi{F0s+rhma?#k_=_%urk<}|_ z@eGD+H8IVZo~ 
z0j8bVLy1#Y#WKY$4{;C0Uaj=0uRoPDzE0JccyZii&(I?`bxO$qz?~8gc{umxEV9D~ zyW;}iM{KgbU0@1nm^>ysG_)I4h&cP1Bi*c1Ga#Ai{P=iiCKNH!SSXQ z8iCB%j7YZduiqC{!sCgDaTB>QUvy8KJr@fv zUxO?&#V}zZH}_ondW%X~IAI1Vnp#M9aj~E?$BD$i_h!dw-A43Hk}t@8N{27^OMZ2@ z-zi?=E8OfD1)z$}4IcNU7%-!t&d5>kGih{bpf#$A zqZ%md>G_#ybe#gu@}&J&)jBr+qR`r$ldgdCb!(4e6S)s2#wM7G!Dngv!O9E|H6NmA z|FY#SX-Hb1Ag*(GNxySGSj;66r&sfx_|rAT{l42JTMF-<0-e*wMwpBjdH+h7e9dRL zMgrlKW@Wld@wGi;-xha^h@!_h9M+W~igkXw(+Ld0z^Bz=I|`>q-~A2D>Dxd>!g`LKUK;io@8=-T2JnS= z{&z3Zl}*sK&-|<_JF=xCjsll@)euddV+S6Mxw-^|dD@p8E?%{vsJ`k*Cu$p^9R5=F zjV->`EUe#|V8k9#7O_d$5Uan3I>Xu?LX{A*qo29C00Iy;F>0%45hls2|N0P|rghIS zyi@ZYr?eX~b#6@s?NUCj0Yq{r+)B0(P zk{MiY>q&tR^^~*6yuuj!B;qNo)Oz{Bjo~FP?DSwRZcc!mH$Xmy;W}k{_3qE!<==|J z4qQ}J_$I+kVPy=?wc4ZLOobWnGY0S8m*t(z9FtV_g4nGt9<5EO`&B#txGqdAGu4up za?EUeS*W*9eS#BH?Fw5^a8FFoHA~=MW7X2^Y~mcDRJyGw1YeL-qb>6E7aWTW;`d_A z62(M)0ire&2^gfjNPf`5GUK_JZKA1ES`W`+MJ{DG(1jK6t2DCC_6QAYl~oksrm`|d z{v^W&(`d8lvy7prxx5tz)^x-~^mf$Kt+?6_s!Z%vmgV8u7a59?%p5bc_y3hxNr7>Z zV+V(_`$|?&Fj5fVb4Mgw#n_j6BAzIVqn-StGha7njV4lJK^C(uh*<;N)NX=-aP1oa zd>NS`#S4k*asP$JSL#{jGc{#L8dI5={%ERVI1T@$AwToM>Lsa=1-~TeOaToKz8myK zSX80+(eFUZizYbPFVpcOzn-`;E87YgQDR&KV1TcJcVDyO{~HfF!`;KnSZuKwz&dts z6QwuO9LEUM0;=qnhd`_{!HmHefGZQ+CxEXmq!$(D8Ov3t!vl@rk8`S}fwz30zxJ_X zk!%S$V7uW{tf3TII1g;nrR-=RxXO#Nv=ox&rZ=4TX&7srwP5IhDaLIo_yKd1c~gTj zlnG`w*JE{S07%0gOFwemsd3gG5k2B$(?xI~j`aMh1!Q_@^&q|>?Gvxtc{}YoEOMeNmE0LA(?en}#(Ri90l0dG#_SNqh|vlIlh;)5Q*(Xo&Mu=`PnYI( zf{tFYKv7ZdlSPh^+%|$_iMkWA5V$XTTwgrK?N{D0yyiq=2F}FwqRHUztJ60NiVDA| z1zyL-><8NPFfSXxrWk@+j5s@cQb-hk)44=Hu=Z1{SiJO5Z(C!w;VxTzLFqm`Hbxd* z;1G$}{?xJ);Im(9N4D=h%El*XV4#L~pn{%<`IH53$V}GOa=bacBYWnhr76bl4wB@a ze<)E5H&UOjlttm4XGM@#X$GoS)zOUM^NA~^Pc|oF=u>ZF?Cs4d(m6k>!@E+WKnB0( zZZT^0i4Oa@9K?>rYvO2hf}5cywnZ-8hu_XoOdWgj2ewWJ=OHs{aB}#)YC` zXNrPtvZLG92FpwOq?J%rf^F~La3xv_n8;2K6GHS2V;-B9_FX%4{b(C$fhL_3@bY8X zMdTO->fjjl3>JPu+*D)Q*S|6OT{~-S8)-@7Hl%M-W%_v}^WX$LDTF+MCLnGFDC*cn zAhmygAnOxDcl#DmBU~xW>u_~}kSHsHT~kdi>F_cWIeJM2^dK_{#Iw3m0UqN$U*uv~ 
z+JTF$dy!V`d;N;yl9Djh=M9A>Z-zJAc)Crb>K{RIzCm$-z~XNphiQye+ECi;ELGnG z16J<~&8KA0G)c~dDQUYFxiebH{A(-5zyh?=AZl3k`HlPb&oy7>jL#=VrWZ{<;@|Yb z36qsJ3F{X_kZ9fKZ&7J|+q7TORK;b0kQ5Est?EsTFd{Hp48{h+mL&e_bO}cwCdRIw z?mSov1P!`cOu~z9`MP^FzNt1Ch6eOi79f$5yg zjEO1k7DnXbf4;{|%>pl872&4H7|O^e{>~_x?B$mRK&Z%u`}U^XZas{-YYXL4+yi9# z79=Esh-zOZ4ON_oF=B*~j8z5g-xnR#Jz+HaY>*|QNoj>3dzt@rAtou}1U>!i-g>(B zd_y_R=cSKzd0;9UWK+=xQ#qdjbJ4WN`PkS}Bxsl-~ux zEH>5@;XR$dw~z=7OsA$bc!;L-5XnA(2R&YD}=9inZyxpK4yscy$2Eq!SqqhTixs}CUzh7$}jgOqdJEPdfG_%b6 zAIP^$vV$)CT>XpzVskw&A+r%qLRXN7K9ufg9aQ{Td;TZ!XZ`}l4Q#mo{t^DTQj)6` zm`&s)0;QO3Fr4;%}3S^GKhjtWb)K*@K?)h5VE)pSz)F zIN*47dbNmu(;p|bI_sk3TW*1Z_Ejg3j(98;#_B41Lai!DwM?dLX{~kVK$)=U>`}*5 zD&;67(q)%QP^`YOF(t+-1AX_Nmonkb-afZox1PPoB%GJsL1Dkpw^@zM4^b3pO~_ku zwP4o<9|o{^r-{9iOrbL~qstW+zcy0aViOu1w#&rphBCEnaTHdKhu~J+U@}+p@-Zxz zY=d?uWfFwq?&5Y%E#6$QvMk;SGW;iQ48#J_&lpWXs+tP)taPWA2{sH5uBT&w+j2J5 zs?tpS(Lp9-C|yS;aRch;f{#Q>txr|s;<Vw<}r)z{^*VJibx5`VS%lMJV9 z$SAb|VJR7BPJF6M=(2xhY6F$7EcfkpElIo+N2wXyL&IE`ek37{qFjZ{CowHh;?&bd z!C>LiOk4lcP>e3j*BIcBOF#S$f1U3d6C!ILp(eTkBnM(41E`A*Ks5_Z=f>;`fX{%s zlaQ$7e#aYoZpnI$yC`k3+!Pl&N+^8*9Z46T@xDWwn@eCd1$_OZeMR5}NV!eo5W#G9 zXheo*-xPR+VG!8SgE->>SB zCqjqpt^n+AK#Usi5wUJy5+t1abh9!{a-pO=Vtr#U0GuV|Ti4G_OHtr)7!`yR*B^oT z?c3Ym@5X-XT#34cIn1Zo^C4HJy!oMcDuKcBjeCEA3x6K5&TtqQ9;6u`V}JL73;Qj~ zNVXWcBG2+CBc9+LIy^kT(%k$E09=F+C_^P0UWQHfWG^VL#n3dLlC;j^=ZIp9IS1iL z;~wtDG7{L{ErB_S@d7?0v}+$X50#{VZ=XaqUXps75P>KXSE)PRjLBEj$hnVc?oK z6dz+~m`EnpL3yb3P??>nd;y-M5-IR(CtNdrI*k{1bwb658H3vmCMih;U?t_GUi~e8 zird5l>X>lR+Fhv&?#DKy1oTY6hL108>cRZeodA`*$Zk~t!;9gD!UN97$~NdQipdqG za@27(14r9US7+w$Xsm+y|248J*F8FNqJa2f#-Y}0<#|*CF3Tl#ZS3ur-rZFEDbUni?Y^Kpj&rfQvi8jS4y^k(JY>@xq0&aG?}HjdX-J1w+jgn7MD_=394$jMq^k zY4voCxt(niqXdIeFx=+6`Tcsb01_BUHvlNR78I-Yt^evwj1E;^JlbYy_~R)fBbwwWd>^I8uWdfn9q!a!TG~Qvp9LzJWyE!%5dN}L2M1S^%dNm*0GtqXw=e$do~URib;U(#)eeP`fThV64OJ^S@KblFq3FwPXZ1Yxz_*)q z2{KX0$3-D@8oUD}-$Lda$b`(vz6zm2DRB8w8?#@NPPh?=VRX4#Ll0M^kfky!u>)ZFDp@egjE>FqHw6{v5h7>-wh{bhbyQ*= 
z|LWgZ0wIMMuG-=aCwTSFLiys527O2s%4xfNAD3I?xU=H)dx0bLqnvn)(K0d^vPW)3 zESEi({`Be~8a1y2*WWN|lOPd_^$am0nP7mG;Qhb>5uPYHy;QPf4C$H)*m%~)Tri1} z?@1`WZYtoV$V@*+#UaRT9zwO>4Og=*ty)N+M`|F($#R-4mlVu9#z|5@SN+>)ty zXP3}Ob{Pqz5W(7eppdwle2E5r!dbB>)k2fKv#QjHM@%V~;9mZZei!!kU*{7QiS zkS$Oas5t}`tS-AKK^Y0GSmx22f3RE+$!NV8sw9oU$QHWRe!ok#aXAi{jx6ypoy}39 zLzy2}L{!|De(-MLy2~C^S0Xc*q@(Eh=N&t_)Su&R2t7YGd(&jgmLP`fq&-0mBU_0i zA~oQz;i@KmK+EAsKsFy|KzmM1+82$Xubl8k5dcMtm748ZpX&PbO9Ger-SWK*T-y@Z=kk9iDKsCA=MSF_eiruVlI z+xl)=ywIUp1%&s`fA(Ge>jU+`(`R?UbqkX>%a}1N1xpjoQS7gvW1$OkjfI$0cHVTr zArcA+r4Wgn$)Mea#-{@T(g{e}gVk#1U!z`?n`Q0{37Jne^NBixdkF-aRD3*9KW0}w zq`T(5|Bx?-1q5qCZy>K1tkVFD&=p_*7`7l7_{VjE({D6`g;}y3FpVR;NBx{=Jl+*O z*Aa@wo#Ytfa2e|1{YQgnCqbku@Ytx(s)4(?`IK7kszkoNnMk~D%_uC2Ve`6Z0-+@V z+DcZCU^-8!iO3qY_Um0rgH-DWp4q&{TQ`yXhi&~`FcdF zCvm;2p?H=RmPwXW`P}ykHxu8Br$ttt4Mh`jhY;d_j2+03UmeC?#P0>L z>hl}TX|pMCIdP$v!%~Rbw!F}Ef$5V}c@v`*WPx_;@xwif#%t8c)bF>w{$|xOk7(z^gx>Hnh z)rvk$N(YTY1&sq;&EBT8<3_RzYs^KZ^)Ats!Il66xtdI4H%dyo?t#qqcz7|X4|2k- z3|&VPU91Bk9p^mfbXI6)O@hSBu{ji2w`93_azf}rQN)OcAg`4o16}lt;A^%}qA;Wk z$o3Uw6~(x*1BH>0dJRNE#Pogv9i1%xXe7#|>csMJo^@xEc)$M~zE%HKA9%$d#f|Hs zo0UpEo6@AUkH_Gy6{w5E?<1H{X;A+c!VrH{rI0yf3pt|<6&{O8xH79vlAqH#EA-1( zqCAFYb4qn3Ldmz-J@R!SSUw`64Si{q(*nh&;J>L-6r*s{KCR04gvd4EaDfF&yokXI zVba<)^1%@?m^3Jf5Y89V?r0kgv-A-riz$Vt+(1s_2C#!a4V5~7Lc^=1gB_q_* zZ7Zmce~;a9K1*X1uS!n`VUFRvo_9|>@DH1~E?k$CxA=l*IMi#N34%g^48%aBM7n-c z^iL}_<`}pn!}Zx;ALR8-arMi?Mi%uv8$V8ME=NVr6NyX@<3nYZ*8@UJd;y(#RZA-a zzdyJ)?vNTt`NE@l4kwHHqw21r9qcu~B8IEM!#-zF>Us3?=K$wDK?4gN4ui6+_>uce z1V-?rC(V@qzEnY)XPV!LCZ+k!q$)Mjb67MkS*#AjKR8-`zZdjXmHqDjn7@lOwK*_e z*`n_i7&&^!z8Gk3(+uS}@jKDmnh!xVUlR zm8HRZbGhx3N}&JXiNpuclv6K4i9eJoATf##rF7ZnBxnK%0fa#nUCq@KSP}8k5Ne-N znIf38JDSgw#a~Xn7Cx~wHyEI((Brj95rggh9ZW}80CX88MsW{XDP43*)U!JvNrA@D zgCSx;&k4Ct_h{BowE(otWC%c!#0wtXtmQx;dU291n4K5 z$Uqyk@fi9R9_~oj%irlZ4MF-OG|()9>A`SNDW2r-os^DgeCQO8q~3B8UPDa>brTmS zm|+p{J`Si>zp{`rWW&+zQRpuYA8{QNvT29rM)mCtC^E$$p0pRY@LAhcs&2 
zx;#6Q9XI~tFOb~E#C^0f|LM9qivRe+y15SU2{F65!L14s%|Mxu2skY(bR7nIiU4;b z%6P{2<8n&lH#2TIvrV5M&P?u_uS(e*5nd;^mS$gl@%E)pqdU9dJ$j$KXhY-oh zZB@?Ts#3<8GY%9#D&Jhsf?PLH&K0bp@~ zCuHkLGP2E;e{S9mCw}ocp+G47rrxSCuOI&EARu=7anPK=3*ZW(b)&7axCieofYbp4 z(PO(UwTq)w;)ogN`n1u!@v~(A-|Z83Y051;bt**oU{v!-ov~*OaOyt=R?@1%E~=Lx zZdVLbAjp7jBoAxQOD;TzPW}{YGej(C7Gcqk_5TTl6mf5yphqr!oBT`8?@%qmkc${9 z;YMwNK`iMNT*4q9exe%}n;^N1Fw2630gMcN(gk1+4*~Yg)G7;I zpbunqMP5CdspA~^w*z0ny@j%2hKK69Q{6Bv9w+J^nU?-~2{MF_{VNco!4-%Z{60!` zc%nYe84O|tTEJU3kKqPq(Rx(%;QmrXCTl3=@1j--k)xVu8$DRzA2cP8xcj&v&v3IG zDEAX((a$k4$i=i$J6cr^Z4}`@67{VT+#`ClTvMRDqj_)Z+P9ESNfvDr4&_68-2LIoyb~=m|iEJ}wqW ze44k1h@-!Q>4VQ!6C$ALsN>q(1z-gqyC>-S92l`|9hbUcy+9DZy68p$)4O zopoCy`~fJ1JFjP9wY``BUBvozNI1>q7|0iDae=wZ92mv!D~Ncx8#yt2yv5NERl#6P z!==MB`CmtASKldYl@OL$BL5z900JrQFgS_WD@(KG1!K#YWA7sTB1b%-oG%==brTWj z%3%a9=T_u~2LxFkq+_@+f933;vm^9J{|-6^r7?P+k_dX5m4w+JwOaR7vG}m?qwtk? z*Y=X16a)QNjO0N8c0i9qnfxnt1GT}}15LqB&^--<5f!p-3BYO{lw|ergyE(1RLe`o zT9uy(6UW*lgx1fmj@~is0w95pWFmYi=@vNlHbITzlLc;>(0hL+oaxoS=sDj zJv$t|O(}4MC>@|E+*BK|o*pur9`pFOuB^Kt_m*|bpjHs!6O14%dT;a9H0a&Nwb_t; zV(X6o_uv1M!2e0$zY-uYtgc98n=PaN2*m^QktyduOSA#<-zAcU>gxyYg>5cG@dxWJ ze>M%LSAVX%Y%i~_$hH1H>gC!S%Rax>T8apOX5H{iHpCs`TD`U|myu8+eT*_)O}(AS zu(_a5VI2nngHutX^6O`K@tX6mkQFy#&l)_|y_K}#4=)QV>pBz<59WlnJ3L*_1YHgb zF~iq=RwrrOjxbe5Y-&`i>RaL5mKH3{isME)2GTkM7aL+b>r>&uI+_3$KrkY!9um1A zG_g0l+HERY*Dp}K0mHl&#YlBQ+Stf=?|LDZC`2Ybb$|-x8|WeOXWkcI%V83@87wMlrO?cm8AHB6&7V~xvYH)gJ9=J?1Zb!M| zzmx6UyO(a~#V%74H+tRpvbULTTK}27-Cxi1j;XS6UR$c)t-L>A{oUT|e4n>7k84Y9 zR!-$y`xfxv=AkOpxlwQ0&Bi5T*8+<_Gk=VoCAGWbKm0m&!_=X!bTOY)+-j;De|Bi? zN}H$ZLZ)}9^R@faZ9eOB1vHlW0XzW@HUcd;TOC#wUptgum?A!nJ(;tev7xhXGMn$x zg%!DPiD#!sQX$nhF&G-On|^bYe{=cl^7=&o=R)%jPV95X`8Ic!?kLy(gQsYbYR{$Y zKYqq|Ki^%3n|`6aHNV7OZ(f+{qY1O{nZA5iNIHS1-!Nd4BX2TYZ}OR9S~&)=2? 
z-|w5GtjM|JWpBBnS@3F_{nvi5Rx#4y->@>3;(2YQPN1K|_g|!A*Oz?TJQKsaRjB&+ zXA-3S9@bmq_ji{@7byvdAC^e#O&j2It!2EtsmPuAY8slYS(UZF3Oe%cvF($8bbs>I z^rm9+8iU2KuXTR*_X}WBzWZ}wkIaodrH>v-Xm@Wow|RDg=yv$)OTy7Se%kQQJo=b+ zNpIu9lRnp-c16DzN$U?Ye3lYlJW7RHu><3NU6D+_O%$VR<@MwwH@oEdkz0vtW2Qmpf+eObrwynMS zaInOtSNYIPd+>*YtBd*l0v$@6+TBgh->&W2+4)31VEA*B&63G{HKCK@_DvNpJk7$c zE5I`t(RoB)rI^x2teiD8S7VtC2X%`tz_z%E1)mWHsTPp8y?yfC6Y zM-1b9d;y)=tQIQ9K{-1OmtVEy$+sttUO67$P;hXe-yRJ8-GfEd9EZ~2^tE+GJ0sAZ z)9I(QK1~ZI(J(z3v|U#+(`4!teQ;0?6(bzTXL%rV_WG!t1WR-b&llfsXQ%q23&2uB zOxrvKwYz)Id`L@|uCb@nv00LxzxIPZNGs<@_N-NM3O5uM5@ARo)gE0gR23(;29)QT z5g76QLwmCG_Nb=Z#)$WC7V|4LGnGtJBfdu8&wZUR%+KcpN;vu(ul%CZvr_hfIVYSr z#CbRfi{T8zf2Q<{-x?C%v$Qq)8_Qg$jGlPSE0s2Uyb={bQc?bT<4a+0Q}Vrg1Fxms zjl8YIpu-RnhlUaSnNk6azw_vIubo7?7DJt-oL$jB>R+{1Ied58SDcf&`PfoE=Yk5p z=gv;%WASG{a3o_x8;2f|*6VgYf^GV^6n-%J;RA&jJdEJ{@qJf|l$4fpi3g(dPKn1v7DQw)3z=q*9#Y1m{eWAE9i zG|dJrq$|!Gr5*mTGEmz8>UXD7Q0!^T?Oz38hTBEUdSMV(zW<8;cKwRo`-k_n&rO$$ zv^;D4MHdUE&fmckPem(wjqeiIPLfby&r=fV*Tp5@uI0dQNBx% zJ+f;XWBtBTQHAA$uBrzhn&gm$;MrT)3y*NVSHYYROOqw2uf8suI7oJkKaS;7&arR1&-B&&ywK+P&SrD>@$1sT zJL%wBL!zS48(4}ALmL+$Ib zP!(pwZ#BOPx^)loN|rEEPTE$QO00S9oI5{titV(|_Ct-^%C>oDH!{jRD}tpUCR24TEsdIvJuv1B$)(h#6*ZVU?73vF!uxB`e^Md& z&qZ5LqxTmj)*5*~T{(JvY5!e%(Q7sw3?@4YH-n>g?jP6g_7CYT&*L08qQf`zD&$_w z_k~cZC*RBmt6n&ihMnKj4O6s9gFUti)i=BPqgY(WCv8dR&#dn)jd1XcxpDt|JyT9sBGaNtg~gxl#eAhO^ICip zCfa4sPRnh^90_k~3T*YQ0RRS`!Di0^HVxch{vD3y6bqN+H-(G_;Lqo6Z(lS{4sYFI z75o0f6;tQvFB;E!*_A4A;&ZrhS3d z27sx-a`97g!+#(JQ$SC6q%n?9yyTT}WR3neUZPWm9h7w8_$g(6Ys#zv2B za+;lKUJj;F?Ke;M+xFUDj-<%fmlM6QW^-*2=J76XvZpAc?DwTJO9Be%Y|V-fdVPVw zk>BKyD>xeYvn8*ay1{kVPWuOo$#mA(1QBbz#FNtGb!FqhC#`H&2p>ONmmWDMT+m%@ zFqYg|qRMmaQs;%iw?og{!2tbtv&<>ZSLcM8N83v6y11u+cTw{y!WAUOB4Q6zuKM;Gpl6Xv2=XIReXHUt^JX|}9k$8WD(v2_S%6*y@-_$R z>xTDmJs{Hc>#5kp85w8T3GVQ|_9KsB;kz|(-+?f&G2Si!=ldI8CNjIPvdfg+A$#WD zJB>|W+UCvD2Sw&B?7Mn}wi!|K9HSH<(kbcnbzd#(%kVmB{k=Z0MQmF|$aZsB$#10x zp;}K>1%4ErRFQZfiV%Hnxrfm&`DW*@r=X>$+u1-}mmw#m|Td$Kroc)}4@tr3? 
zm*|yJKf9)i`>wSt4m6=V`K^!+{&dIhH2jtWv<91a(DA&&!tO#70I(aoZ;J+n6^z)5 zhi#0mWMzRfc zl9_YG_|4g?wr%>xL4msrWf(^-8m=i-z7wY(Jr-TuRU!9h=Qg$)hieCh6!JDNM|NHs z`^Ik0uBM;RZo@@0XO$Tbt$=}aWML<|(Wro$?Aa+D^_0a>21BI#f9+j)Je1$to;H#TFV}UQ^PHo=RhU&{Ion!r6d;bVX!$^$&V74qV!1Zeypk+cx25p> z>Qs?9hS-lxz-LgU9Eu>il3zlHr}I9NXMZ+(@$8(}(uJLbdQO6NjU5wl3{MzYQ*GT_MrG2aDl0P&RAd5t|6yHi9IE*6&8Na~Ty zpTFsg3Mbd|Ix{%3l#UL&4d2BcZi6a-56K4V1kXX;xs+uLwMA!8z z%g*NT4>ZaU+8ANBfKjYdH zwbR-Bm@jd!(R?!n>LThwYBfH*sr=9NF#m>qUH0W><0fK-SdB02&5i2C88I%Aq;b{L zztL!%j0yw+ayv>!{XsUmD8QCbVDS^JmAmSq7;1< z=ImUALxV`Bml+e?b3>v|SUM@1bkO>OQU>t&8wf=Ex$%JXv&1uJzv0;(M5wIY_YdwY z@6xQo_vt3*S?tmYN!1zQ=wkHy@>>1^87xs|k=wM3 z?<3PF+(Ok~V21&>)8e@qv__8lGZF^VEV>t(6gR|z7s>7DT!#+^s~9f4Lb{DKDZFtz zRx^6x+(N1@H7);Gg*@?z?{)}@2J?{32$;bQP8gCgLS?uwv1#ag>B)_2znoS%=lmHZ zX?a*_jP_x%-Az6$;w!c6`rcVb+&W#&!#;dDiFSOY<7qRch+nwcxxZNH-Foz!GS?Ol-Cwkuroe3-DAY320NH}B_6$TZ$mY4h z{WPsS*`HY;sx^OIZR}qk(lDvTBQg-Pm z!ojn%K3vv&#T8-Ty}%dM*5^vHPV}zju5Rn@oc*f)QLGGbC2;XwrGagg zG5T$CntGxd28Q;Ezs;PtAt99bbD+SeHLsvd|GJN_?4K^zA9jY<_&P2PJhwh>y2OOy}qc$Pon0p+4Hl_H#swT`ND;dkMSwu z#j(JEvD1a03y18LV(f;KxXeO_LJ=}ekC5q}u)Q@Y-w%pW9NqjWbAgDQ$39na`_nzQ4C5X#h8Rjc5_u%qs;wLKk|(i#NfK8e;Gs zS3apQF5{l&?e@KN6dKQhpp%FKhwH<4ac}z3=s2enZdP0Inr`8m(a{5Ey8h7_DW@;z zsVK6(A>R|J-ulq8=Y)XJCPLH8T13b$hZG}ORl<zppVy{@xSikp z8`DB>Wj9DleJLo@{N$q4i*}rmM%gckyAO`Pr_VV%K716}yauDKX1i#8ROR-SYS&0+ zl?1DxtP29FJ&i5dV@SOD-k|KDtE`}EqdmRVvWJ@@-rAp4TpD-IgmI zasc*v;l>+OZ#bwapwk3aw9=wW)3r|r{V^@KDTQxjAI0SLAY*Jvlhyoo=$o<%8Dr*p zodZ>q*j09mQqS+!4=TwY!WiDbb&LOb(ADH%zctT^_;Vf(?Bazwr^EjYmJb9vWx zaCA%d`Nqqqs}8>JfeFFJYrnQdRNilHjy`jKPxYrLd$w$$8j+PFfEAiwXJ7qPc(z?#a6**svk}M zjWM<9GJEPG&L23)xcZgtv`O|5@Lrfs`yc5DCnM%qmOmhy{oFc>of2q{F47p*8O;O0 z2@U&)KG9COgIUyB#MEWqdNJGY-D~f*d|*)Ut`LZK#&^ZPDrG!6MqC76}6WH zXZU5JJ5Fmvt2;+B^716i3+^7RKGU@q&Obrg;8N+Ki&^3))6tGXKjio?m)R?`KHANJ zLj5@x;6~r6#>*Gi^-T__NaF!iiT)6Fh*w(UAFq^1JydJrEz?rU&xt}+BqEYX6Hd0G zvmlm3$iAQAF%xu86P_Y5RN$N90`LfX8aamc?H?PryrtgB z?fR+2`|gh0Q<3iZ6W9>LB+$2NmfyzrQ0Zu+!zi`cDgid(vJ2lL(f@c2+C!{;eY1CO 
zh~BHTj;r^=)GwzFd~%DF9^g)K;fjHnFv>RnBEF`rbw_%W!uw$-%-D77GCZ=1qgi_LO@i0Vylwcy$Vt#%bSE6(- zK|lnG!1E{wgo)Ci!Gol2dd-oVZ*q0z8HJ}EZ4cL6`xr_{GNJZ2h}ieHHoV}_)^jw+ zUu(mS){9`|u96hak5lVz6k0aU<>V=4tkXlv#vhyn`&Iy9*Z9~Pf4H>_79uiEQd8Pt z)@S7ecWqU!o){r^^wQh-ev^2L2{kqYb67tYEKKOu2UTdben#<$A%5+e8fy12jv-N$vuGmck$SEMD zdevm8XwYjW$WL_by3Xh)ggO!iXy=pU{RL3t%TM!aA1_y17k}&V-a~)J)%UkEwBOT9 zg_-x-{3E0-qt(mUZ*rA&ESbltUml-^wSQzxzw>KrGd-M+Y+QBX_oIh@b*xFu#ivPF z_fs?i@a~D_=|%zjCxbRgj5*h+M;gMH^m~kq^b35XL=^Fu^q}5K_JvOGu3xN6^qBGU z{{6d9BN)gdF%L+a(NS$6<`r5~TxiE|O%@uBM~A0xnIEo!UK4~sFUWrkUgW*YupcW_ zS0UUj`AL8sA7I>9fa0?9K^wTwD4vA(Rm^~m>x52qfBp|Ci(!=objmFZlPoRCPY zbuhtLq5vuY!yM%^cP8b|Z5*UOt?!B7@CMx}b`!U`3p_dw0NY6%Hh5b50thf@iOyuz zZ>E<62fcr^dS9}GNAD>be9(mnbO!MhH@mj0LZ#dL5@PiI;*{-+nYoWE6h`Q+}i*o6k_~ zb*sKxhFv9D^p-vO3tn8Hkq{Y@-E2N*Qf6N>VEb||QFiFY?yA(ias@uC+khF-=h?>0 zt{c{eHPSMGsC*5vXSq>v3HW|G=$bdOk3I(B;ErrI_eH8){mUhuaaRb-J#goZZFu8A z3=2`0yqg1K6})NWP8pOCO1caEVi=+7vJgDe4V(0na|1B?_*i0#D8m#;*HTEi9cw55yZ~92 zpuIh4@rkk& zzI!rLMA*JvQ;k54>UHJ^Tt_&tlD{53GbuJ$&1yKZDKQ0aM1WQu;xQ=N3f|QI!g@%6 ziNEPiS<`)*>w<>t!P1y47m*)$3m{#k|uxz|Ty(Ug5pz(T3k{ zj;F8F1m7)H5K?P1e4v$HMyR0BqS0A({lSk_&;7ipqz>`|CnEP%z4mh47H|ygB&Oqbh;8LaZFF_*xPmDA6GX03@kP`x_(LSLAU9ajrLm&I)!&Ov3#JTWiIOjh(R!n zJ%ZH2Jo_EmixRrCSl=onC{cJWaG_jILC{M$C1oAmt2;7j$3cDN4Jip~jwX()N0e@C zt?v?h>njp$`9d&=)XU0IkN1q{>fgC=mqJAWmHcRA_n7UFT*4_>eyWf@7;EaPOW>!s z&TU*bFcW=+pc?vlA^t%|8aMj#FkVPPOT;N`Z}-_vSrEbr9~B`^xlK@l3n}$Eu_bpk z^@6my<6alb9D*2W%6>Ud!d7$CoQbew?Bg1@uJjd(|F?}v+U1cxEHWj)TyNAs!+4I1 zo4Am!`oy`p(LJ)+b0%NSG*Ofz4IHd-&?@I;w79ccm5MlH5}W&1+-O6j%CgS9A;DNH4FQY4-mJRHI;LsmtLU%ti^UwokNpTn!x5pQbAQG*bUh zWu5qt|Gd|B%In!iGTNRhiS-E`v{_d^dMzqR$&Y5OBs71$A{?{f@sVpdQb1rs8o+fJ zRs4s*LP9-XR(Y+h?8e0uO3a2i*~e$STW;$H$b8I5Yj=OuM2JxsBKGToL^N+|n5A-A z^Ufow==?(r_wT}@Nhm-kb)$<0mO%C6N<7)ohS+N1pW{bvr|esM;$-L(ykTD4Rx1Qc z@TU=ZBZ`-W^@HKdIRn+vpBiPKo2mM%MV95I6oA0WVPVHmi@PJ`uUmvQ-{N@~f9LIF zv!WErG_{~RaOf$8rN%A793OJ*({+na=1KPF7EFZAzS*a?iXEfTG*E-ikp=4XGsEz? 
z?vEtcp^V&wJQC2D86EniNk6);z#*GAwN+ln|5)2K;5!2iz9o`65uJ@SRGgNcg2gCH zBQpUc_Jny!p@X_{W5q+C_k*01)w|DY)fN!Qg%k+R;8+N$3K>g*q|&~*z5ELAZ3q>i zgcB?rP>4A8UhFDVpl3UoYE(YxH+`S z?CIB@EIfwEW<|%d5Z!sa*Vq4h`@gm4!tRdE|$sul2L!V^)lWa4|}sV+(iH;ZMK zQqyeJE-5V`{ncLZ#q)vznS*VcS6$i8T9pO;PL*VCy>ojon{Ln3iJmuQL>Z`17_c{Q zKV%)tuXMWHVw%aA$VE~)gpO?8ndwya3<06L@ z(oRES*S{%ydJZlu?_L1eF&Md_m8|#{H2sEsD#%u5_7yKcb>)Z1 zz_%<9##Xj_O#a}j2c*KskzPsZxv=*kQ4N}Mbip<4`NWMVU2>V)W+fmhQ)gFX{Si<% z=aZw{FeTF2G*vWn9k`T4$xI5sV#wi3h$~2q^43#dio<)0FR*V~%DAUe!>*W3=Bt6~ zbO&h%EVlodL;ja-GIf-~b~*B+LxxW%Wo<=+6?@NZiDK=r#)lX9;3#a3-4a3m9aOYq zoI17bJ=R>awE{ql3=-eS0rCxDo0FTCF_w*CJSJKI1H@;0%A$jZfz>J4og)hXs@X69 zkDfb!D6S`98#bd*)4ogIw_g>ubpv4$Fuv&7H;ZRocRkDZzZt-oc7DE^@%J(&M4^#% zhNq$Z>{oap=GIzg8&KzZ3#gVLj0F?a0-N-qiVlGEFk08{;llS-Y}N;`6zoeO2d%Yx zGL^5+35)EJ61M&DGdsW@7OP?9AB|7^%IB!(gNvLWsLOFCNi`-(1+RESDSUbgFkoT1 z{?~`Ct*~QSG077Y+P?7xy1x2`U!_R^tl{@<;q+w`GqNS*3;;k9e2}nHbwib1KczKV zo3*e2EfRDYCuf#_1+(rPQ0)E|2sWq@II>T!mH+Xan-(cr=KICOLS+sm8A~MWON^EJ zy^=*BIJF!tS*pJ!n=UZq8$Aslos-S*izlTOvDg;&DufBqOmP#xwg3fNCEhrIYQUc? 
z{Z3Xh_DaVXEg-Bif2Nw}0j!4O}Z&$9ZD#n0(#Qw6!Pe3e`pvU!% z3H0SZ;K8w2PlIIK)4pr%IX$Nb)6c}7jdmDOh-lWqi{-K@?D+b-73Bl&_0t1ne~T++rozioLfez15VUyYKr1W(s=u5gA2Qa*9?mc^&E2RPWJP`D)A zd8dr$e#qms7)>g(DPDKR1y;aEm=XNUCM=?x>;3TGe4aRd;c4m+)P6n3s|M5F8;%q& zPKYrKxWlKCJp5FpRZlB_K0A+X?J{!|g_cA8va~IOQc@j-$_zRIlFFLus!yub49t$B zRkxgeDD}K-f->aK-!^mAgV;^3a@4ZCC}x4Ex2}W7tp<}cTuKdArzdqAWY4;3uL);T z1e$)o+I*=A5lMqcA(0Ue()tR!N%0$|v|t1fxbSY+i69CXE|O-Z%JDtG6t!Q6e`s{E z3=1+2^oIB~YbbkWSliv4fHDgghS?Uq@CxX~99653D@~(=(>D~ZFtIDH-KH7@-5d4FHj-?4$ z=;>Y%hPgoQtmfu6)Hh8+b=M{;GT%E~QyW;a#PfZ+P~<>jYygF25&M>bNMJg7Mt@cDZKuQXlY;;WY+=(^vA(FS zXzs_8xjW6*0w8&b%U!XfgU1cVOvq*2Hi$~aHSfHgtBB`H@ltJ}z2%A&s^rU1J(#7% zcU34FOx1p2NlUi*cHfQn0h!nv>FC+jA`b-+bZSGnI>2`JH6LuTW9iUJle2cKopsgj zjq;56FdIJC8ejnj8o3BF@O8_AZyCV5^7pgI|0@r>~K@y`iE6={Cf?X~+jUB7l2{BAD0d ztcQH*B`UHNhJ5QuHpf?#VQLGB%*wY*lL{r1%qpQMBqOke*&w9-n6_S|&_2@=YWcI9 zyp3cF;&(w{Qjc#xIy9Zn!nh$TE(=P&& z&!A9GDQcknp(OG#zjs?fhJv;|q|nOrJhbKIe04t}RHfvO*4UVAzc>BhnIjql6^@6h z`M!IjP-%XnAs4+gaB|+3s$;Dlnpha5XDwpuR%WnTEJOd)W|&mak~lArr@&FfdsCX6 zi*^*55dQuJM;76?zu8M?z+uu=lhAYj87Z(>ZA?rC7l-Kien!1J?^e*si>p)+2-S5) z0Zbz3C8_*F=WU)=B5zYL?_5%W8aWlYSgT8}J+#8C))Pm-rN^p}WoTK_yFugK;ok70 z3MrQ5x@7s?2}IsHhZE(lvwbC#J@)x5)KkguzbFzy!$6dK-w`l+$#sy)484>z=_|E% zE5m4IhP1#jW*@q}4GZMJ9{+y*^}t^b{Pnx71+5Qt?8i3#%I-g} z#QvYO^siBWJ@DT=(0(cg_TKGUP?Wu<`QLu#@4x^5@_=6wik^)Pn|WT|1bHw;IaS#l InR}1^2LbeNN&o-= literal 0 HcmV?d00001 diff --git a/PyTorch/build-in/Classification/Sequencer2D/sequencer/img/Sequencer.jpg b/PyTorch/build-in/Classification/Sequencer2D/sequencer/img/Sequencer.jpg new file mode 100644 index 0000000000000000000000000000000000000000..fc8d1cf7657df9d5a385f5bc5d573dc390d295a8 GIT binary patch literal 65080 zcmd?RcT|&E`!*WKf`G`-I~k>SkzORD4j@55I)pZeNE48zK>``2_mQepF@z8xAiWbI zlmVnS0Ri5Gh+$# zGt|Jy0CeuBbD(>`8^o9g-36WdpM3po%hyGh#qLyh@>qMC+l#rB?mXTFeQ`gYc(!Tqf zzJcLABdDd7wT-QvJkY~@~p)Vq%qGMv?;uF%+GcvQX-@MIvk19r& zU`nxNHMMp14UJ9BEnS4}p5DH{`Ul1*Ca0!nKF`h(Nz30>R@c@yHYt1i2Zu*LsK+ON zkLw)h{C~OZ-v{mjpcRH 
zt-LvJ4ey9y2eVuvUIE?jTesR=Pi{S`o@Vd-s?#KlPD z)vA!w=3w?$i*+773{dQx+HiTu+QtGW`3ubDC4vvd+J3i3Ph#v5MY1B@Vg12o%gXi( zx3rSx98+Z|_qQ)Az8;tRkJ}w)brcRJpOzgfNe@_r$mvTPoLH7?mbYJT|BQPywJ^8- z#2eh$*r`6&0p1e5JW5wWI50rbvE-KhW%rb1GJ)t3AHo1}d4gH$b`03K%qji}x*Eu< z$rWA`FrR%95L|tH!kii%RzA0>p5qebkFPo8tA{2XwkCzT_lFvZue7I6`QncDC;i$K z_;|GT?-qY8$Rsj#72@rjSmfEshL@MZD zIK;7Ws!f6H{9~CLa9`s3@gjPG_RQ&l~9 z#ECrMke0vz3nM25WV^4huI`-ioPLj0xtf4wdZM+I%v^x=!{Vit?~RxreiM!&Yc$T? zo*}XlQK?gc+6e@?4FkliE3`ZUqB`_AtDj|yHv#imq3b{j!+DreX1c0)sjSQXgb^@J zg_>UCm2K`dx3%7(R!H29_X~EpvMKodk2bLY)V-7BaMz##M6KhnLh0n#ENt8&3MIX0 z9THMmd15x@QX8!Y*Sj|$Gmuqn@!TRZX<6DNq^{X1-8{Me(2O712?4U)KS}Q@HPfGx z-u29q>fuPehV7@QbzDhQnwzb2iF^wGbyJjy0g6c?XLq|W&r++pz&!qK3K6g162qI63HcYL`X11fQsFSz+tYY3snro@T4&qN%;1UC(aVJfTrdF$7g| z+zbi#gi(&lpqtNrG@}$GBx-HWR4apu|8w2?9|hchv{4zLqb2f4512P%gaP`wZZVbt z;`EE)@qAWrj+$6WyG?<_=zjup?grmt?SyrpFmPMqQA<+80&6oxw2G}AAFul)K7=F! zCfwLm*JDdHq?lHhz$d!2Edqdv4J}l)FeA45*efgg8or|eV@Qfd?(gt#>k@e9JJnOa`-mC8h%ZY7^~(}z z1XEB%mpJXMS_k;}8X>1`hLtUA7_;?&0R|Z9Ghs4LJ zegzaK#YP9I`gC*Xt2B(Lm)2=@fIq1=;aU2r$t^U>d%!j=F#9vl+Yb-U{dy6^4f?;l zFeTE*A$xudkSGJRok+7_fF^~U%47T3DSi9DSf)&kWGG`3}#;=zqQ6xoq6#^c-(`sSjV&S)plt*2<)?YBb-E zr$t(#kM=TRbSdpKngVK>FUE=t&|WmvrEfE~zG?gEHO@~6TyZgc><%23ODJX&bZgDH z+^|KRZLX~zFU2f5WS4hgD5!Mde#57sBVTm^Cwbp%^3dKtUtz^wzLe-Ln}GhRIDe;% zCMhQr*E2Lw{O$exN^!2$Qj3I0?yh8Wp>2)J8@F0{8@&n)tT;mmk>MtZwM;&MYD@04 zeak~n&5Ae6E^Gedmia#nz4JISKDq5SQRgo|)veirS*M7x5;OT!*AV`t;y-!kQtMs| zQ!ZwSy}~sHHa_MPiR&_f=6zerSw%~J)lwYRk@79Bm??>kV7l3}Bx?}t-QTG;VqoEy zL8_}w-p;SHNJfjM%t5LvI7d2IwjN;7m;P7Y69H zff9WQxoN^hJfXVn-}uMPfb5?@zeNE@(Iq23>G6+MDiFsbq!A93=TU7=+|z%4F@FBu zlL4yT?M;nSzX2QhMafMTp4<82;=~t+%q-)u(2Ci%#J8C(>lV>a{*8V;Z~hk{GPAQX z#(XjF=bgd_(rv(*kY^{UCG{kMBgbw+X&VTP}(UL2YVlQT2?HpgD2uePkM z%?(fg2x;;6aj<+Dc{DfCrd9kEp1Q2jh3BS;k-v^?|5&b?^Ct*Ke{Urgayfe!vULcJ zEXn!vZnP}qqMh$72vE-Q3c1>To?a00$+tCbGkKy9j<=!rSyb0ftyBg+AlPE!a(c!) 
zO{9le)XHTma}(2kY(QLX_Uj%sN3d!(y4d>8bjOoVmr89RvjjK@_c59}=%Hd*TSnIF2Hb`O2OWIYrs~g0 zRf0_&D8*uWI@Ct>bgBMF@K$m*VKH9x5(OVyMe|u!y0YOcLta1?(F`Yc6==7@hz->< z!Sl$UofhXev@tRMJBOzxcMZMjd>f1Rn(q|5X)g?CkZXI&GCilzYNmNB%)P;|Aj>wP zSB_&jPXjX1@5n<&Qi^shez-LO{nB1j1A8`kEx>{9V zH>R@%dn0|z`YPwwPclBBzHWTjQijUpPG~o z^=0_*S! z9vjqaL7F8a-WK)@O7}{a*9%%4-{pV?BBgt3i7iyi{a^kuH7VdE2IwN!@_Cx}q=KF} z?Jm`l1;+9u&)MAc3@%Aied!m7*?x0B)MW|Qw(w{BBpZe*I_()YMB=ea7z za(2J+-jJ#6&7|WUcjj^S?Vu**-R*IoxBoW^`60Hm`>`_Mi zGN$&39NXs^0lrHLL)IYFsD?{mw)*cei&ulOG&#iFd1HS?EG{`1{z^c#G&rAEtBc;p z9$Uocz~+($w#x`F;hB2+92ZxvV?1wU<#@8!mz}<_ini}t!dzB-5ma-O9AvrL*r#PIYxrxrKPy4Y+*pY>c5S6_L zC|PnTi_0$E^I#4JsH-FV=4p=yQPqqnR~4hDGEPGh!_7=a5nAN(+hmWp5S>(R&lk7W z5+o@=Ii~CCLrRq6DDt*3BsAK#==c8FcPd-AIod@m)eLiXH`@ZYS#Jzvkk+*Ku$N0s zp)462O&XsHwJl6+Rx;dKf{A`UrTeE}C;@4{L;Z3Zg9FQhVVLmysZ0^Qq5|Z;xR63F z??s*tIqJN{3|*S0OzkhNisK?ad+P>Nu4`S3%sort*TaO^FClJ{aSd5X|w@m4{N6%K7&UIEwD%xf*nc>J}`R&Dvsj9_3R7ax8x6F2NnF;KHqB|&LDXie;8nlQrTfuetY>x%RB3+v=hATf9l4}6;EKl2pYK!5 zxPyKp3dg2cT}`@t=d%0n2Df}lH1Ge05-`ZiNwY8=v6`BgubvUUDjSfr4E4L2kO51o zx3D$y!xp}?RVO>mk9dV}n)wD@`P1?4^_Ud9$9ILRTMAfOlx;*h)lRrk0=*B?ERxcb z-XEIYwDGFMVjf3SYZ??wF!w;Y5|frC-~%^pGGLoI&peOKdcfyH<{kRo$*p(GyvB7@ z)oZ`>_#&UMUl$PlM)1|f9=pA{pYkzcPeLpVQ6P7{in%d*rQD=pnc2vo7?GQak{Hu_ zv}o0cWKAI^S;<1}L}3Sy=945eyDIqh8a4Qh6f!c>!yh&xQfV>_P#pe1kpWt9xC4CT z2QWZxtB?bB1}L)>xi>stu{$2cH0C z{lFyT*b`ZPj6eGcxzfwW0A(}%pNxY)AE%z32X5mHrqw~FXp8ihkiBJAz)U0^C=9z@ zPWX^}xFY1BA|nJ&s>HWpmm_QQM`et zH9?4GG<^oB=s5!?k&tX7+x=`P%3%eX#@<-fB2q@%v=m;7HA) zvxD<|c*(#jbH_-iuYbr@zy+ksXxn7N^v6bZ84QCX#(pnLvtpsia90`3=muG z6moFc=U*l@`=4hD{J*64|9>3e0n=26rDyFSVOKh&_k!EiC2e!&gx>g7RsQk&{mVyH z3hgbL7j##j2VV#c(9%h-WqtinN|j=RHqjZ7HP19+fI>7i_%YLtLzVI0TgkdA3Hf$| zB7=95V1ZE{0#n$t$)%OcQb2Q6Eniqwx$h3KuJP6MoLbDn96xR;`VyzKtA2{~=rb#Q zYxB?NT^p(WZ?&Z8SOopSD z4Wi#I3}L*cgM1p*KaG7cV(Wt08{Xu^u{ge|6!b8-aBu9_(a5fWZ#P`fAIZaVd{DJ= z>l98d=!2wpo7Mp@u$n=)_0?)JGFp2)Bb>TLWgi{u$(FF5k1o%hDU!L<4z(u6bloX- zNWvQayfKq$f4c-D8|);$jk556YnTq~l93D$))2^U2Iy?1k^wq|&`Ti4UJTHp)G)Fe 
zxR&`aKq?SA3p>>la*~1E!!tk&FvuhWbOJ0QD;Nd{5lb^DJSoBNt1v)I@BYOi>dXwV zpGt=zfBf~ouQ^S8JeUVUbVcY;cK<+rK-NHrnq-#*#I-Zr8TJSO9Pm`5V4WSlQ^RfW z?6iyTDc^j=#F1G8EF3ei7cp^(0aBCI6&nN!;yR_r`5oBVMfxkF>}27w4CGX6kbXG> z^u#v=n5Vx=^@M0vz|Igx-u>u&j^0V1VSu8*oe{j`*rUcH%x6eFgeW+=9qh6L{($sw zK1jPLwrKCr;9i;FEA%DaIN{bu-I>9-FUu-%S|?PhDP^qb&v-fJgRkpQRj<=TrEa^D=UOn~z z0U?!`%x*j^7*Z;_a|*`)LOkC5XGTWs2BjF@WYcxieh zR;KPKqBn0#{Iq1FczVe5z@wistk>Ys6*{fNF2yp)X2#CkXLNU~DTiOiRLRudKryclU(F2Nbo#=#EG?oeJ9*NjF96Goh1MRg9c^<@*e@oPu!k z!(4NxM^o8Bt*qH@%GG(~1L2J>C?{5C0%c^C=BJtoo4}Y_KS~ck3uvc7Hx&e7OuhcG z&TXX%nGx9F`a3QP6tWB-+T8a-+)zW3Fk9 zaei8Ee#h!tMkdMtF#{cvnEXP`TxNi-(xj+Cz%?O4DeXI$nWjoMOK3xq;wuzjCnri* zsTz0i6y8RO%(R>BD|22gzJm^PC^^UboD#n~3I^qQb%jWfyEyZ3if20;(|Pj5nRGan_BEq>jbB3M$=YSF zs&X7`?GF3@Z$0Sa)1fS4mN!&e8 z_2?G8N%5=lcn5dr%27v?z#Kj=qGIGkr1ZzOSMiheteA6eZhLuMs5Po~Hn5r()XB71 zk?y*gkz<|^m>wpcIe&azdK~MF)8=-S&Ga$l##*M-R7hjLm^bq9Us<>C~74?L&*SMeW#=aVu} z0{R#rTI*{~oYXd>Z8D)e-sU)F%W{Jtv_)Pfn&r>*B%;JvuMuW7dEl`t06lx?S9tLW z;=V^;!eUHt8x79@yc^XM?kic2**K}l9^J^EX?X$l+j(**Q#S;aV|`0Z5$abx{ni*N zI36I_(rRmKq+V5KdgSZx;o)}_p7=B;%3IUujzxx>25*tfZT(^8%!(Br&1Ugk9_fV{ z+p&_{`!mr-}7JBEmV%|$7K~_YqL1L7Ql$=B@n8iWVfgzg6fi`w#d95njh0=aJ4lrNl5 zHFHrP>+&o0Wf3*In~yFu;fuJ}n1hT)NG&rf?Y7B~6d;6bm~B!a%tVq+uE+L8u(Qx~ zNLWD8SG{`FhBo?p(R;%Hj2`A<6TWc)r6x&gu1j)%E+AbmH#hE+*{){v)yXAbk}!!j zReQ7%IDqq(RmPcGTdfBMHIF6b+uc6Z@7L;5E}d+4Sn=?;33@x-()&6bS?2W&R+%4) zT@Qra#d%FPPS5b2=r$f4~+^UfePfd~^}?;w-yYiDPya1`925 zshOLf4^1=VPBZ%U+iU+c!@<9H8-sMxK6Te}8dc?$6X3F$-c~CsN10N3cFp3=-&P)_ z8%Y4klQss%xavL_CO0{%3+2v*yRaL?*5yR4LEKDLkJq%MWT1|1;~BXbxxNjvyGTCd zw*q)9od`KWO!$1GKBpVgJTzv?hRHlNRvU}w7Y2P`1kI^_sM zGK&O@AU#vLob3>I#~hYZV>+&ykXx6*FFVfB?w+A`RC%f4ZsG6Y>BJQL(r%2PXl!8*UHhFY#E*46mMk zpHRb`F~s-b4b;X{3OZ$QZ_YeR5c!}y*{+RO>;}O-4SV?304;B={ki2&jGDWkb$Fo~ z)Q9~lT-qvWgl&8m@3~_OCELbje-u?9gM0CO5r4Tf^mUEIW@iCVk6J3LX9PcW0MAp$ zx*Ns1ti$%xVs`-_)dLcDV8#lAcd!{cPZP;0^$H!Rp!Ys=#g=oqNtTt968+NFSwcLf z;MM^1@*OVBr}t<<#DJ`A`pS#C*+uD*rDP+jtvLOJ4@(3lCvs*b%MUP?8Iyp 
zTt>>2%|z?>=4-&1d#o^*@|%_73NGngquNr|2!*wVT_bbn^~dVXJ~JJm$+11VSK+&z zVE*7l21tbFK11Uv`i!_XrtpL=wvkwhu%Zqt_H{e>-tCFFjO4gm>UC1_Y_2Nq(0>Pi zQD1zR4+B$`nKoLYaFh#ll?9b8Wr}A^ZCc}Rt+199)I5LLAc(K+Svd^4@9bpplq6r$ zaJ5pjgC&5lgq^|4`x?rt9zT5}6mH*}jC{N-t-h z4*PaJMaY15`gWvFg-yS zmHM=V&=cJ;TY8d{_2Bb;9wrX?V;Gn0&3$W>k`J>fj@bWrMzu`()K+HPBG5$q+ZTnT z$0g*^PXWyyb)gM{aF)rAqIgVfr>tFJMNn7Xl%S^3 z2ts(^1pCPYHr9{Ra|}>CPSpuwFimr~nhQw`uKH;~fwu6=j7ma|b+-61k0Xy==~xy2 z9DkhlFh`;4y!oq)iuBCp47Jtp%zWm_!PPBdN4+i|=kGql4epuKxE*DC8%Y6#GS7#F z-lS_5=;x*xWzgXDW?7YP!%R!eiNGn$4q-yY5>ulJSvxqXzG|2C#HhQ3jvJI!)N;5+ z&w?n~xsW_aEY1S|yxXR*Tt4r@?(97$0&MLZv+%ngv-LP8>$YyWz{uaLmW^e6-6EmW z`i*`PR#?H{jb7S*QpqZsUp9D0L!iTEMmNKCVBAm97^UYgmBq(wE0EZ0y#jZ<1=EC& zW(sDmB=pF*qF_-@_qyH_EHaq&&vRy|m8|J#aq+T6%C#Wr&brDC>zZBuyACprhnr^dpB`f?tOdFE zbwz~=Ig&DEL!Q3PvN}sDCvnS~&Gw1BI!bFnWLER120(*DZ3}5OR0&FU*W6+hO@jjN zah9HL(_-J=e>YeHPc)|WsYe_6=a!S75{q%2@@@T$&n zggOP1g!5PUoqX6dt}8=gXJvp~sU>WFvKHll*ad3*x!lO2%HhXezL8Bvcrwf{Z$3jy zLP0Ay3FYO2VovR}j`nVXSz)I_GxcRH`&Qoi*jvl%@us*y8)wMyCK8#9F=*+rcsCIO zzU3Vfj^;D_>Z`{MKLK+^Ocef%@FFe($kWB7TMmI2PFD;nZa z@ovTPbHB+%Z9uY-j+DeF(%cOGLN)xkp)BD+d1NP%qLrnvY>6@EE_E%?OZu?IGj!Po z7hPmB5CAYD26bXm1GzsUlqN$)gy8I5kUHyvSXT+PSv08&_Dz*NHp71liFc4VnSKEvus5C3qxphA=Y|A?RIAA6~38>rskb zkffHA_XfMQ^EyN)>hPCfn>H=!Cd7OXLIfu@QoS0gl(`2}&V&!wOOI7au=u4%DVdxK zPF<7za}|NrS3R5IAEvhS+mukC*52cTa;`PozV``oD0x!_w+&K&Y=JEM&B3Q{a_n^P zV{cn!Y&A8_Rme^gWRazX)g>>hsLz+aW#!Do(M(7}LRG@>5p!}zgcQ|wc_i{CHLS}P zhNTaZ7b2I~6ZF(6Kf0ZSI!_RkMH3>wu2bdhQU_1IEbZ`uWYl^C%qb%xhWBQ&oR(2# zsOG+9zMR2{{+;4AHJ&OZmG+8?w0p<*zoWIArlc*!GdMe4}NS;cDqWu zk|%KDq;hT5FHZLcEU;PwfqFqy@Toe@k;3;cI^Za24t zOuLqwAD4*n$%4wzJlVZ2EAch$vb>C)%acZgKDr1sjP%fJk!d@EnX0ku5ee}^Q|&3* zkh$6(K*klU6n|nHedr%&f#9T=7M7Thl=$G^*{Rk*o33%l>HRNMDHpO{`4XU^VYG^sF#u0qCaAClLWkJ8J0@YRNY$8Ktie-HargzG4Ts7}R^%wl1R`Q%e0d^@#> zEhUm7wXC|JmfdY$m4X0=kaX;5TI#L-AtdCsLReQkJWsetb1eGN0&km-?CY}TO(^qi z4NIe`)z!N9-y`xLz-Xg3VGr=I%K4JLfaJH(}F&b{gXNbkev#_Y?t=U0F{$jY|cvS zI=H^E#~7J%6aI|t?maTF)#~2DQC&JYH|FpuN;vuL5H`N9Ku_oEQwt(LEkir9zG2L* 
zR;;bu&6JEvCnFu3atda$HlT_(YIAG6)zim6^+u6D8%9}|{VoBEQA*gfN-85NtIlE5 zmliHs1>{4mWu_|8EF6Iu9`;sf)=A3*!rA*anNr$i0D7whn3;1_WDm@?3;Z*}@GQ9s zp|Qm0lF-ukN;jy?WebpGrS|%>cxEkJX`~o-%bl{Bbvd(=r)w#Ecc_-cMFDRak(U&v zL|r~CJjj}oKQ%d>h}v*+S%-9YECqll;tGcsHZ4%fl`7EORl{^gEyt`hxI7D>5c>XFl z6@)OZC6^ST38gMH7YaB5p}(X6=_=%C6JBH~>=87LEM1Sd?4(5c5z{6_xxQ~~as}oy z=UiV2xBPXYGNnG$m6)t#My6-Gx%x~*K6sFmXnT~F@eZYVs`n@-x~x}C!AEt4yWU$y zU%V~im&{PojT)RlQ{P2sru=W^uI1tWpNEURZG$?ea4|_|=#63S&B=^VG1=NoqXCQ4 z_bnrTD|OlNx^6{)EKRw>v-)Q7Oap3rM~eBVJk+hF;cDnya=-=e@8_h8KU_!x_uu?t z(dqW#^IOSfD5fKemY#$i{yG=+ZG7_pQbR?_WQG0tIMiB$f2zk75c90=uveI4uNa7! zl<)OSqfDSS2yC-PlD|+X*6M=~DD9d( z&he_7kh6aCuX?lqx-OY1k}gSo_@i?{dvu3(o49qFeDlabrlhqlXVGFzO~=IN?$p7M zv1wE7lxn#D-Ra;{Hn-oV4|3-f91N$P;M7Fa_onye1kyY%PbMbKs5<4>!%A|z%2g+f zfJ^a2a|f8ifz?Ls!=Z@wYm0<5BaGmmnc9D5JVG;f6-!84VKfK0qVnANc*o+&TSO+p zZuDJ;^iG(<*ODTVeeiLcVbxZ_b)bZySyzc%pEEYOuHezLpgN7XO$s-ixYv+F5|JGE zVVg5zY+PRLdJ#YO{eCUW15CtkyK+GqGNz{U&X$4c#_#4zy_wH-y{aAUjC`eZJr>X^;B2?rqa78uIG9&ubp|ZMDc9CSN4+Iq^NF(|BcjUmmhVp+>{Q>#r2 ztiS~J(y~CHRTOH7e_$rTOHUM`MAqnPS1})NV@Da4=vQneSNgQX6qRokNKn}0>eJoa z(8&(n1+o+abOklH80YY4;y{5aNURR@C6w503&ew;jN7DnyBU(Rcli}dwVDQ&p*76 zEf1G-rfFbeAxBL=&hsd{R(6|@KW$;_@r|Fq!ul@`UQcbp$yjO0o)e$(!+$rXT^M*zA6X?AK%9>C4t zaiRUSx=339lS?~9k)!Q@^X)-^MJr^5ec#z7cJ!Ov^9RBO!Z>z_Z7|dHOXBAb%LmW`J&LH8MaNu{2qw*un!P7o6Kl^(Blb$-2 ztE+&6szM3sDG(jU*8=NdYK)#9mof@Yh!<(;Bh6t1;e<^C+#6=;FyXnG?> zykEca_bfTvfJhtcg+IwpJwH`Hbk{C_rTK05Z-t6eB2hn=z^#A%)h)GM?5FRcQvBzs zYjM;@ZPF7bM*|3-jb~h4F+LWMda;I<5}<)_=;+HC)XMbnME9WlS-tnHCdAc$+=?r9?fyV!U9R8#=e39XB<} z3+NQc@@T~Kqjz+-NayDX3Y8Z}p3F@sz@CNnI_DgC)t%zlsQz?AJEi}?#Up3-&YhJ0 z=ZOTSAtWbV5?)1s)FHR~$n!=m^@)IXT9pAxd9J6}kTt;ky%J59ibiOZQQ>pWO)7SA zN@_Zork{CEEp8erT7#;dpZ|j}^Vj}%$i05j4Xy#Vn6zB;gdURI1a63@cvmpM98V~B zD2Nc_uql8j5_+mU;!~8TOeE(~rg_-e?-k#6eNXc6JNBnmgQrUCW7M=CA9)1M4u?mhfQdYvcnN4cherq1@h_x)-U5de?md-b#+rw7g2}y|_C4rRHgt z7-th#_pmWSn{Mq%Wt4I{%Gr9I2)x}tZ}-;ad-zw_+QIzzpYTYz+q9KI-v>Ir3GFd` zFrxCQ!0Dp-XwpVmh^#64z73e|0=E2A)~~L)Y^H!*g5p;#RI_X3kzYHjrAaAfE_)s9 
zGEracc5AaIM}XjFoe_LZ1#H^;B-6sv`}S~IWORYzm|x}ZH#X>^Egck%cva-jU!27Y z$*GHoo0P3Dbqz1>KewDCBB0nP%kHscmd}pMTnXL!T7Bnr6X2z*{NB}xZCkzE($*;c z`kkWG&eAz+VW7(Q84th>LQzrR86YB8(>N`RBGn6FL5P%Z;05U1TLpUA-4Se&y`23xc==LECP^q2 zOoe=;O9G{LhA8)p(o*xZc(@?}M=HERHCHPYxFziW)8jY7RUB_@PCj}6PS$kn<1!6( zb~Ct`o4K#I46<_sCWL?h?_L5)$M0i#4!THCY0av*=5-~BcH33y|<IZO0#DhlGBn!0f93{4kUFXXH2;M{_+>hdg` zoF^2BQ;`7cp_HJ?=HIO+K3<<_9ABlhTZd#1FIglUnVWEPuA2mveRc79p>|e)4#Rlz z#Vp@zq-e%UqW)aoTzUNo`TZ>AP~68wFGkm1(!b4{spJkQ_~RG~sqGMVX9N2>uV0h{M0Jvw|=@eTtm2(p#M_4DxV?N%7rdB2_u z@hr2VaJ000TVhd1TB>$a)0#FBav0?*xS=VXVC91HN?TZbhPO5uI%8|ShRlwPl z*eNPZ%_iB9k&%Z&8Iv@m4|)HisN5*ca``mQKUmDoCnx|-g(>mFQ%$B+jUjm-pSJei zl|fso5L|=g>58F>KcNNp?J_7KWNuzLuFEZOEw_L-FPad70k{$AvNy3YMcN@An-5ix zuJVtTqstvwvsJ4<#|`I&r(1cEWW!`F5K~huJgye4roz~!32SeI2ljdzXN`(&A|9uf za9jj49Aim!J(ZY;r$I#FKj@skmqw|mZfyX*Q=(jgT&3xe$MwU?x6+N?!^_PC3oxZf zH?J4rpN_Ix!iQVobD5Q|OSq!N60+HW60!d20flBL%gz*e#DhM3p;rASiK{BP@{5c) zv{E!;IPVtDs0V79%zvt7l`u;vu9d}GmdmT#m4$iJ$64nlqXk2zw7IueAc4i9XvKRy zb?;Y?Gj94reAyEl^i*de1k6ss@HwTp7ffAo5w{k-ED)Ad|FcM6aseP}br@H``Rf|9 zecE8}nNRj1oMff*5w^nBe-PLL&I3xQt|7rYMp(tyL_~ufO!Kh&v+XO~n?b2){p{uT zx?29OWP7Npux02IFI$ZHvg2}Gt@%l8o%e;dMecI5t9xYwZ8rvmq}xovuU#E67xI7m zT5b9?Bm7k&CInd#c-l9&cvUxyxSLSOuFFKZ+hyToLUf52y+X#PB2?54%k7;9iM#xs z&-AX1J!-3c=26!Yu3_HblW64Rh(gtI}y(u5|h=U#tIEY_C!zEhgwy;4@Qe)P8|r z-Sv*ZG{aXj6ynK$oz!C*#tR`mqNsf&WQ?fhIB?R~nyL)s$H=lWF_nuZ-$*q=hTf`5 zRc+U}M`4SpDgn+<0LP69**4>IUeECEY4g9;bkt`IY{2hEyZFlv<7;nFX%hDv6^Cv# zCv=@6^jzf!I4n9R^PwsjOZiWMbCk49qaYdUJn@RWU`^#W?6^t7bc!5T6fXLxW4J`X z_0b%Tzqi<3*qM*Vq~tx$Q$}iX55gBgfO(|u9^jt$F4k%ERdK;P*w0Z>MB8{>|8Tcc z-Os=|jzu@B zaMuJ6; zdpqPt^)5Pk7cfEC4#M5~p1)WqDx0swcrEFeB?vY~AG8~HzZeGi&`Z=^5Rn;PsQ!O`%?1?Iid={%M zQt6V{L%BOQSFuA59I{{EAevpv^v8Qh)1o|b)-lWjsZ&Jl?}2HnRyLBs%d+X2+G=vc zdE!$jPm2eKRe;>~iYoi7LY59;+An0>LRBf;hnV`RO=HRAr_*jeUR&78!fT?hecH2UuZ9=SuQ5`*$ksAbfFQQ#y- zk~c{fTCogO$v5g{vGA4+krKy+8f}KS-Wcs9exSlk12Hiu(1EPt|9`rN&<5OBc=v5EcJ`@JjiVHo6vP96<%!5LZh~nH=Qre9Hz3tqw8BuOwTIA+gcNA?^kJjT+{S) 
z6+v8OSNbS_3uf*8J4F-GkR@8c^Kkt%L_0nCz9N_#V~Ue9<1@&7Av$119PCW!e+mZ# z8s}}SGj;V5rq`yTh=K=Y4>1EvxgIK-)hkH?oT2^nzMuoTTzb41(F)+{nvEX;fG2mgvpMU(ZSpt2lh{KP$2wH_qa*yq=Ac*_bBLrO1 zU`_dpahB~L#eCX)T(`Om@dg+S*L5gYTIsv>Qdev}kEzOPkRQraR>znLYM0Z5 zkvWAn*)4rbPrZ&7%mg@vzoni~-T}Rx3IAfMC3)*NDumohZ0vGjbMkIn5T-gU zfte86+B+t6hEfKQvAG$*cY)F)39u5qxUox8S8v;d|}T zD_s>~U&oOfeUGh|O?mdOJeCrIaa*CFY5^f@&c$8^L0HX+q)oMpTEkEo0NmbL5MsZg zD8wFJvLXxj=Keqq@*3(&)|^PpO-ORgdzhD#Wcj?t_-=cp%G3YcUD?FP{Q*su!q+r? z+dDiTtp9KqOIeC^J`@!k_8CJvXR%al*kD-dz7?sXO^dCF;*-7}yg7|QDRwk*32A7Z zRgsi{98QAoapU^ffz|6i)xf*5@~@%E!f^!I^`&yTtHg{y4U4LFS`MU1r8b0zGF|mo z^kp-AEJ`zqY%=Yd0Jy(h*n?TKKjc0wkSkvcASg6VIg^Oet${CX%UQQ;3VD z`Z^sM7Mrv2%01#(p6w7+jmKt%oN4uHbw_*djV_rx4PKwDD)NxVJ)f$A1A zKn3UMHIMKIfUztp9zb@5xE1-F^`sZ%{f&Bk|r};a?C#rPCh+mOC1=nIv zAp|=AScuEdTiNtSwn*2p0|$Xa1=IcQ2Bn}d0FagQT0Om$U*9tc0J3mEnyk(j-S%wG z_&RWOi)z{5i2U(@i{$uPazVXca`9AM$-NvnQ+AHnuD##0ATp|JPh3aV=|V{1e9*r^ z-&K9Dqa84;I`ZEjU&PT&!=hJB^DF<_1qX+(ckzQwuVk#3&8ZH3UUhp4mbCy_ubT_} zFPadLqj8#QPVZOM90urd&-VYp-g|~MxrO=Os9O;cPQ3vLSaxnM@i!SSbuTlLk zbN@VqfsTzy`uoB)*wMj_C~JE9jm3$*d@UmGD^Ks`~^AsIP4 zpHHxCpWz#;QcVywdq*{0$ZpX{ahx%{Tde;m-ePUO%`2o=;eJeNjEjg(gJncwu$^|# zuD$yS*P5}r(VY{KCvtCM%v7*4xP=o{SQ%sgF){H;nKb=t#OKyIEzA39GLT2loo(v8 zL3XpXX}N3DvVjlh@T4mIaz1Kd3NT`gKfzPEey}XVf#4*{H~?xzUZ^oJ6HuS51JQ{C zxHqg00l(wm50(=^$tfDh=vboYZ+@_B3^6Buu#}+a!0q^@gDJeiEZt)|=QBBeM!z2{ z%wixg0xvLIz^DZ9C%^;SA1sYgbhSx_H!GHarqJRCIqJ-HU;A5wu4qu@%<9Eve{l8)1`fPQtB@`8asRAh2Ha~|q5hYwkI&u0+Bx^`#l7%6M% z`LNBF{JIbL0mBO{3&|+Lv98_x+R)mK)fsroZsu-q zmC5f~MYFrODq_Q!PBReKJPg$q;$2r2`+2L^f@*QV=MBXF^&)5dZ77od9~m%Uj{c`i zaveab#vIAxrJAAPMJvLgJrUJ#@mt-X}7Yx^q=+~S&WG- z57x>*;Yv|;kM0EpVn1_=}&Ez zV2OFg0*9DQuw40`*h2eHJ;t$5pv!beS_q|(X#9>k8=Ya4;nLJtJEU5EO~sxn5RF^O zmbGbZOS(b9NBXb1c0L*WEHAL89lq3LJr;Pi&eyq4R!)Dj(k1PCdkl=XONWFIV%SQ_G_p`c}z1Rj8x|GRis*r*)ElcaBgudf$`69Yp;FT z3rwGIxVPdR6cQ8whk5Z%s`T*qwgmE7`C}ZEva*WzQ|CoJ);vNC`OGuxlathlF!`HL zY@fAVr1|v>&I{77$$v?hgNm#`xqv$nN5GXNbw1%}%k?YK_q5MmQ;*{>^|;sMTrMkc 
z#GoU$bUXqI>qSjlrAbda&c>P&o9fbE2kyN7q`lJPa6G@0gPoK!;>^X1i2E=jOP$E( zCaT!0Kb;$Zf5I0uODy$JwM1*iK~H3o;3=PdBguv_`ID=P2t9%c7RNmqnWWU zR>*5+=dRnNOS_J6%|BwRL~p|K>)hi3ZMXR-eGB|q&Rec!sqh)B_)xy(QE+YlMVLgg zY4(f}ThKlGn0uzkh%m1H6`M+jFKrW*#yK^<@veg!3U*!gQsq&hl?z4A+p=|z$ZahkPt>L^9SAS0ulEpcn+8mZquqwmgyGJ3^)N>wp*2-yE(Rb@W|{;sdLkCwvV#^yo3-= z0ydbg{1!P8SG%$a4JK>k9i@%?%_%;$$>(`M-3|A0JLF;xY70C+KDfGnc$5ARF!?dn z2?$%8dRnIIy!)nrPQZ(1y;7A%Ro0K0x*C}^eit2XQ=ycSP zm47hU5#{)<3Eu!^GxsZTH1UH44Lfo`thP>7+U8x(6Xs%Q=;*Cps_F|n%M5|REwrxm2EjB(dG$LTfWo0RcM&{luTu{l({GIOZDDJs6jggnhf zHpQ+9%@05!WB#*K{qt3&oUIf$qzO50@D$>~%KUb|Pyy{IP*PR{N9?(XjBKwRNAeQ# zBX!N{Z$Md1sj_-wxP2vz$=Mx@OiHP6zmSb-4_i!fx@aC6%Nl#o-hRk>yYz?W5>p5p4ibPdB__Acx$Tg*smQE;1n~|vZ8G{B_1c(rP#n@&SIaek zHG19c-KhVxieck3c>yzA!`rvvW(eVv_V>B*VMk-J`eqhcm6q8A?L;$g>A{z38Ucb` z7~W)?hLT~5WCC4_dSTC!QGjH}fY${4DiCqImFDLVA#4$8_my^>2bbE zIlv*LEs__zLXHdGaH`HS;SupAd|Ky*emlrRmwfgLBw#y%ZDIBYumC(vl)(I={evY5 zq~ejsPm%20hj!tQ)7>fMmUR>tEZT|YbxV(`^P!gt5M6JMEc4PmccC< zM_Q1r*zF8Q3b;Ei=`#UJ5au%8(%+oP$+1@BjbP ze)jlIE6;mv*4Q1d1U*LpL#Y@;aCsW+PU*Nq%^eGqp*^0HmYCpar+6{Bnf)Wy4t6ox zqL2uO$97Ks@aWx|611Fwumwgq*~iM={(2cF`WxD0<|_o-&mWdbmu?#lP>js`rIoS~ zK{uI8;D(?2j-SfoqNc}{(L%by1aOJnf+^Rj{7IGHtm-CIYa>4~sur3wY?X`v{b9i` zRH#!3?>T2-vopDxn}+GRxBApBR#HAG@1>@HTAN-83VE}!V)2hlu`vE5cn;Hcf>Dq6 z5G86QPP z0(yK}$GmO4y3&6@eZq@)xHMm1hika8Wv*I@JInhd&xD(gm*2OnD1OBT6NQ1a!n)IB zEc{9#KEv_%s)(p$hlWDax+uXuU%+k-h<6?92OGEXQ&GP%KaS#YYHTT%hX5-&wgZ0p zMdjYq{GmhFr58eC0vCQWyeM!-@M#vk4KH$!(F(t7tdN8&U3J1pNSfpxg53Ix8s!6q zzlXf&m)`8jJp59g)whz>#|X*4%p7!4q3IoR=u^c=QQb)_G%Ts7lYJw4Mq$jA)`Lch zQYYj6p@fk3Ck+Wm>uS0(RjnbR?Ht0e^W_Sj+drS3mOk6zqC`2-DuGSh9^|)Q2_o1e zmD9{^hILIUkjSxe-#!jw-STN)qtQTP;}Or{rY5gxTz!y9aZZ6=mP%ZFPJUZ~v`lHA z8CRB4P*z-BA>-?4?PC}o!#w?+DM{M~c4AB^x(9``oCIg3m3P|!iG(9VFb+z59{1>uw?+p@I-?VC zMm0ZbY`dE-VSX&W)W~7ya*pS)pEo`**KQ0`SnC^atI|reVZD(jmK9KXC7VG=UdVc8 zmptK9mY10~CM+i6h`|juhR8Jq4t|zcB~fQG@yPQXut_ z0w8Yj&qcSTfLBFz{$Qaup&8y}(E~x$LNP0lCJsLY1A<%-Y7+xx-UgHMm}gK!XdpiU 
z%vK;PvYfQj5RLi{oK@3bi83E>tFA+k!4s2bMn6`9tAdOK5o|w4= ze2OTh!*8hlSK$A<|KmX=8j%u2>Ukb^8t{6#N44lM^2uDMkxJCPXn)`ky%rkyca?9M z)07h=5c)cNkbxiLf;q!|4pxeqK(W zu$hImHs-V z%*eN9eY0rE^v=v)B|KDavLoEEOG~(P9AbFvs7Z`Pn;7WZdG;6Dmz6#kU6jX(!Y*r(+2st6JNhj6d{odm z`#dX63t%`87@IcvI_CtM5smDtO*!eDZgKCe;=hllJoCznFPfI_g1L)K>3Cp7jIW)q zL%G@`Z%}XxxtOUj$d_pc;+O1T>yK}Q$VRu7c_GEO;vR+;RV`?W|28?Lr_^qCCLd?< zE#9qPc1E`sWP5*ne07!3cyL~K_6Q_bJ8$Odx99G?;``_UJ_UPwHN?!Pf$~bBOe*pv zWu?uB5geiDi=n*k8+pHwu}N1;x;2u`}8in@M#gLv{e!jVHx?KDVP zb*cB>IZRw(4d_#$)#3sOe)q|j?KMHo&0Y{9cgJJo`L{vS(dRNHU55tKb|Gf=A(ahb zCWBe(#uHvMh9lYnYe8glA-X5+SO|$!Y==BTdrHBRb_Ns0$0{K@3<0uY5BMnkHZ5~b z6QC;9z^)`S&y0f3=#xT&jM8kpavkSIBWxD?v?|%B9``Fo=&H+`Z3q~*TAQRT$!I#% zd5qUt@3$7!oTFsiA}!mmRGB?X&@P=Gk6-Bb&XMjrlb#!#Yj*$8G7uCAcsN+Q^Hdnw zs~`nL;^L)B#AHXF5^(;F9yNb9pqhUik$q>f#gYQ(CHs5hTi25)2V3od4lNmf>;*Md zwlcaFCo>C@l6Z`BbzM6?gw$cp;&V2+8s8e zii#QM_A9;95+&_v_^O^m7Ee*p7QITZCTPq>#*1QFf3;WJ@dnC5b@W<596UU#N4pis z1+5`XidNeAaN}W9=RantYJW;7^_bX`_i3=PbQHTZp^5mx;#nasCUkv~o#Ujn-=oKf zBsV^bc8ijEFF(ybWzSt$YH4-Fp?HeR>=Z_pFf*FwXJ6{rn)QGcU=BZ&H+i3t&p#E$ ziRkGHJHp7VpkG2!!^u#(_zdTpJPxi-&~YG5vVnthARLt7p=3aR_=GjueWJy(NCF$N zy)(MKIdiUcJV1wt;=1m?xnK>%P=fSmd0YVhg(wNb;2RZ7QlFlb82CMlmk4gghZ;zK zeEb^Id-zA`_~M-?xx6w+tSKXvQ2S(g$4EBkTPGJyD!(1df^?t?bcFQkim63CWnKhu zPWcJ&jW$K3){t%poDAmECuM?&b3j>yP+u;P(Bem`=D^3*mFOCk!Ft`p+-s@47X*e| zEI<*eD$a7~v$kBP`U#Q%5ov6sVgS-I(Lc+TshsB>})$H7lF z*!gq8APt&Rng56wEq;fd-_ZgvPDqlon z&mjoXZq;R3vMgh@G+ve_T!RrD%hg7{tk&%2sbsT?%~d1#aA$?zKRmHyTy%d{&Q&@j z*}~{ucb~WT*POKZulr6FK>!6gqlfDZbLavODq4juHy(D5CTV#Se%yr?)!C$ut)gM% z1Fm`9noz_H`FngbWi^gnF^c?lqp@}TO6#P5r7_{aS>;whh(o_nWF$AX)_UHd2o+tx zW}vzu*e9E1vzS&imYjf=!VSpxmYYH{B(+mBI+J0BHz%rts+v5;s|E{Y<#4p<~ zBfqnx|H|_I$Y&PTUr!*8mS;K7&h!i@E}}v}b2QxP6#6x)umL9X-ss}Rz{FYGH+eP( z;ug)KccG87v2xAa8J_S3;jypGubBrAPy%vj!NRIBn90Ived%60ntw170I_`h3$1$# zxI<<+Rl(u;ruU3pXWO!3ER6t4hxG>b0|(&o8X8KE;| z2$u6^7P8g`)~Mj^rtpAh>gDd{x?-=ZfkCw^Gtc0{4|VjI?QjmF!{>)_tHQRs!K0U+ z^?x)%C#}|pxHfEwrSaVJF=Q#7`<4N&^VwYd=Dm})pB+~1$KJfr2))u=ZkQ 
z|9y7}n8Yuugz)gUjxC05WiqO0H4Cdnv-(=d;<#~nLiV(H9igWLNO8GSBx73S4{y*6 z0Gbo^p*DO?7Tr;ZSPn5^2uJklJJg^mnjC6PRII;=JeOJW z<~PW=E?XJ>SISO)%)=J70wS&7gaS`!(ZbTA?M%dnu7euMr2*7?8^)^3cfVVwY5QV~ zar)L{ST-%=UV{pl>4#5dIc1RSh=Y>Ruh*)Z&{8+dlpe<~3=9a?PgfKbt`Zk~<2;!b z4Vm`LP7ttgUZe%}WEpmWW3%pqxM@pd6oKbgN3eknJx54i_{irNF4zXG=R7qYWvz;WEm8T6|dpnm+}x zYR!-f#n%**WK@e7xl*Nl0wtIMkk>(Hj+4{H{n9cs7skG8f4|#cKYz~U?O4-F%Vzut zkX>bpd`F!O6GcEt4Utti$2InLhY$b|IMaA7_fw@VN5JDim)#Kd^U9x$ko_5VhU^@# zybtgH!mh#hWTewN7gkbgg&fdV<6TXUX$0juD&1GU!JVpCBqi>6H(_5-BO{$_mH=M7$LWz~ zX}3y(^g95<>#00$&8fMJs8t$X=^~a}Ug|+Txp7!ltM#y~cvJ3iU z@;fBnUFe39v9g(R+_+}1eQ{H9ZstKJN%%#oB%hR-f=Ybayi~7)g;yDD3cUd4MRH7f z!zD7>*EmbDw^oM;gMNHC#v9$}`ynYDb<5a=#JkEmM-Kl;O<3DRRK&JinGr~@R0p8q z62dPxeQe@0ZtkaqLZ;~oI+03uN~M1b_8Ly{xtcnE>J7B-%a^<2^$>{ZPVDq1&`Ig< zEbM@uzk#;R!T)ij?f-{a&;Ls_^De67??^|IDCY`Oj>?(B$R`1(evc-gu~7dHmWHK_ zzpBQAF`>zEF` zIr?&%i`oU#%qycEp1JuOB|i9dDYK{y9|Y1-iV4;KhD{IsD{mh4--{yu zUtn(cIFjH`2|MGf$~1KE$oA(<7cUQ=yG?CPN1k34Bec}-exEzDl{r0hcA@j@y4gns z{pg(0E{8PL4XIKCYe|47{^ef2y`OrMWYY$~^p*(#n2++Olwf^lbKGsPdzRs;7k@t8 z_R)F0t+JsZa%6_x(PKg+#N%GWGhV@41rhw3=<8D!eAb4)9S^Xd0X`zWUttWS6&KmjrK1ve9KvH&G4RPf%S$M1X99nBi!Zu4V9p4UVr9 zdoMr3UOI{Kh9EYwW;RVSFS}i~jU2BWBe~|;aMm!=?LWL)4G_Giq*m}0YawkJztmS5 ze7(H1FF9B3WUpEu#=<<1#OJQ4-R5STnsZ6Ox>CeEU1JsF9uj0covo?(7uAVF}1;$+fHSEsLJtweIKd;#Nli!5ENv9rD0rZ5deX z@-G4?pJrzVAbrji36RI3blKUcu%i?%`lVR{pw0`YDYnm?`KcCs9eqHS*MS}q!eH@jpH zomVSSq}=Mi$Otiimm8x0N0s>Rs1pDGPxDVdSb%(19X3MOvIOWG)t&#Ci>-ePUsSF(nG^Zhlr@Lt)Bnfr%VbPgf%)t$SE7*j2)cEXrN*!w?N>;-n~3*3uZYI2?>8=l{4@s*wyiFgTe<6EOF3g;>6^nbTDwwkuL zPIVbMXq9;t!QWJ>ZKMj==(pX~m3;uqVBzY%mF3!?1Rx~&IxDu>hxY|J{dqXnohK`H z;iK@UbEOtvvLr(GNz~M)$*Qy?>$%EFV6zdKTz2anv2!-F$ z#mlT=6VFPGALmGf7%MC<)lH_Ds*b)jRUP>)AkFnJ+v#sB`2_Ub@r?L&`jHZ((WvSm z_h&>I_}FxDc`+^AWcVUAbpw9BA%fV|6#3GdBvv&&7zFcnD^{qV3~C6n*-fu8_0xT< zA=`7;+R)eQUpF9x00<)Yd%WeW*pqwX@#M$zY5dL2}1iX+_h` zp( z(xn^_9r@%R^JYNGk39TPF#fDuSb@HUg;ZGAeZ|T=PH)|?;dc&L6{{&1{0CUVk!^#_ zLjy#Q*{DAQJgCBW);-eyDNE`~7QB|&!9i;O^tSLx!Df7s5^+i;%a 
zmHlg;W2OB9Qr)^+z6BQ3l!%30qOH2AcC!OeQaWs?7#lg(?e_6Ey9hiBE2ao z-D?4dHxME&lut=iF~i|7B_s#UkF{$>l*0j0W&Vqk7l;i=$8G{ZD~k!mZAB3ylCy{f zZc?7kalSsTgmz;FOf#ut&o$s}-Kr^-sgr>U7~e_o=s6o%SaS*hL8fWfwWh%|$G*lv ztSSY}X)oYbgD;Vf-KcxrPi))e;pBkSCiC`YZ^zy6C}$|m4WDp9rk>hXtr63r=tl`B zyLTy0&62n}tJ#tIl(tHML~2jlvj9>B{lS>;d9p205Gq%AixwGr7Ga~vP8qW+ds!gU zpZ+XeF`p54R?WtVf_O6iCRO%E-MQCgeOoQA>KN_EIix>uOdLIt+~1*{9_K|_oZXGZ zFZC=hrp^k?3n>QDk@dTI#r)ZK6)WJku(Rm^?N+AEVzhYFF zA}wKFddHhTl&4D|10+3!bS(2y^eYRGEv)h4i!ORI+xQh7y&Kz5SF$j8%C{7fYpp*V zA)Hp`qM(ALlq>}8t&K7-ESOIv^#C3<6bU>v*qx_7#JozYzC{gMb)OUE`rJ>#pF+%& zi?X6R86us8M0BYK^CF&-ms$1sd)uXPe+Jy_5FA6dSC1?3yhB)4>96X770q6&n=AX2 zrYm7w(`jn1i2a3hDvekvR6j3irfiW7vB}wYmDL#VXjZ^W?B$6C&nYI@f{xPoU{rOu zsE2wz^bOGGW2*r1s}plDzs|gSP=E%V_r^v9c)??9?K>!OfQzB2dt2K9k!w@xWi-f-aL?Oa#AGN z{S`bZrcHTmD?bdNr0!`))HSGQgz~)^moUFtHXclt zCINylml5S%O8abH{G}_cMP)@r=#NC>Y;R|cS<2zd>E$Npm$csbHN$z)XJ8L59;E&l zNE|8GOgmmd*BEAs_oHE8&ZH>N1vx_4Q$UKy85%{J?Z`F(i|SGN+LS&c_Buf#jg$Ut z*-UOW9GHFO0&ktS8!wg5<#L`(fU_qTJAhp87j{bZK|}a4`Z8tJ6MXhveOxoX&tHdc z=r+FxvC=ve*Vw&E5dEEP@`DQpfc;${ZSJ~i{u$VxWg%2Zf%#--)J>{HROg^T zZ$(1_j!VcHk%;1%*25wiI?)~J8h3#5h+~3=>8NVRtINawD~=&LnyTNPzK%%0F1t4; zRpxKCAXGN4)rv0px;T=v&tn;65$|AOB;oZWLG&nMwj=-SMgW)~@V?2CIUJ=%|zzsj|RG;rc+tX+t&@ra~8PT1fAq6O8+)8nk){8snourt1E2aa@%KVLJALU-&j1C=-V%+W1M+&qXbqc3reth|f+F)eeb1+ZK zxqKcH#d#xvy&_X`U|ivw6NW>5s3&Se_MxR33g*Gv=^N zbrL&V4x6?NxN9^}((Od8O@`9};}!-V72vDd8-PDX_jt!hzyQL$$0Z)jNhr>BD^f6m z4VZIp?!}eNo1`~qVRk%`iT=sJklG&fRZ5MfLgklwyDTj%pf6^HkziU^^)yl$H5CVF z)6OFK3aVER4{cyNDZ>@qDE^Iaevby|%} zXxflm9d+Sovh!VC#S?gv;%cjeH6}WxU`NUt%NNmOKuhuxj#ciwixIXcy|Q2skShFo z8T3T8bzNTuS>wKB-k!ZC0HML=OlCQ0(9X7#!&fL(ag0o$a3cgHJEB@_Ng;NtcVkpV z&w3op1eln*nn*p|Agnv01N?Y19z#9PnDg!Q)LicxE+VmwCW!~b#^TYCQ?sY%6@!f) zu20W21T}LqasRXems7%-ryS?tM6cba3_#3s7vR|dL@9t*0X~MMrYBKFx(NiN>KKz3 z;bl(|jp=@g{ZN9)|CJVA;u+>1LpLfma(zvhLOeNO>PM2vn%iK1nKx*&%IB1|HN zU5^5PUiU{GP^yEQHH`S1%CPyk$99<2nWpd73W-TH*{w0mMLJCOr}U<9<&uB-`m%d9o*OO!tmC*FD-kChO_N*hf{G(SAd 
zH(|W;=UN@mIkdc!sRn*y4+vi>)G58EW<&G9xC)5Skz}REi@*3D2{;lhRNcQqtyd&Y z?D7_VKXl0J>VrW18)x4xIyIRjPdZCmO&u`8=XWpMhZXYcWT+(c;IAf|K$)9}yo%@tTMkSe370F3} zT5h?GpMr{Qar1^#L`ZuHx?ng7Xqa%GX8#%izcqpC-7q(MvX>+ewi!jLkTa1cq&1xe_UuKWdsY<9zSOj8h;(L52N)L}UN4GPf>A`pgmmDim=`HM9ipN?)40sZ zNgrlMg-09TI8I)9Yqf!Z8AuzPs`h@Jy}Fk^$^Z7@=K96-D+oeCSIe&pyjvXSl2NPg zFvQ`lBod4A0E|hma_3^tt-0OU0?7AgpV8kpD})Z6S%5X; zAM+qDdh#@yz8$+#TkW7KHC73Z%sH2}yjJVt8)!1{>fYls?WC1^GDzOL(sd1i4*J1; zHnCft-`Z!Ssa36H;#5_DNN@U7X4^nYyYO;V0ey})`6$t1H(4z2 zVr}Om>-u$@)%N)sL8DU3oE#pfu_7W?Vyfrcw>~ANvXGWy zn*d1Enuas;1$sp}TlAk93g4Pf_vK z7)FIiF!XfTgMZmFu#$V;qXA2G|5A0mhFZJ0q$?~i&OPKjsF766c4w?;zC&{IJiEk4M!lWHb?jzP9Y6?^ix({TeD(5H=YmPR;MG{#~Q(g0NfEd(mi7cBZHkjcjT> zE`}ctyG-|^D0U9UQ2_|O;+58hPVDg&2ewP|8%bm9SRaEo6HpE;6Jk{1g{uaG{Pk1r`sWmSe(r~FG-TVD0g zd=zgB;4DEJRkd`qTUS@08R)b#=(uJ5EMB%(XHBLLols{1 zldp1vC?(kbQWqC5QMDOs3?ClMnwYX92js^ctfJnWD4-s8YELv{Xvthtr{M8TyUlZs zC{71j;-p})b?COJCWuD$%yi`J|7PDoFH`%bI`xT)1DpLdN)+AV9p zp_F>zw>Lu(wi>Jg(YqGb=Oly^)f-ge(O0S;WEy834AvT{tueA7t(9w9wrW{v%~Qp{ zAqIEb$6DBesooiL-{O(BbD~p(!FGWkEP_89bhVq#t+34v(e>t_rx5FhHk8Q-H2>ZV z(B?+tH6k+9t4sTa0|IL93^tc1^-tScwl+|<1NvTRn=C>jh~Re0xgtc=n@px zuIjT$jh2*l6+46|nKdFzh&I7I1pJJ-$9n3SVpJ;^p_H{YF50zn&l&`=2P8}((wdFK zJF|*&(t{R56wh8Kg^G0Xe7Dq78|hQxdqk;dEewEJRhr6OgN;1G4q|TKytf3&VdON5 z$(Czs^_{x)xc$=|%e`u;zQ;JNcLa?QYwm?OWL3X)S#!eRbuex{vzR&L!b{^K<4+K1 z4tr$QcqHKHIyT@(m3=RaPYf_MX}=&uwT&B$QA>)@<}59t<#(XCT1;Nkz?LnP!PpIK zx{U@?xSpzzK+49SwR6ltyjE-Q>+3-sY9Sv{xz>)5G-%=A_5oYc;IjZ# z1B&%VG|~j^`D7y&GHE$cmvHaEnbjcCf5^Y2s_vDKzijq2#A`apc&xG-GpZV8i>s)4 z<#AUgM8ZX_%#ap)Ce_u98eiCm@qsy*8c7k9P|1Q&rnP#jaN0{z=Z`Zlv?(~8?X3rLR+NaBd z?y-8T?S!x`zzg+7YIgbdgqnQ&Yor5fhSK36Qx?N6Ac;oUt>SRFNFj`d=m zVPxydleem9L3&L*9vOtB=l~J>01*#K0M6KYq!IUS`@+4pn$uQSHMJ4Z_wL5-zClbZ zK=`a5)FuHBT`x4Ey5GiG^D=ALk=;YU6?P%i2T>;g2 zR~rPBD){_zql-LR&@M=LQ~JCFZJvExQroE1!E_=GHRAxo4FWPv(ZK%DTh>JwVy=o|QbA3@E?CHrlNC zLWI2~R>rE_%G%lZQA%n>s%LlVmvdj2`Y_V(pk)alJ;Gs`@^kTd0l`mBUxwqn34O*2 z18ufa_0VYZQiU08!DwTmKdNV@Y>Zk 
z?PlnJeMXvu$&_yo@le=@8*OYpH4>Xs@Fdo0bP-=+)Ya9_m6cVrurZsi+fwgDuYt8I z-YO!D>ohYIfg80giV{WIi(rZ%JXfsShFT=B_1VSKD=AZ7mPg-D+6uUBxWB0xmHV3U zu`z(-ctq{Ple(?%-#)gjl+(AL$cjxf{5WQ5Vb<%Bow79kR%l=>r+76dce;Mx8UJzn zROgF-qNyGIU0X_5y6TG_N2fZEu4UJ`VNJtJ^J&M>N6$PHvOAmPu$0JiR;t?qd!;Bf z^_(BXnTHY#clvw8eoo}E;AaIM|d^3gU)FCc#=8pn&y3mUr zIUnq^s(9>& zK5JJwD+WmYP-I00JAU702*2;{g!-#O2vF~i(`T7a!JG3y#k4=1$qTf%fF1~-HY*EI zo1cl*{_d!Wkq@rGQ0l)*}u28apxZJ;)| z;CKW0mMi+#s|^0@HI9dJBLG?0S;{(|Ja}daJjE2HbOZL_D5syYu%O-ZqWU%Ykg*L= zziaTnk4y8q@-f@!F3zWaZ%hNV$7_CZV8ojwm+Z|1v@Nycz%b5HW&v>$;Doga1o*#Q3iP3<67VdFUa1GzH=_>l*+9unlIe68wg0~)Q3R9B=>Sl$An0cY z7+{vkeP{N3M12SFRI+z~DOT|JYa9U3%FkCj1MM1Ob#oO>W6~DuBNs)=S6&P99KBZF z5BEtt!ljivedL0Znc3IX_OEs^1G?6wmS&r=scsO|v>8XY^iGC~S7AsMo$E9iYopxb zJA~%XDG=%o@I(b@YTaP_`Sfbn%*rw`1mP_|hpSJx+fW}Vi@0?Cd90v?Ypm+|w6oS$ zfh@KY21?Ie1u=qMQeE&G@wzep?Zpa?24) z?6HUx<8cra4&YAH{FGBRnJV0r19WS@6QAN#X3e|jbJoUpt9;R_)TsSzfWvUzzH3;Y zO;Ce><<#3}|BCEoy$l4se`b3v4gU86nBD!~u87OA?MxylGWi8M@E2&T%rhISNi;KE z0doFH0GrmX2h_?|@iZfUoi%|&!&TAGfl%+Cw6f7%^LmF+Y~NB31(wAW6k(#-XZFte z27q2GI5(NDl=KEK2f^cJ`X-q|K&bm0Fb?`xtQ`11lRfvpf9U?Bd3_snu0vy>$&z+L zTS^6N?*e~o%gal3S&Qo;YcfVTckiat^&jkx=#VbMD*0K(S+lOlRDNAV3MIWC&J*LZ z5nQ$!O}o{eQ)+AB*r(c9CS8O}PB2s(k$0|&6-tw-y_4fCeT~Q4SYvg;4K7SjZxMVMp(h9{j2|@cx`1Z=7S2l-x(a-~I`3xpC)n*D8u#oUeA_Ld9dVo2 z!2`}oZvO$5&iV^7N$>YyWlL=rN49V2y6F&myh(vnpMoMs#I2PEziEP_JuI#gH@!AJ z-ioQPbQ(No+DH9tZ84JO(e3G(y=WO9ipCYJowEi#AQ7MHS}yTpdV2ht(yj$Z+-m1@>@+^?N)ItTXFVkJ@vz4zueQJbR%_mu(-r@%maxS zwM{-6Sh3Op=jD6ofRpj<0{STFera^&yI)v+Ke^UHp6iBIf?c=LIG}%RcAf&NqbkdT z1RkalYaD+42q5+Z#X;HHp__LW07@mW4=K@18UF~LmPLI5Gjxt^Dd0Up9)C(6LGp}J zdH+LE5u`Y^**R<|@5E$_3YvavcpDlIJr;KE1NBKyhfSVr$_D}A!DnF?k-FjUXL$qS z80_662wQ_O?|^>PF+=adWzd<1BqsOT(Qg(JipW(@cO=@6QL2mu3vAOQoh(4;P@N{dK|0YXGNQltcy zAkslVN`in?6TuK6#5>nM`+fJ`=Zrh<9rwOtpZDIe#$do7j4Ddd$dDQ-ZJ&6Yv;MY)*ZF4IkbL_skPkxPixPqFDnsbFGUsItq29``4_}Z zC4ImZUv7uDw+{6*E%nbyN}h)(>AM~51Dv?{CR>I#{U=AF5r_$A+@a%BYN5$S$|DfB zeK96y7ej*?)LE#00 
ze_O9cAaw^(dP+5=a4wt5U_smd)egN&rFYj&QH}3LlQVCQW#NsS`UbWUYW<{lQ7Z{^ zXzoS(!TW7T5E#q6-f4jTvjN?Qd~rW10nkxB z!e5&8Vh(Vv_8#!N!ApcYtd^e~r1qWjZ>YWDw$4b|jHBf)%jk1>WL3_MXbtqf zHM4giZTfKcJbxJ9V-U9*`%nPjhQL17_%aReDy9s^C+Xmxqk)Y%Pi8o6d1SD6h-DI# zd{_EBoiA=HZ1$I%K(_rM3f{Tb8>=eeYyvTHOu#1=I3gZ(_9X{|^yJ7&yc~-qSi{OI zzQ;NQhdkQNFH-nze^8Ca_kew_fB(|-nzRNToD>RutOM{A)3l>3tVglF;MC?4gWZlS zIyeywQKaON(nc^h;}^JD3S(@MS#*%aXss~K)<0U5ZZHLqOLV)5d^VGxd$Lr_CNj&d zN&-}ToOLDTE&a!ZUz%`A3pHt zFZiAI`5kt^Jg!tAtZ(XHXBFhPrmtU%H@ZAFJLY{V)5aVzS8VpSP?xsSeQy@dh^47z z0ugh!57kugbR8s^^7Rov`$L;7_CdQ)EjNfCqc^FFpKLonK&!DDWkD$iZadsLV|8wZ zi&e40B?it9YG+LilTt(+zpY)W$yUpZu3v(L5Kb6$!q>(ZN=+^Dgg1mdxkB2IDpyhc z0fvp{Sln7ZHi>?xKA9oI>;8tsTI zOu0iLp62WR$|>K?#E9HmY(Ie(*zRslX-g^fS4ZcVqBJJ$N49qTUB>|_(kysW5h!05 zs)*j^v3X>-Byx`TB&P#BTH4wgeU|~rr>OA`#Ae`1x#y6q(3qw*lx_u@w09Zj*qP# zullB;c+k1mY2X<|escHkUeq?O03w0VLkxP&%& zWaN=0To;dVPq;O+g3^1xB*zKP-9chn?1RnikzQxNM8SlSWY6H?Hks+37s=J4NYhMz z^Cs&8pQA4g(h5_+_nyFTD$aYhw6cAZu{LqEz-||?od|xYW!|C;48w1tp*;w-3d&bu zru&4UsFx*YH^Qi)z-H8Gq))<-@h1DrtSedeljC%ZAUP^{<63YiQtwje;yY{eGh(e&?cVE;cUP7kx05{&7C=U8BO^CP?y$HCTTI#S_pS6M#A zEK_=YJ&s2k8Y?Y6f#IYEmmq2W((*$_zpaVmo>8#^$aJSKI`!_-ZS7Ay8yj0{YahWU z&H`n!Jbl3A(8v3ge&&*xP3+|*(d~O?uu560iey)tK&>_NlIb_Eo11-ytZIV;_+ygg z@WN-8WTj0M%tK}gx|P|v9wamWy|{v?l6f~|b3qA1mtlLpb-eUFN@(b6{E5_*KQ`== zD+s)?Sr4EM)ZYS}X6k4Frdsxr|dlu4Egt ztSl`JZkHY##Y;O~ROnIKlYKq$^{bbGQqkYRa{tPF5eeNi+IMKH8`YX|CIx-Pr%sPd z%yJJvM(W2dTdzf^T(P_6+*h8iNkn8zXTiSX0rQ>7hf?!5K7VN+&5E{+Rq*}NzF$MC0+fNT?^~Fp z{qyKS3g3pdNOxKsNE}eP;Nn3i!EG|=H`Zy3(kV1-ueqZn5e%5>g5^Q?z7^leE;Jm4IQtUkkq9Fa<-E&fx>_}T+lJV$psnzY6!uQu za!OIzyux&Cm2`JvYE~^bB$n6jgPKN2^j=I!IbKoE$d5eduQrmjn_=~iT*l5}Y8Eqb zi=%*orkGy*)_@e-0-}<{TwGX3nNf>yz8R(kRX#rYl%KT3h!%+ng9IPRD$7~~7$6`4 z3odBstm&ane8~}q`nEH%j_&}Uyb|b>`nj%XY7MFj=$0&BK&$6nZ_nA3w!Dw?ZwnaJe;N4hb8VEpSjQL|&ZFu--Z|6LcU z%NVr!ukbqDzmV?0e3T-m}R_IJla`)u3#+J5kkNYn-Om4fm zl>}bUFs)I#Vs8;Q-MC#n)#Lk2Rx(h7=qLZ4u%EelCp4oJrnTCnmL809o2sY*BFI8v zcq{dL9*)X)motLshW^`@$sYUx+0o)db#Bot-0M{bTem;TZ-1)|*snl;I41Dvy>~;B 
zOwhA4Tff~HiWGBj^U!^N+xe#W+>As5{u0l+s3J7FviX}$S*gR_$!y+fTrObrjHd=k zu4&HsPses54BMx)66Bs}&4_Xf{N5W_YUBLNf<>4{UH9i-zSf?RffDtwv9D7lJ4cHi zc8+GDdyM9#9P)jVy6w#GSlJ^&VqD(Dilc1G3W_C~V6NJlU;YVi^AE7jFMks5|6?IU z;czp}x7$Mq4Z8Ml-Fz%idO(V2BJB?3H8SVb%W1FlPt{lEJne;~JquqG+Iv+fR{22o=?KyHiK67}a%+V*Tfy9Lmzjb+4>-jt^@QEiO9uh5k zr^fBdVYwCXXM_;DrR6jYn@~9z9Bx=#JF;6L6Ng@H@IliY;-L zIVs!R&PsM~ov=?uvT~a@20thj@74bOa+$r1F6NsmmU0dQQ$5GX)#r`Hbc5>s?F=5r_j1xxwmql-sw)8Rro9Kqdmi>lZ9GK z7e6c>QRvxp+wL^1>a)bjk|T(>sy|=DKQmJ-dU|vG_S2R-T_fGicAgo_jUP>g)-RO^ z5&$3bYEy_#v7&6iCkh0-Z-==E3~jq4<^_Sx^`VxCGBTa7CzA6KTa}rI7=ro&g)f7Y z-Z(81pWEu3&iGc_Z|)EWgNfG4wdvlt7HCir(y|CWfB^#NqcC;{EpuYj-oQHKbs#uf zwL{*J*-zrdpT9@__xf|@+m?LGZRwyrMnyQoAOa0=hdDR3Vc9dHrheDt%!NnY$ zR_l(BuZ>s47l~^fq=MJRgF{>6gvohrya(m12s)IS)**q7175Kkb!jmm(da|i&d>2q z6A;@nq_pBF3Qq@j4d^VdSvWy!H)K7yr_l9l)B9Iy8(Tb1&bJnD?kSPldGIF-AED{-b*}Qmu6z`6?({#?*>(9cV$ETR{SJV0U(P#X&wD%vQE}ujLA%@7m;$xt1>(;waw<|Ahr(qDV}rGc%qBWuFRq)M86urgiPH3e~D< zF;L!}d6D+Vp|fJs*kkB)3tU2>#00ALozM8Kt;(RU{B;&lwpMROWt40!3atTixjv%C zp>$%nH4#!O9&s#iB5>uF>#=;+XES?rT-_Yn_!xAhzAMB|ly@#;Zh2 zS0Wo61Y(v8u8iT80ziw<-ZUwe=0SP%5wwmnQ41qUzxfD=hN9JOsSYsmi0FRuo=4Yw z+}v+9{0!r+b z2wERl=p&lcjUZ+}C_;m5AGsiNtWwRgXSvI*6ERy5YM%bZy zLe(WLQhGn+zs^7VI^px!Xcr}aPE%tb#69w9^LNEzJN2iZnt%Wg$w5eAVNGe0fo%Bn zWxioPw#9_9pF?YZgo0of@4*wx{v!j#?Q-DpeXLW2HK9k-JfA*=~~w~&+#NiSFuGnrPE->SQ}URs3WknQ;jicHjJs zOn28PaEvR?w#Fk0k(oCdbDt)M=~o(g=SGyENuydK{LoVyWNb6E7tqk%=X$-?DGArV= zsc=SSxnl~JpFNxH001Jx=8v&78ppLqX^uVf5-rxi?ur<5zZ1)cVyPFgpc7&66z)R838W3_%dAZ_lU5_kNHRW1QQt1GyHMI?k`G@ zXRhKM^8FF#DrcQaO0vrDt>QxqlNw9z{(TB9$o+VXLoSnpgX78qhaLyVM-JYH52ch) z^{X8X6@mPyhVd`ashB$tUD9HsRZ@l3)_HUE4?|1>s#e1_8^kE50%k9zFm@3ujFF>9SAe&U0% zs1_c=*AZ6WoOlrb)TdSKNsJgkN>zTB7PE76gk_9J)*GE2V!inoI^e*4-qF6c_4Z3@ zskBp{M&0Q0zHWVWriWwLJ);?y46S#HH_`b=HP_LVrMborJ*|Y^ zkkd%a+arQ}-3#=*6sG+PoH~h4gM$#U{WGgrKF0+d>ojo38iA3Tz)JL7D=@OKXnxWr zy4E0lj_Fy2&go>!86Bcm<_>nb8E@Xr7GUaL7$rjBMLdUl1(2)d+PjYfzw;odKm6_U zo4y`1x?955?mwHmoKjYY88kz0mOkl-q4=ZuHn8P4r-+I-n2}8J5bV^rPf~w{*X@tE 
zmy1%j+Azb9ISQczLdnufb+m~mVcKaAwZ1-u4kdR>KUR(HcQtaiiiI!3-DoBrg8`rbtwS2?d68|RzbQZMRi#mAMke@pA!Qv=al zn->&L3of|e<5cO){Bxn(CrmX8#k3YVl|oD5qqU?*?xMpiV_I={{p8;EJWHC1qyl%d zSmHh6c_O&wJavM3YcFg0jbx;&t?T+ZQ0yFNU$_Tn^vqkdWdd?wu(El|3?8pDHc`mR zoG)LltDBjR+6$N;F3qG6yfofl_H`Rn>aW%hl!Dp)kT(|kwrdlXUY1`0-k6q5k0Es2 z8DGY$`Q3v+6QMQp58G7zrB9DT%n0ZgJ-gkY<6(m_JyD$3MmJ=KYiGT?#o>vl7W*R0 z;1UnG&!Pt}`jPR-cMo3w^!__|r#Xa)YEN3^A0>KYnF6qBOSsG0i2tBRY}u&GkEhGd zr}(rABA+HceQMR_?4q(?ku>iUseRwEvC&lGz{59J3gWND6l7+6Im?dE}gM(kVou=Q+qJjhm z2y1uH4^^=jCP7z6iD4kVetduBwEy?b($IP3-E#BU^HJs2xse+quj_K(Oa_D)3HJki z9?)BxKzPj`78%$h%b&u(N4CibW_!K*mL&!z+K?WHDH*J_9{*6FH}-tBd8neTz2*}l z-$)!Qjd5h+=^#gdn;684aUJ=dCg6$|fuCD9IyDj$8l~LDmt?7X zEG$@{!*;16uGGNprD>|jk@&9Bu8wIqS{LJe9pao>6s@d)kFl|yq!+ftUR zeJHvUq!$Lf6-6KY#fk4Flrm=(I^kEyc+i}pzS}#k7vu63b1(*C-uUzS=ZNnRpa}KZ z^_SnM!Okh-JL>6iCN5z7L%Va0$kA>GGDV3ltJtE=)q{3D;^!GvJNb%WDo9W`ct+c0 z+@?Xh-0pR>of)e05T2c5g029mT9lkBV*g6v1a*>;!F)pjrAQlOSX-Ru?Q{;*A_+0U z7O!Y|E^wq&>VsfxK&465n#RmpePdrqNlC8zcG)ZtT~S-0NCb?V&kqKu@)6(OX7hnb zF(lI}Iol4KLBA1uUzl|!`jJ-WGIP@UQnEyrij_eAxhgdvjjK0qU#>Tv)&l~nd~V=G zSLY0AhiGc?iy#puXNM8@q$)=CrQbw*jeH`@`8jjD2Py=yT>L6J$IQ5TP=Y!A676d~ z^bFjhDiRm=HTG1lZ*qawtS_9NzD}D$Us{Au3uh!Z<=d6Y-rKj!w-d6l%6oHf)`~uk znDl*jGasB3r+ehPfnJLPC+8!gq{^yUe6U`*-Y0kOgkGef{*gw2PuezzyXfkXBPfXKrSa|j1y)UfHNY7tc z%qGUwZ%}|hcFa+;A#A zO?`Ii$|ruYUp3DC_V6O-qbqcOItUuxoK)UZZ4Fzm?&IMsj%t3Mo_Z# zA&FLh;6=D(?nAy@OWE4OUb-Qz;-i8EwTmDgM|`6e2^?+l)*mZcmzAN{*yy}s^KTcIZl>BP!ecI z3A=Qs3(IPJ0Sw~!l^?c$0lKQE8#BPS!)zg4a@w9S^jj09o$YFU8wkB-b$IL;0PDkd z0o~a9LqKj)g4%m<9QVVOHV@p=3XRxq*aB%!3cGBm0~j)ZoK`Fk-J2Z?XH}ttfK_H8 zS148Xw&MYB`?gdbcAEp+8OH>?9GCEHM+lH@=ucDw(FGcn+3_AfQU}_9+=u?x>)>NP zb`=JU9VAxo&3V>a5Ur?91v>;pzp@5-fOsY~DV}w?>Ns%ilWgyrAog`W=C%1#&@XmC zkaq=`lK%)c>H1F%0qBgG@I@dy#tioW7DRzc`9BR!-X`N1<0=n;_Wh;dpYf0eDFd5> zQp;mQ;dVR3h()d}|)Bajhf@Y)<`Vy>q8en+6C4^i4<*id?S zd?+7KRD6Lw1PnMH@3<*<8vwpxMpe?kz#(7GxX(nD^)3I+ZxIys=W;9cSgB)*Sut{} 
z09Ss5>hWuB?uV*eOKfK#s%9Mf{t(b{{_DR%(G~meZ^i#F+)i5obG-@rc^z8x)HvQvs~hmC}B2gDMKOfkIUh&sf~E&^ujM|4==F z($J9N6?axDU2oXh%;%aSXwbgGzT2T9fuwIwbSJ{b%Pr^ZmBup)%-?;_T#=XCuZLfJ z@>f*$zuoog`Bzl-uc+*`|0cEde^XRetu7oG2mCE6d&4gq(#5j zABty#=M8($6bv^y3ocu`&+~c`sM{@Vv38}F<5|uoo8XF(KeMvs86_ET(>%E89dEf+ zN(D@9Fw4Z~)3&Gz{!1YImq7S0f$(1f;lBjJe+h*D5(xh#5dKRb z{FgxZFM;s?6Xo$=0%2f@`7izSzd@M&?-2-VI*+4jEtLST-kZUx4b{@dFiEXgk=@%x zpCdA^*eGoG?7A-87_%dY+pXBRTvBZ}>K|_v$ukW3bmZ0`D5jsrU^F(J9k!|b!RLA;4FF{{#9*89puZkY%7xBnojl>9PK zt3(=b=pHA@t;h%dZZ({^8Q6we+ry1I%k%m~@QN2m%1P%}D3DOegk$}J?xn4(eZ^MS zlEQ;aj}k_$h8;sPn-Fhz|F0vp6|~9Zx_NFlaxi}GDxG>+nTN!NkouiqZ9j88^KaiDp?WN8+#tE*(nrI)A{_3;6D&mH@Zuv$45w*3iljYP zt6j0pr;$JNwF8Zofg~iDSjmc_h8gP^I#W(kiNwo+~cCgnp*|CTO3{LOe#chj71+ldqT$$^YI+<_BQS*G`Prirtl!$!Q({YkW{ ze)1ibXPPTOJ4@@?d~QzVk?sSFB1HjIH?hyrs5@;zTZ?|%0>qd;dP)1qUht}P9DyS6szzAH`H_tLtS^=DOdd_)ra z>3T5dK$GvssM22LOHI+rQwC$yYOSS&Zm$3v`vX!<*V*#17=mps)(-uM(bz!4_o@Op zwZ{zyR%Jg2+1WYcfucCaj~E4>95d#7ukuPJZq`Q=)g2Euw< zT@oBXTKMdVPScETFWK16KBo$) zSW*jvak*s;)OO+dIX7~}NAw-HiBKs4i~Uyl&PjvujuHC^MTgzf@ulvyc&_Lzfnrn4 zwA;H6VRoKEIm(&dif8k4q&X9+x8IgCKfQ)Jci(K>r~wzplWnkw?rZNgj~~y>A4xez zdG%2#D_Hb#AFM=SoB+$nCk$ob9z&(t43KTQ2hS=ujChzNQ@U>=5WV)Gc_g%*0*Rw7 z*!+V+U6ik}h}{IOAfbjdY6KBV@wm(w3b-3t3ZO5jS?vw%-kgp{PU^^QC8tQhSot8tY^8YQr-EiXEs%ls=RndQu`L?#BFv@bMgHO zL_v6xW7^ngB0)SI$WaP3Q-fi<(%f6Ej$@2A4h=mCw6+B`!37Xtd}o+rcAs= z7>4SsJYTA45GE5C8K@B)lNS?x`XCqZtC%y%i`s79i(koC>bRkg=S8&kfl6#V!)9Eo zJ7qw0m7&!=@bc0gQJyFl+Nwm z-l<%*_Qy|*Rt(r=p8ZT)tH`Y_*$Ew~%Nw;_*0|a$@sx_P)$JwpN48bjN5o@#ayYdBaV*Ww4lp4&_y=B1AF3TT|q+l z6c8g3s10|dI(Gblfm6QRB5BWaW5j?cPvA5z)y>5$8bMPd&YSGjnb>Bri0u=Fa<%p?(8Wa%Q-SXRgs*tj0Gn8Q zUgB-LFe&;hkc4z6EqEm2gfQH+!Ol5xy2yPkK3--+cWzZN-J)B>_d+|ZZax;{yWn=O z(@~tapaPBSc5v6HE#!0}t`0HFpeL?stYw}3bau_guE;@KVT9lil41_yhh;iyq~|M{ zVm_Qp!h@scB=@N3H(arJE8$h!&c&sa?gb^!VCII}(b(f3VRpg7B zXGQHu2+?xgu2KKyqK1XRnE)$hYYF-Y`&X9sn2{nAN8aP><`WEvRK=%a_)GnaPL2Q+ z{fjvi-^V3bDY@bb^gXh`y1O0_1fGu%mp+BEFs4W2is#mM5i9Yv4s|V!&lcRo?Doj^ zuya{@pk`AW| 
zRJ0+y>I=#ZiM9mOHx-E~SFX?UMmjyU%|hB2TV&g82Te6F2+Su`Yz&lh$ITF6Beb2v zc+nkWqGecD>23fK+T#RTh3U}|2_+8-qs`1-PuBJSAXUoKQ&^1Rt~PNR8z;2q)5Q#T z!a37*V)4!wUwR-DI&D(?N5VW!ljW^4CB5~x2S$=-l5OBmFg6)AuER?)d9N4W-Pu54 z-l&zHkd{$@A1r$7rXjz0-Am7?IPpSH4?Zhx|92nw1a(2)my!kXU~lVj5{%y#2cqE^ zuO~42v|>{xP$A;T#8G{Ku*C+Jj$0%pKzM>hVu~BaifFoIq^G8v=nkR9wEZNiHkdQ6 zaz$!XgS>loLzu7ro;V=^x0tuYSGU~3Tfb8N zay(W~TDPZ~d!9~W5B1SS5o71j7?ij&e;jdh)vMZq(<*oN}O6pgh*|h7&uOm*2QE$_Vj|;@CKyTp<`=mb%Cw~(7 z9G(S?8k+JENL6o{3i*Lx=Tv3Q;)E`s9;2;HJYH7PY~s_}L|3n{;=#@1Liy(l{BPSu zWhw(w5T%@0gxR=u-#yBoPL4yrWnbeExWXaIVaUPpg<}w=w%^_oZ^h?$Oy@g_9KZzm zvTPqO(?k*kv{gC{xvqddJ+iL`ByaaIryK`xBmZ&8mAR8lR1p(Qh6-ccN@v$5#dFAi20!O?=_=we zjF68X*P20HxWg><+J>RUhU&V)8F;0CB%CTz40A3U5>p}#lGS~@5iNyRb|vg@yu6f*Aw=# zq9#4O*#h|`c*Xu{7|PtFhyO<^{9|}$|Kd!$B(h!9=$v1$1-;p0`gnimv=>I!;CxQE z%X;3bcEr~AC<}^J@nVp`7stDZo;M}PHV{21Nqbhab1$$?%Uv|yJm(6nb`G?*w{wtN z_GLI{%cyGkpl((>0HIWl=aLs~P`IQPX!MgqSL;WMZs*u3HK&(1^;6Dno8|SyobuRM zOwKP^)3+=91zo;VdOQo+3FP%1gihO+AJ4;I9G*#H7!0XaYHun{Qdh_`mH<7hs)kOY zgp~-g^yX=rRI|qzH|kFg33P@VGb#$)^{kG^pC{aj{xw;JwBwPOiF+_ zO@+TGE|9Xk1E}hv<>R;Spydt9iC5j_G)vR6H8P4E?DX}e(k{@xZv5~r3vlP4dpu^# zvaFeDfT0A9?{Np$(LpKNSYAW_W?V{(B!)Z6TPGISHb4HwNR;+9UW@qaWi2QlTEv7_ zSa6CUxyPu?{m@#$jJKW8%s$;X76S-djz5W!^c3?W3>Fw6t!y&$+Z63GXNkgoFH5J) z6k-h;ubN;24J96TI(AcKTm)^B7#U>GwhBE1J3pHu(-Z>-hTZL28qL;3l)=&^nI4#h$=ycd4xjP?kC`GlB|Jb)fPn zaTPbVJS3k^+}hitxG!H_8_#kS0U-Pap*gCPZ(zsy12@-3CVisPKdyINCtUYL6o}cS zlak4cH`ny)%NK)%)wR6=tbkH*fw>YAFZ*jIUN-{DLlfC}Lj}b(XO%N87%vZsHd^=s z76(tFSJ=WnJ!$*kd9+S2wOw+p4N8}fl^fpz00B+Wa%li5xvi(&-!Ys7c z2n+6g??@?2VvKBBj$vNae7V|#tFs|1?q`ncedrh5Xi87Sc6~+#oT^F#b3JkBu!1@c z)+A88lZc6XkMK_{POz&J2}#8GFxhlS_$z`9@k)JlVhgfwh{UBZaMwV7V|D?>d6}vI zd}^&tIel;;jSLb7sufCdbf=9!tx3z!qtoavm&YnwF46t2k;7kH44^;ie(f}ae6^}e zcXj##4<@+wqOhm%0^MVo74KfUw<-)5WL6e)*`x>W)@FU_*_?ImHwg>Obyho-rBa{i z?9rH79;{RagFfP;XHW8=Lcys0h zx><+2%7~E;nzXnaPX%-PN1xnx$XHAhW$EWo?vniKfjs8Jx=oM`tp<&tu4xT@C+ga4 z2Ue|Qf7@E)KhmNrd1Fz8(<+mqbh4{Y)3zo?>7LV@p5SzxhWid;*zd`pNBpaC|DINe 
zyCF{k{ux!%-~J_>ZvZa*xC8Jbd~@isFExrL*AmF0o@dkp zT{4yj;7=(=4-lP7Cu7TKyN@6Sb0<=rYuit#DCGFg_C(-)!WQ!fGVu^YD(;90b(7DQCZ7N? zqs|{jH;^aY^;&7Y$$(Q1%EVF+_?*Py2_!WzOOq;EYXKGhlJ4rUF{vivJn3jh@b7r* zMK~9tUWPlN+d^1L6i1|}3zgq97`apAY?FE>Bc#r8Q)wT@fWAuW73CR5tb{JzzXCqMerR|L4Bocsgp67dqk$xR}utE_258zP+~vCsu5ox zr>*d`yl#j1xi-&*DHLj>lJ2+%U2yP7n+- zi>%Oki(ja8^@kO((4_<1xLAzI^XNggG<~ap^bBGl-)(r{ZF%sN1qS%yjA*g3#heZT1${chg?>4w-W`CI z2i@A-sx2@Ol(0#0w7P~FkNO&**UzsMX!Nzl8cpb{N#FhP)z6u1VyL4g*|F@TKqYux z|8iaR;==9kFPDUZZxbWGiW&U=$CqpUi91Wf9rd1e>g@ z_IhKb`lI=-#pn+L!RgVAHS^-F^(1uQ^W2H$v$=Cr#or3WIQ*94>=e)bPpo#71B6v$Sw<`ZBEPizJ*<#d@! zS-_t>E2__3Rt{Ld9>H(;NtB4UhbqD39Irzo1fDdtz@|`6@0xY;rH4_9VC6URpSN0u z_p{BZ?#{q;qA;}ldiCwZ6xhy}F=A{bgYhIq*rrYqnrJtxZxZ#mpW;elDPa>pJ3yqp zuPO?;>)BTey`{&97tw$d9`YuoUo~1a>LT7YOEG z1jZsbD(3+Q^jighVfxmW_R{Mo$I2wzuJk9zyLjMCIz4uPfPVjpK_KV8w`g9e}dK9}}=47{6{-@GBu`7Che z*WWV`?LQv!@i1k`(rUD=2h>46ByL(7L^nz*{nG=sABg*x#C|yH9w6sZek!+reyOf? z43KjroXK+6mi9=t#GBax#RL%+VLv&79?}oTAS`cATFSM5)I48U!M>hC`Y-L%_Rp;e z{okpIA^ml&_91`a}?4G{r45KL5fMD?YW<@Idkx={uSRzd#zF z7Q7oaR;^H!M+`k2C!6bA^v^cC2kve(lQ<3OVCAAX!ik(|@8FE26oYYvvEsPr(*e_c z+Kb2Qd_;W3Vfo(R1{=kY{xu2o>AHs!0q6;dd+6ea$A0%b6DPz^7U%6tC@(kUP@XY2 zQ7FBUZhqI|a@!S_4iS;HHcmo>5FF~v$17~Cz7@dI@8o~p>$3^8+`Uoz{=H%A-MKl_ z#Ipf$?xGdHCZB`6#Hf4ezvHgGeMnx90}niy@(23Z&%hJ!_xvzh0KOLj^aVKf@C8oR zS=`PN@T@}*UfkznnnPFnfkzp;Z^8khrDI2P*jIrWeIf^xa`q<&9?SxItxX}IatFXQ zTY3BddZuXwpi_*=VCT1g13I7DYrxa~Plw_KQ%&$J6+q~Gp2qj=;0f@{-iLrJoo^F` z3t|IDipeo<8^+c~cQp5JwtMXd{SDX*ul?Z;pT0i7T%5F&es2mNWEOIxsaRp|)3EPa#%-KlqQcF1hx9`}+Q0H|3$bqizk2 zVTqRm!o6cvI*R3o+JqiPo~Ytl{B7|!iO@sFc3DNQnx61Vcs)Pg`*dAQX6XJ3XZ7-Z z$lgC8YB-)B&)J>GbxYYQ#cIraT%E|#ooqN!)qkY41RNZ!sp9`lgM1=z$y)3WSEcH= za(m%CbsIOCF5T07DHu)4pcvZGwqc@0ojS{Xg{~W~od-XMSexc;#m}rY8{u!sT`Ao^ z&&GuZh4JM{hD3x^?U?q(jV2*0w59`U5L#!)(lg!0(viECZqQs^M=-5+(|pW9?m$O%$W8~(1!lP?CCOp^W>EB z4C%!Bwa)hp!I-;ut)_KGq9hzpn5L|QYgJW?VeivXVKB>*1Bqnfmw`Rwe=vIfMD^$t zPi$H98*G@k7Mf3UypvD2(y-(RMNG0D1!6bT*qJ65B&9h1;ZHksK@y*)S_OviEE-x| 
zk!ya_Ov5J-i73>Ih%L21`Qfi;;*Onuj4&_DHFcsUYWqH!vKFfTB|lnWkM!P#=>xdI zWat@hJ!+(z=)X)W^8q7v-ZtfTB}^GMm+HO;^!jRDSVgUV0}1*yvKgc%q6| zbM2Q?0z`U#avZsU;SSQA>49Ju62e+fpD}P`Y!aH*vrJC?@R16(&x>Af0oBxf#Rjh* zO!|JL#LAy-T7-skUNpJ)PAO@6`)pQw1SouexOsX-@PL2^-n;_e`I!FTU!up2jOcYZ zmS6@$VPXd*L79wY8?HB#Vxx`F$?V~vMed40UY2~)z6+(jXVcepI5ZE-%eqMSNxfev zns;e(W0X86{QKGy{~(|JCfnrbYG-#VE*+7M-pF7 zzdXQek7*~*z($aW0k;6`CAxef)Q=PgJ>Kq@#*Mj2^X&yDt9O|F`-d>X!;sVAv?L0t z8~QjGl+f-s0A9%Hp66oO4~Kf3WEqUq0W8kL-SzGKL7}Y`m z>xVrDJMUlunUa!1rxB=an7Tg>;_6~k6OdymIBDTlF&cLIczG~EDWhxD`g2TK0o+$% zKwfCfL#Mg1LThw$15klGF2E17=SI-GJ-B0Sr`r>ZBhg_!^b&xT-NV|AtA?ae<%iaw@i7T@BPwu=zP4^p_SAW2@QkHu(KMkI(`lNRh$F$ zFE{fMj*x6=PFmHoMYtfw_|-6_fsRWK<>NwobT_XJ+}#zgdI0FPPx(s=jQTvZh_8v0 zI#|v8<#ZiJNu)eeI_L9f#Oq+~!_nmahUF+7I{$~d( zj#oVYhY0!a)C=Cr5mMeM7KXo!`BUio+OH!i_Iw5s2|gy;|RUkTK_PpX3oVGF;LUr_|WOuqW?oHYq;-O zT(RG?`XZfLe~o}~xgm@|V;*__Xf!{wrW^dvh`oafVD#vXpjBlM{KIiR)h+?bLA7(A z26d-4Hi?^_^Gh=wLa-+Ut;~|_K1=ZBj~5$`=~e_MMHA%)t^TDs(jWadG#0l5!y&um z>2Fs)CI>Fzr1L)2T;CO2N!}4U_XM>vxUsAu<}1hJn_3kxz=*!t186oD=k$2|R$qojCRiTgi4 KD8R5|>i+<|_U{J( literal 0 HcmV?d00001 diff --git a/PyTorch/build-in/Classification/Sequencer2D/sequencer/img/Sequencer2D.jpg b/PyTorch/build-in/Classification/Sequencer2D/sequencer/img/Sequencer2D.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3af4a7a3e161e8589fde42091657db578ee829a0 GIT binary patch literal 28229 zcmdqI2Ut_8c`rFs3pH81Sdye5eBNH?50PYg#r&Fg-|8(Z`*|TTP08fVk_d#da&az*X zyLs-isT0FB9}fB7-sGJZx>ec6X*Ns}R&e%x!N_!li<^g6L{v;%LQ+vlSw&S%{q`Ll zUA?>d2Idx)R@OGQb}o-xAG^83JpB9vo(IB%g2N*sqoQBFib+aNNlips>uw0CrNb@%+<+cz>gHa;==bqY@)E-WrBuY6x!BX95Q?(H8?4v+pg z*D28HzgYGk2m4Rw0+?{>r!!|xpJDjpT&I2t1inwRojH3|?i~9~QwArW%h%+8JI`_J zOg*?;d~FaD2C_SeDwaV|XQ!s%1M%R9{mf`Di= zanSppeq{pvkA7+Uo)pnRp0dBTR?Z+iec`tw-sl!BQKI z)#^CRaH5jFZ_!4LSKVTg*ObRbja~IOO<7_h3uU?^m^BU|fQ1Q)lh84W@en9!?v*0T ztx3yarJ2i?xR`@`<2BjFeYWMYyT`}+`Smr8wP|u+=xURa$D3CpK^&m_r+zv6fB7pc6xnvGMF4&c`XWq@QVxl#=}&xFn@qhx 
z5{QniaQ4iG8j>Q1Hl;~*4y|AO-?;?|jXc45*=ZQg_#G=t+2W=Y%UJ?mc^7=~fAj$# zm#ZFsG8a0HI>}0%3z2U0^qLxa85bm?A>-|fY*`!abCDCHU!gXsWHKAoQl<3Ytb;pbyeOSTtTE@)DK1TFiVTUW~ zc2H1&Qh5rCW@B{LX0F@S5EQn*488)L@Uuj7paPq+HxLv-O}e?-UNyh;T5`$tP=0FIis?sE*mZS zs~&csD7IeFJP?&_AZ1zx)7?)I9`TSyi*QWtXr-5~eoI%?saZ@m^@GttlCzGmfZ9px z8E^`Oi)u;M*iWN?L&vlChDWjJ2A+zUWNubu-+?v^I(oL@rneY@3VK zW}@gE>rrw$<~W)d$%)pP?YZZ))!k<~s!8BiHFGc1w84{__rkcTo}|Hd#U=!eQab30 zyP&{?68IEVkEESVFk-fxs9DGUw61;bOF-rjdV{iu=Zx@uj(4teE6@_`}7X;{9HSPgBmuj z4WaaOL2Iy^HwjZUP<;3T_SYH+D}|Yu4|i$Di$ykZE%5Ayp1wD#=>J7lGXhr`Z#q*U z90Zl=5p56=xp)UN8i+nO9Y5{mu_2k9dt%3@kFbf4ho0NGf?Hy zPaFa_3+SNeQ97v2;~y)ry3j!;p2+zj3hHh(m7*=vO$AP0@DFw%dtHTt9 zb}&Cpb;g~>PVpq0ciQR2(L@n0iJkb?mtjKi>QCCzq>GUx&pX4;Uuud-0CzN9yh{#| z{7pKgYvry)*;BK6^8yODTGAcHoAl;qiitud*5@7iB#OHy0I!!E80axeG}POI-pN%$ z2(KNN6mAqKqaz|TdY!rF;CPKK zhVMs-9RU;!5gRSYPCAHfk|o=J!cTUwo~1q|X~%Q!`cZW$?m6JeC;t5oo<<)Vzfdim zie^`b_F?5pJn9ntQzjM}a~Zw08P9z}Q318z+sm{oO=eQ^ca3F?C6t7hcu@NVkIVMm z55OGo%lAeAGck30UyEcCOA|tvk#lm%bDzL93^AJeWWMeObQkN(urm~Wl6TKG!fssC zdw~w(TCXcFxBZ?YL3VtVPh}<9HfM{C+jmv1@@f^K^C+HO66h6)5S*l9eilEwVXB(G zFJ8)vbMZu@#*_Pa7o*c>WUM4Sy`190FchRcfI2&SF?s6ye`)f;Twz_{?b9cP z$crz0IVz z3k6LyVSpz)kcX%;($J2i{(Sdb4jd~#Wx0P1z`8&TI-SN ze*B{+N;5*bNxBwytO5fc7QcL-_SpFs%lnLvg+Zr;i5bR5x)Ob6FkK1d>8HKIIIm`Q zyV`pwZe8=N0&ZOyvy_ZD7w*zNv*apMedX!rhE_p)b6wui6XuGS;b~LZj8@9E4e`6S zXTobk_%vSf`-}@aZT*hTFKB>EEd+lA@{b};dyCf#{^1bg$K86Ne4xCkrrrCveZJ)R%&TEa%@n94$AYUIX;9O z7I)CD$)2Nwz5qflcZj8fZbVaMWTU~ma>yki2RdlU0lenM4&1M!gJf@jsbgR=8(f7J z4L(d9KpsXw2tEi^V6j*_NP>3@aG2leAam7pT4d{C5Wvjk_jJ(9*tXVP;F-^1fL;82 zl@4N#CG?zJfF9nRL>^k}9lh&p-Ib<;`ad(!K}rnNJB}>CdOb9k9VpF-mGS_35%S%c z4(iiw1b#F?{)Ki3@fykd0HFCN|e5}cYlx|uz zsu`OtJ6KH`>SjF!M?c8iZ1}AHJRAJAMu5w_WqYYH!9KS-G;LT$R~ znW~!BWrb4{($bXosUDBAu@+{*`EJtiB}VK3FX7Dnl0?X3UA-7-4pUr3z%{|$T3 zBio4-A3EqZ7tKs*j%GPbEAo*&VMcE606uWwCLP4a6^{H4Jn)t}3H&fRVeQGeMCK<> z>P>pdRJ>(Rs}QTMPxnak7M(rfgB3}}3=)NXi&2?Oj97@kYd0P*6>FYf%MFt1lx!LT zFt;{EhRhVtk()tT;HT?fz2~jc_>TK>p6U|Ar2#9zH3C>^VD3-~yvhg3)wP;oC2G>`544 
zP*Gs_3EiFnuprek#9K6%V5pYC?3pQt@pvrXHz^^!V~?NKxDZCLn+ngiLoNVtlT2|WWVIrrL+V_41~wf*oV+eDN!v@GExBss=rN_Yr;J zI5wd>XR~L%Mp{^&efRd)_w09nipPbVJCe4ax`cPY&*7(`Hs7Xazs^o~1iA&m6a$s( zp4Z8hO&lmsUaIcUY9cilUKrNsx&E{wuJ6+u#SvIhhLPfrjNqb-8SOYgka@&-bbzEp zakSb<%iKga2a_g#3GgbMPq71cPSV}ve^b@uE5zaNfA!7qSd^93snnDgg+HCYmQui@)2(kHZ!8~|~Ex9?Zy;^0Tlkw%;+= zXj6RMqk(Z0$>2z^t&Eg!Og(k?^!A16aH+B$&b_n!5ve-rHx^UsIq&NFqOat3F!tS# z+5~L(%+$&0*OchUAYF!I)B1%4{n1VfGedk|s{UxCtqe4wT=f>p(0$FoNVqJ$+4>W> zfHO}@XQhLj<78wXE@Izf}=Sb-W!PVdl51A{5n1cpv&WV?#hK4Zy@&3c1LQ9lnQ%T86fUpg^ns$_mX*x=JkaYPwlAU zS8Zns%l0+4ODv?8;Y_NTnpzJY3uZrB>O(P($GP(8D=zRr)69%ii|`(kj*LZA;gh#C zp4FBMWMl^&B;XNC^nRIhiu@y&!1CBTHcW`@I=9JkcBP}-q${`gU%!7Xh;Kq^_r!J3?uvkuMZZwT8 zK{q)LNcgrKI`3%STQ}{zA6?x+6gQ~3!k#UXj(Zg^ek)!0>YjU_pM`@~QBI|AqC9Eq zH{|6oc?26p?^qt2&?1h2OduE+pkEt1!!#0)p`WZ9&Lxr(b_n2hY;=z37ult;8ZEc` zlT>qpI6|!yI!&3y9|={gab+OvhN5M5?}{KVPRg*ih_OU}I=0(I8C2M+x9o|XC(H^) z^0e*TG7gZ~)<3p5IaC^8;QS%*bwinzFLppH>PnN;mqFN<1F4eat};hiHiJS7vGb!S z%dt2I2CiOC8G{(_gDAC=3W&SO#qY?PuvF=V_289oh+ATGD_;vYynv|JAgp~f zZgEp`+b5qnP`El~<;Yj6!5z~}BmRv;tZfU-&j$8PD^)A$acJZ}VL_ zL>n^bao!3jz7^|>F)QTt&olG2fL?#VslzmUkWydcK?haIA0_vr`|^xyU0!}7!kj7< zI#Em(uyG@6j6s&*HO=605^qNs=srQOlLA(M^`blotv_P$l=b$81FRdcDhLm-<%>&{YxqVa+k+ zkx409<1Is!7|MTl}Y{2R1r`3B_%mM z%>z;cjjQ7&zr|ccNm`#wB{C$^l;L#`D3*k+xE9{Y-87Q-W;`iMmT_HMKAfN&yHM_O zZ?fewRd)=>EPI7w4%mU*SK0z2-io0*f?l%bDvpBc%fFSuZYqLnV0HLfjAX57A&J!4 z?z3;3AYh(q869t{3-{Pg!kDny)m&SUc9HeVU{9=v*BhFY&SP$KnlqSXozxn|1?1@v z(YM&qF&*9VC~L0hj?dqn~C4MyUGeUd~SIjC%)iZ$(05 z?U%{r;`1)I%;KRHeg0#Qh-kAYXd>1)YqMEx9Cd!c6JD3s)3MXh9+XACAMqXOb}Q9d z6%(RvlQk{XK2gfNX_d{pCt|E$RFvGHsn-T1L;7DvC{@JFaA<^6rQc1`oL)e{76=}^o zl|hl$EyR?f)4*)H5i(=08>^?_nyjx+iA=-)Lv2L&%OK`#yGR9<8oxD zvxjij{2I}d@coOK2Q{bXVkO(KpWcVDAIR8yNKuV4=wkiy-sAJ`yfqn} z>FKg1y(O(1CouR?<{Hb@Mz5O{y$zn-Z@=Ms72aJy2Rt`RY5lM|^lk@8`ffd#M0}ew zH_6NlnX{ig7~3grmwHutJ54ip8h5qVwZH%5{iLjPdB2ymr@f&K0r?0LnWH*_um@89 zo+C*678`<*@@(!5)_;iKy+xE#o1@i8Xrur z&4lgS`V~Q;k5KM-)?-0pw_$0v%Jr5=o3R2bMX`v2JvZJDIYQH#I=07L#vWN7mJwPR 
z@zBs-qhn)7tBOfopO`##n>bCf1^H`gycuwOFs(HcJPMEQZBQ?!Zo7{2e|# zMGo9BrGtKk;Pq%jdi#LxkG~4E8=nR5t>w~^TyA@`?j2!iDWL_nd|(mU7j>`Cj#r2G zoFk@YRAGZgb^(;d5vSkQXzOR*X`XU9FkRXZ z#^nvdelh8N@vEj^TWfXQC)w!WSsW1xqH<8o=UXqqV+N0nCj+p|2neY*9JAICh6&L| zmB0?_G9IUUY3{4%6!>Z9VQi~2J&#u%fBcZB;O>2}OM)B7r>r^^n`Mofr>a{lNJywi zh%Rv8Q!;(+RZ7P;%HJ-KFGWgHzFB(sFY(;s^x<^l?vpc(OG?Qwi!+OJZE`NZk`@i1 zS*(cDB5Bb@t-nzPj{>6*kWd1IhOU z<9U8_cfDD8aH2Z|ChbLdrMRzH&V4M2_z;dh+sk(=D=99I(^L1Ngie>8v~Wh1F2tr& z!q}kssRvHTs;s%}u*5PhYr)DpV+X_2XCAi{B%vbwV_I|T=E+;g<2h!=$qpna?80}$ zK-Ap#z*rPQf#el4`yG$!P7gp9&X%{tb2p7vxP94mOX^!Re0nYwov&Lt^$8PqLsc?x zvE3;T{!!bGV|+pE3UO1w`Ou|cbjIP$u(`tLl4-mPIk{%R)Q{+CF!~}XZhWOMDeGj( zZA-uI`H=pECgXhKr5PyHlg2`-s1L!c9w{ed96bdEN0clwP_<9W1T9x z@X@!_9JHd;WKjWcK*^|S$_gh_a!lOXO>>NRaZL)^NZi=a_NI96$5OU+2rKn0Q4OI7 zd5K^n2n2g`Qy%xXIm%?(l4j#e42iktR_l0|X0r*hY`vkb8tXYc9~#LjuVl(<1n$ki ztm*`>PKv%5QB(5V|7ccVfcW8$85nzkGS6U3uR(K(Vg^@tZmJCw8<35CVIhM*xZ`w` zjLfli83mS;`FU2)EK2on8sM~TWeln;R?%k4nHKYzH9}9X$WC?ahlaOt(LpTNC_nl70u4S(mr!=$d$ykkPpM ziuXUZ&i-g@L_ZJCDadmVZk~-S_P{HrmOv@?mYEKufgc5EJI5J{--C}|_Hr+L^Hk>? 
z_$)tA=ur)Lfv~HjU3@~${(zUU#?Sho`e)4 zlqdlNiobm*!zXQ~;h#x!RN+=qLR%r+`qrq^7bHQkCDf!F^0m_T>#YnZRU{*&%0p1( zn5pfgF;r+~E!QI(f152&mr0TXrgJ;@jc(SpyiKT*vAszDrpT7@#7c1A#t1yiNKeI_ z&G9L=YCj>sLw{^vZmD2!No;5%KLzFoo$9hv*2{wX?VFvXYYGIN6cE5u)$?4DxKl+Q z!Qw5XmA1L)43YWay>j`LI}`r7I9QGVF85f$Y8dOS7;oHxobul7Z_H#aM&WX7a=9Pg zP@Gwv|L_`76;FsR;OZCu*ryW5)hJ%aBb{Y$qU&!l;HTc7R^gIZD(F6xR;0kDG{h*; z{ybH;K2H_XZQrf0uQ=k?U}tCd)o$A6>$Lrp&6Iry%*_pU;rT>uTJ3O|^8UmGspR(A zrc3!c7v!$#akv-9l)S%Kn4~ZK$KNXbCrRJa?>wtr{8so4CHk#!+N<`*w71ptcwMP$ z%o&bTKXR^6EX&vJ&SjRA^!xz246N_4u=$j8v1^K)|h*Us0y{4oX}#0UzG z(rpj@E*P`zSfn!QvZpGd1(9wHsx#18wDRhJH&3_eb6CNiX7sN+{Qcr>F6q+Z#60y6 zk=DJC$LkgrD}5V>9;2qE#`eZp3rYdPL)dk4_0grj7d1~4x4st8enHkGp5!EM0^mp~ z`wv`c6J&iHaw6OvHI7{0gPw>V>sBet;WkejkYzHJlgsSN#|t}Ec_gkY{CrI#HprUw z0{Mqm1v=F&;TN*Lu-A0@GIta9@&!kxQj{HI_r6&iLCVh@!|-D;s|?FkZD8t?u}vL;JE z@26pl)^_)-lf~&E4|o7G2eN%eJN00!Nw4gEDxxUcAlJj9GO2)J)fgVU4OLHqNMYw` zf|h(8Zn)x+;~UuY4mQ)^add2ZH&jwiiVo^fM~(q_V%h;6gp`6(UV-;S0q8H04uZd> zgE|H2ppsYsZ3NRUvr^nlj;%k(7H7LN;U#fS>w9=%CgVWanKv2%|!K zggowq5LuDqIshgGP{0^XRY0E@Lk_)>2iuANOjkb=G$5EJXoem^2yh6He zv2-!C5B=l`6{Lx2I&x3SQWZa7f2Cd~n4E)$_t@$E>ZLTtgU}yy8&`}MGbOXW+_NAZ z{6>{?gJITbSI3>laQaW4AcP3HobYJ9iQJcAqlhBKi9Qp-f3*6;+fozu-gevOo4hHQ?t8dES%FPxW?q+E|_w;D+DvauKp(QPG9i%Uphl3uS0GN`2 z_HbGJ-@v#(n=t@`MG;q_W~3)THvB29bWr&zQUk7ls{2?L(L>JTYN$ z`Y`p>TEs;0GiYv49Za0C6KxW$Mk%Qe2sG0pXf{24(N{JokZpErCRZFzNt_e8Rikm|IDokTJm zUxkh9B+6M9eM4JcwnKR&d0Q>1C#7VL{}>ux9YN_za$wrC%&Ov5b=eGo+Rdd&x%dBE zdHWk9V6dawJXBxRl!GCgd9on!%UzAK4@oWoLR!|6VUHv~-8hhU6dL@9WV>K&nTU<@ zf%PS7^fN5KlwC$!y%7#e? 
zX=oJ!?sj^2=#tIcS9^!v>(BHRgOaX@9ry|<`iiSA1r~6KB}MADsbbeP!{e&T7Q{Y1 zw)7lno9G;V+QRFb`h60uHjF*rs3I9Qku@Ws<*MIqreB-BMA<0djOd!|XwO<}&)Q^q zB^$qr3P z6ub`8u{MkuTB$R=9 zo}@}$XNms2I#K$og#y2>XpUfhf#b6_*N>l_<0a#j80(#g$*_t<-60KS?8}aM$?^8S zOPwM2l+?{i1%rK35th7MS>{HCB^pz6wdlduwAnPP3~acj^@6=x)8+Xsrm)Z8T4ZM; z-tdz&ng!bJc#6_^EKOP8w$m%^U z8U5zEw(V3+D=@QCY$s8H#`S5@W)SHb+3nhNGlIywOIB#`iB_zlN@71xw^^cMJH`%H z#|(#T@n!{9RsJxu{%VA-)9v&O+yyIp`|D^))2Y7x%l1>#2Bd7>HwUhK`?C@!b>1yp zFhRB41?VReeyTE=D;g-n&h1B#a-;IeVi5>8v4j>dRc#C*PzayauN`Yr3DIgv1XvxM z-pP8-e!Yb=M`}iTw6nprC#uPu=w2|MklGtfxciv%My%xEjkjx@I}bz+UsZf}epbpD z-va@X_pVZsf@|eRP@w*%v38_cq5>43{#R)sy-%e zkRo-{Ak#xN#tf@toNNqfibb2{FE4$YX!U@`szYCC^CHyiPSP~ZL&^5Uz~5-1RQ)QF zIl_pGCJWwIG)KOtsZU^3I?Gv2YKNP&5%((LFHe4*8+c!S5L2^( zc}8jxP$LQAIOMRVI z{61HjVxl|BYD+tb?nF7J#0hZ`gET=nyw0xtP)JSl&b2&Nm`8@6s-IKVLtU_zUFybNK67HKFH?;h+|2MzD!Y$BgaQegPz*8^5@cnbmR8Eh|4zoCq zu{dk36gT12HKRu1&6XClfKAs}vQKQV$B6f%Pyy)l%EHaRSJ?lZMuy6T+)JiW)lU}% z649NUjqJ(aet<2%6(;Hm;;3y4Mv`xcaV*wXFypsg}fbzv0d=Xe|D;~FT; zReLh^T)Wmb#-@P{r5+Qdz}jzXaDTCTeto{A@s|J60I$ufzRcyS;`}9^E>*)^&ZsDv z*aFi*xd5OV&1<%tXO|6^0t#1tXijMvBHl$UPWGLlFplf$96T;MQ92Y1MP9z62qE0n6+w({mnQ&PkS}#l?i@pD2qAs z^+u__Dm)x9nBZ;B8kwN&?*re^lrrluXq~8}#0(cipapdoGXc z*WSvr+&ID2C3uPAv#=G#>lI;r#qhn!dxS1or31>j9vC=*MYj~lRC*RurAbtE%dQrT zm+RTo8!#1&r<5I zMpLp1^t+##X1UE>R}_gs#Xb!iNfT`0l{4~Hb(VBm8|d}j@xwU1&mT+DA0#KkdOmB) z9bZRilTgtcb7&a8C$1=b-*JL!iUnL=01LZ*K*-P)IBO4tRf(JW@C!V6N{QLsANrDa zU_8kkLpXF9m#*nu$ygrYE0vl(qTY<}G||jjbuu4Xy2c}HQrV294x+{L@85d$Jr@`VucAt zee#P~v#hd-{Vd_!47cQ0Jx;}w+3Kl6JUwZvy`tFjy|C}2+P`8q3budwdsiWAY^!tP z;VIe(WIq^!W1y88HcV8I=pZbRj_fA$@&WxW*m2};!6xtTUstHN-`~XAl|7*ueLkBr zvG8<0^4B^3*w=Ov$`u=z`uZyB@uoC|c(@8Up8&MKEH>%>wrJfSJT0-nQr$Y zE)&JSmDu*ECnV_vKDl&qX1hadg&VulWD?2meX9&J(l74vb1gRN^D{OSgjD7z#O#2a z9&-+z(H84n;goO|({HUbXLaZeZm?Lp{UZ~I5wpa{0`ioNwI` zO9t9rKDF*81N}Vj=csjEbkH`?b@MMz7tpQ-3IP2(K*lW}1fhd8PoF&MMSfQSQaCb_ z`W`~KK$bl@jokL_04mMJK%(T(gWSCar0Y3A!Y~A)gKXuYN5Izo2q+yS&PuthX-EhC 
zGDpq&KnE?B{3lW%6Zg_-;mGYdz?Al#rd=BPqjLz@O6)r1P%@Dc`7MwRx+P2PU8IAk zB-VeSJA4I#cUw4U&z(B`DGkXM2BGAG&%p!DCN>aSg*x6|0YsBap24V%V&Ul9fSJo5 z2f7n`P`2-Iq0bjgMxXvX>~C*aZep6mY-5=1PBHDlp?mq`rTKi*+){i?MaRGSYzHPk zs2nAF>|1+EezSc2B~g<+I8d0>;ruQ8plY9YDrBhY-agQsga`A%y>F5e!?B8q9B}h4 zUfo)810>8%n)NN?L|mKm7*%i!Ay~)init&@%!ZHH+Pt|Fl(v5Bef%@a z;D+JvGY(U4w~dg7OZCq$OMG&Y|HdMs#~5C~tN5@y%w}C;ps;>zJTuj9oOc;x<%f~# zGs?|zmgDrXTq8;u=9Jsk4;(fp+2Q4^;)H6PQ>Sn+zF z<~XTtA@f_D$PS0FVf7=Jbqgc;WWgwZeOB#b>oCsHvstQra-Yk}E;X~ft8D+ktnkeT zHO^smzg(f1hW%&OO@W4vXc6_A)rQKfdT4F&X1{di@b`w?+sKX%h|AWI)ZF1i#C@Q* zPO{+ZI1Nh8fGm6N#XyDt_3a1JC{_yGZ6lNp(g9$w>#Q+gYM1E-P}rBH8G(0L%S>rV zY8sUK@_$<^(Z%`@x$O>sWTDQ9KrwN#3i!TF6XZDtBZnOU^*>FF1~5B|5Z+c`mt;L^ z$2tcxmktt1r-QD6Nu;5#$F!IOnhlr(av1X$VPKmADjjrqz4h=C&_o>-)-$(j-MamW zk_XrqOTljJHn0hxA2*J=z$<_XxY9wtM^iNw_Ofe;9}l?NxBThtHQ;>aCNy~!Bj@vv#H4Az%h=k=tUOt+nZ!SC6?6 zMigY}I*pwy^Nt9PZ>Y@!`cPY=lp1r-Aa2!-Z&DRhHo>H!&ZER=|7^ntq^^q8_fsBq z`>)s37p4c$%FUv0@_I^#5Q;)B-oRQ#=pYyLC*+AcYvg!2m?*0c zG!?VUbt2C>0@i)>&3^`o{%LSWv%rQzCTVowQTwm3)W;OlymdR*&a{lQH)V)Bq^Ng5 zmC}db&@=fHb4#fj-vZ zUdUoD?I%Ebv`~=4LssfV$TFBT0;z62;c9(B2T}Z+{_tRUH8CpSF+V{M8EVjgAOZ04 zZ#~ow8n50y;Ft0bNPvgCm=4(E5ItbyBqW6ys2uO70LFL`LIOHi-y%Ei(guMj!;uEu zA&ihz0KIZw%-se&4SzBXIM2K+H5_kF2j#v7$XK8OW}ijq zSc4844xz{bf@1h-j@n$1gzSY-&jKJ`hyikE8%*Ixem+#$2O3sh5zS60r5s@$?rQ_A=meQd?_=aay-+Npo>cKKZb#Exc>I zQQ0!#3)0UV;ktv{;48?7-+|9?2d63 zplq|mF(*{rkdk!ZUz5E+wuKDk;sKko?@!?*#mp_VQaLhK2$P#!$G0d&vJ7k0R9;Hn z0`&YG9MZ8vDeV^ICDmdH0D?0=7awqrjz$uZeDI7N z(=mbahShGPZBye*)(1&}&+P{d?Hj#9#Hv(F`s5O8I+e_x4ka1+me$n6+E(4?te-wk zoo>|5q5xwGXTpXAuT#{>eES}hRL_OEsnZLSoqv?EImn4G6u~cj5vCLe(s36}re%x@ zCan-8NrlAw5IET zpxx`(?9F}&k+PK9mYi*G2kGY-wPQt?zKiV3C^G{VKR;dhVBPzHh?czqacrP{VMgFd zM1|0oztGvgmxumPB#0XF(Ofu$feK^Tl)}P$2qqogS3HwxQ@J&tFMDc>kYpqGW~|($ zTYjF=@xIWIfI-bT>T@qTW1X;0@egfg@dmV6M-SLI2T1{x;>Nx|pv~VPr1OBM+)m%*1bS}z zPBMY6f3fBdWVc9731F4;9i+ML94=B%gr)%LxUGf%!*JL&%8NT%vlA-XpS+fTwAt%0|>d}o9jT2a$Pqhf(u!lOXcviBUx|M 
zlhbvO=j=RlR4MTQaoF%CDPlJXnt;$C<}Juh9WStUHbZEa64@wH#DJmj&h#+8(Y^X6 zFG9SO0;5^V(@h$L*)Ch7Qt1ZB{iSaux>Z^lIj%X z`QFtGXxLs3#HfBOp4}8H%sPe@nos;VVZrF(`}p7B#PyB(CgL(0jq|aQg)lo zkO%88cmFI`{F5j8Q@Q?~r0`#qP!cf8B>>ZBUy(EC9ZSopx@6gxIBc{d=U5AyEv)t2 z#*gJMRB#EUqZ28BV8~l@)F4MLzwz@dhZh|ik}A;JvGW5=KYp;XQXFZ=mSm5wz~~&5 zd2N>cd&fZg^B4n@%-Na3_cN)xc5Aa+UKP2bG8x&*i_`F_r7#VhC%EUn z)zDA`JH;DI6{ds~A+#sMMBr#*Qy72axU{AUaftu=Wy-K7ulI6tdy631xS9E#rb`(d zsvF{P&YO45^kxO6i0{0TH(O}6_uMZ{nvs!PdnsOvtY7PFNp}Xm^&w5u!WH10;v?hjk?(2Nn- zcr3p*Zzjgf;Yy60Rnv-Cp%Y(e@1e-^5u_Y6Y(gF3*Ra~c)YMda;f|l<`s>vl^+iI) zEQ^Bq-Ck}Do&E*kr>eb2YX~mOF7^6IRbza2j-aJ;ZpxBfY*^v(_~?AOc06?89h9$gzGPnx=z}ze5P+ z$hBX9U;=og4H!qk9=!t$D-Z~WM*neTE=#Qj3XbmqyM_Y_rKjm287v)i?)yKl2MpAl z+-uz)qJvt(|7p*H69eEAxinik=-?|7le;Gm>;j=3u~IytN3S889nz$zgP;&@S zHs|!uWRbn`ti(F1EHYAkr31VHrsyDR1F)yo!PB|;-09rM04N{^=wVa9K5}DjV-q|` z1I&Mf=dtV{?Mv(i)ay7fHE`d%a_HIopuPkW=Ga>rYCPSAJz@&|;b#6Kw2&WCvl+;R z#6V#ZUVFIa^7>5(X`*%SXXMg2pg=QvN2$QsA%C8|+GJ0GRs{W%M!>VMcmiiF_Z0qQ<1y>MOO?PMB`ObAW{-nP zTX?u$6RCZ$U3H5#kx`p_Ics4vsmIgR&znzqj2%@-F1P{ve516l2|jqK`#xIotp}-V zyR-q35N|8f^lGqBcmkU`D2;O8(JyY%gfcpR9}T!MPKfof_CFNhEl2vOjYVWwKlh7; z*=de=$gUPICfJPFWQ)XD*a{^nTPNI|xh+(Hk^H`PTS`K*@8+3ssPJekFvQ`dIG*9D z3(Rrk#49Y~|ClRR&hQveD)?iH!(aRLU*%nGnUfgEBE#`rVC?ZbZ47)=DYI_Z*g|mfn4Ns33gh zQV*Z<0(a00FZLE7a8*^05DM=kK4~t$cRPQ{%TnE9wO2_F5Y)(>LqK{g3&yeAoI0+i6E3UXsM^fXd3|=Iw!UROc|^f)P{F{m8rZ z2Gml^fk)G(4Zw`XG;Y^uBT+lbGu_#vJD%TCZ9Fq6!b|m7@2rHrxnG=%X}4TTMtJg4 z9IEQcCPIn~ZPUI{PukBef_hl-&cz^9u<#BY(ol@18c~*kQnJt?$U9jx$Kt6z-V=o| zN5OcPGhv>k>*|_JN*a9nE3uZNKi)?f+7~Y=F@4CDG{P9jXSs;?u`k7?*e@lS=_}V6 z`tLv9^+}OZ_lZOErot@BnJj^M2g?zZnIc-npzNwDO06wq*a*{wdi?GUnQNh^-6Y~D zEwccqg?6W@3t~%chDz|Orc)kAgDAmxFLnGkPkekrIC?GJ4if#o!Moa!$l@p^z$0es zXl99{jL%cetOxyAB#}% zkp9$WrSDGGYW?DaH*#dMm*3AX@TLlxTA()HuMTF3FvVE3!m~uA+P9}Pf=wc6H>e6^ z^ebrFEDt#)3bzQ}4Dc@Pn8#kJ*kXmh@Z{?c2_jUF!7Xh~WxN(2#r$9n z65yew=kEWS(wuC`RfVcmm{bgC#cKq=Q?!Q{9cL@(kHm*NTd*XaagX!eg#0 zB~yii(EBH-ursSUU@~irOp7Sorv}PQ)oG^als&NR4a7V^cb=$=|B!8BK(A(amCs_z 
zDf##bD6Y5gdzxMRm+F|cjjOfQjcGja3?}*Xn)m|Kk>`zJ;~uMp$-V%bB+=-w`&fS9 z1{>O}w99-E^(yV+Q_WGNU5v2}yjx-rWs!X-(QVw@oLr-xxT!(nG9+ z7gQPd-j!wc*q8Ij!0y-97xrL!%iJr!!k%+wWVc}Rfsvs#vZtNvQYvmg(XR-K_jqPE zV`sv|dimxpPOZWq(QnS zLM^5Oa+pJMFuQXu`HDuTBkBDq`r!!$fq^Bz?^nfmN)R&B*)=ExY- ztLWMUdAJ8t*=oog%+1M8$%>-a&n+bmTF>sua#E$p-W$COtTADXly4+E+Gi*j!H4zW zb!*FwYh@4clIpUX?14rG(;^Ef8U1a7%s|OI-(f8b{3Ss8wvm_b#Gq8B)H&tt58J6V z`AY_H*m$JZ4IDwVc%pc0CfLlt;8JF+Pps!VsY37HQlE%CsHl~RQty}iEN+qV%;wuY zf)$)9V(_e#mqW@TJXPH{B{Vg6i8DF3?ZCCl*;)N!~If8BrOl{J8I0@v2`BX?mJ)I& zs&C8uW6emw0MdZ;VO$o+xXJrt1S7t*ovN*`Tbg>eTRC21o^0A^u*RHW8lPE}lJay0 zTjLaiF^LwlXW8%=?P_?NVFAOLT*xuX7-=ft5+9DP!5C-6CdWxyM9RSQyJ-+|bw?{V zg7x?o)%>I&>$BP{{}q?6El zkYa!&yxW;Gb7tnpcYWu)*ZVW&+0Rb)UTd#)uY2835-N<*QP8DV3K_pX<+{EXqyMl4VjL7yx-cQj>g10pz^J?=B_Tzqq z@^ascuDAw6u zYj?6szUo9j_5v&1A$8{b*&!<9_`Z&2^pZ{+k?wxgjL;?1Yv@%&_ff{k}#n)I_2 z9#n;cw8*E-khClD6cy0nA8O_5CRICn>dYazqZScPyPo(ki?dK!7XEts!fow!G*z9A zV&K`A>TWsmT!qupQ#5l`(%i0CvH9`N&=+du`VG87DwHDqhQ(le5!D;QYaT3+efy`d zvKWdr(BRmw$K~i@0I7sB3<-~XI0n|AD^Ajf58M0Q4{*OIKfj==V{^l9)bw;u*{W-} z1#$E0!b=7_FpLl4YfO(XJf>&utw1}Zu@X!=wD+VB4`I%q zSH>hi&^xYys7_ ztz6nkc8M7%@);eCnp|0PsT5>f@dOE>+pUy!+F?|BZ+*iS1l?mj``w#%0%KX4uG;K@Z%qc z(VtNMcbK0w8i|p3E;3~8_*f~gVGRVl>1WG>RCq`w^~Rm=tUj1YRcja$wjaK5j+dcIm%>7FEqI# zpA1&l7bSJDm(MZ~6x+PjtD+7x8m5&~ytm<8d@-lj5MdHfs$LRRZg0}_D^&8oScwCX z>Jt&yZij{jTz!6rEdYa(g(e#d4WYWHZ^|ljP^jtK3(C5;CAVM_zXCAR*e}Lh6Z?%%N%+s6T(kph{Q|xg#+Cr87Y17pxgjZl zC$vIm6&`FsE>hOrpMsa$xCPGke_je|RsR@Tf1?>xME)Et`*jazH@K`s006WAiv`61 z*v1jc*Gh&b*S7s{(}2!O`TpfT00;&s1A+TYk)7uUV9_`ge0JIJUk3ZjtS_#=1M*7Z zEYuRZcKCpT2i`fMRba$3$OlabdQc_BYFQp&%{KJ(^@MQc{med=JJ&AmW*x0l`=+@o zB1tg4Q0)cQlL1)TInA~huj#5o%-mO=tMiWax`&1c{I@&qlf%3{)VVQ;v3# z?)ap5ex>~@qcjdShND8~7-i`ATDnHRH(XEdi%a>Z%AWGroV>y+9xPviDS*%v8}puV zg_z&!kIOEae`a4*e9U%YBpFxZnN;2{Q(To!M#<7nUH+b-K+hU8 zJ1Tv4{<1ZV5cwyyhbA;iD^biZ23edqF7YK~gyg#MbRrjoc%Rd@ATf&q_^%TrjuR*y{l>eDp*4&Uyhw8ng8ss zN9#mMzm@y*Sc$uW}__NN#`U|FV*iZ88h?*UJ_|o(gBw%8G7+uz~^OKD`WW3$8o4xaK5^(xutTh 
zeoFO$VBDMFypTE;p;{eLkYcG@+jQ4Mo}SN|bJEJu>HBhB{^2Y!{kk+B)KH7? zf1igRKFhy{vkgDq0Q^Z@&_AXPT>_F?+#t$&SQ~782>Q1vPqyGo6o~#yW@~{R3f${AJ z)^L`{u7J^%nLV7Hl&?nL|JawnK>j{B7|vojZ(}>K;V@u+ioC4rY*pZ471>b~Kz6W! zF-|k3IlTB~(M0@m1+1%LmivhEbre{6b1hc@b`bv=90HPyfHWaQV1;Od_b(5#8=Rr+ zMc)6#tUtf&SqVg36ii~DYw|9U;=e36!hc_1^UC5Q9<3uI+l{wZCZ>dyI_hNSuiJS& zue&9g@Lb%rM3@ixsmZSCxkAjoVt+?r8QHs0VP+sfZo6(__G;vpnOk>bWIE9_+nAF_ zKPX^2qDPOGL{7NQ_qyJ(>=eKI?@;AGVgXOU4y$5D-2$bAWsc=@(q}OznnZ`x-%8>k zN*~I<9*d^#r;gYudx3$}> zqKow(ZJ|5_m!_RT%7LMU&}&&7Hsbo z%I!v?qMutJFU#%t`Qovkcuz!J97A35CaKd%GMDZGc6E)B@>51u*Bj3s-Hz;IhbdeE)sfqX(7SSh>IoU~Yc>U?3xXD-Cqgks> zi~C*Z12l3&>sPYgxw|%OrKo7fo*22Ahr4jUVt@W)1^yG{!jD?VyMC~C3vzP`wi4Kd zSMUSD#;M!F+&VvF&h-}F4nKYFRbSZLxjb(CFL%4vPp}v+GPnd2WwCdcxTB&(0&!@Rpr4_Ltk~=@3jl-xr zogvNmoqL;7r*6Y-3U>~wo>YH@tV2F~$WIRXJ?e|Zp`_mceC8( zWFJo29YJYLGoP8$v_=PVbEH>c=y()7g$@(IbwP!61=(=gnF?P|;5;oG3B7yP4gns; zBRB53&rZq9dd$WKL8d1ll*$K;^favBaevLP$jiRWq^?zLbPW+bHJF4@cbtNs;>Ca$ z+FExNs>*|Soq8j73sh6G#*Z1)vybZUw?5rf=R~xr^}2DaQ^qWIH6O)^OcVVS=zQ?Z zdqHdc{>Hd^^6OjqUtZ`Htd$OAouW!x2;U`zuA%Pv)0HCR<}68V6AWTO<~wr-{O!eA zdl8C@eIIPizY}-DrI2mH@?D}>(O{0$P<&C|Q*BG<5d-^uHh$20%E1Z)W=ictk0^hq zw{GQK&kLC!7>evO%^$WPI}ETrx^jiUwF%VAcrz4*tIsA}UcT27C(%awDYJm>VTL$n z7&~T_sQvCoe&R5Dl1R8wAsj&Fy)Pt;mYTYBHJw|aUQ~5(!V_JPHHCVC2 zAd^b(H=bMfNb6Z}XQR^nX%e*s&C1s&{Lla3!|Y8rjk}o->x*#|JaAiBEzhn>j!#Ut zBggXCE?ukkXZu_$%4jH&3LT?Ng0fYG)GkM|464S_3$+9_e}bjS-6VIC^rDDoP9#fN zdizRF=c1R3Ep*+Pd)9v9q^pMOuPH6z>pevQv?=~#(|VYFof%O%0_9yyki_@96xl+aZer`P#oThmsL(}VxtkpwSh_8&fuQbxg;Ls>i^;ndz;vT{M2CI4$ z9o^`Exv@&#jbb)_MZ&IIfHOqofU_s<*KzU^C2ow6>bCha`{p0+?*G42Sv!<*Jo`mP z@6gS4Wt+V_93P+IZ<*YREvaI)5mXUd8ZStXW<5>vr zG^AS5bel<}SCo;ok)XsU$C{FSx7vT^h|-hH(5QQw(m;SS>qkUGqHW+-=0|3QE&Hku#n$u3JSXx*xHJEFTFEu z(5?d3l>>Z;_%pW^)IG`ig27g5!4XPy!H1y-mhO^dP3i-7DLSnNBNNGb6zQUH4a7c# zJV_y3*s{OeJPH5f)TXo1p}cp|(c=bvXkxEMnwk+i0uIkgrRp+Cb3L0{Ea9t5_Ceah zOz9CozQF2?W?2E;L(D~UPc42g#zrw@r5~ip{fMPQaZah19kZs-zqGa0cTTgvs7u5g zudhL)uM{>x3;B{Kt;$Ouva*Mj>nv*E)crOkS^~&iad(nzw5w58l5C`_0_p^9jFFf+ 
zm-FllWt*NLroGw$W0oD6wbx-MhL>f@R`#k-}ri6h0BG)eIFd? zoQ_3aoXqTs5v6z8Wbq<%G`mjAMw&a@gt^wlSa%plqA-^#UCc5V<9oZaM2a#Eb>oZ< zd1cb~AkW6A)4Xh#o(`N+*SkMql1Q#WhTM;OdepEUE!0f!shvNsxTl)rCs18-7^_`Y zS;5?S-cF=l{uWbbA}wv^p?J!J5!T4NaBXGp443lm!jvKE+fW&gqDr(`=Ojj6ap6sz zwWC*|U!1m`tmjdYMv?L0Ea}iNS+ktI40M9;=)NK{DkQ5d1(~Fgh2V2V?c-ZRs9>&| zxPL|@3ctG-_C^}f{LRB8ab)1kfYa$p$<8^qXxhIlb5SLaXJ{C+*5i1`O|vlb*Zb$G zN;T(swSK&CoL5iJLUgC)oVEtN%`JYc^g#VR(+Gtm1dVQ8?t5I?E;G$6)&d(t&gf2) zv+*JE)R%rz`&!C9QS0Cp!(3SQNfY^0?XBQ(txRt=TXNvX_eet>(>Y&`ChqQkfx;Xq zKB(JiRuW+Fvld!sa8@j-`f1JwJX?w)*93sr^(iXsweK@A|(8-uj)(iJ@3@E#g+#zrUDI7igSCAwPb_KC7P&YDR%OK>Y zdmFD(xoJSd6o?GG*@EmF=BB{bLBzi>17qyQcbS4T${4f?1Z_+La>FpY0#H6ofX#s@ z!<5J-;lKGpmv+ErK<3c`a)4*G9FW_Q0Z$eGEdyWkehZR8<(g48%L%N<#8q?r1W2oD z=-e_uTmYXb!Zv9zIvTui2AZnFoI(A+%_(m)IssG) ze7~y{oTL+gO2M_MHzQZ16^Fdc!gR(L#>ntbSBsmm-8TAg1LL4ULj*kWON2(uY3`!z zWO4*m)TKcIp%z`0SsYh;q_`^QUW0xoerIvgj!|OmpmJj$-;(jOv3LN$RKQ25$NZ4^ zLqP4T>Kt>N-3JS)5M?AuxvfhCSuqz1-FzKXyAymfrfiCos+zIt8IcZ#xS^?NNg|D&+y`o!3|MSU^H? z6<`@n54Io=%t2xeV6Ds)!2ct1g%$sgRs+h+0ZXF{Ah$*CLJIeUGY17-2Z=kg6rYP< zl?MqPmAlIOx0!)`bXb|2NnqFDS+=kq*hVbK^TC#S7%zse-Q16fMfaB})*No$8;rhG zcR!lv_5-_3i)+Tm0#2qi5OQ^QEP4baWsXJJRsUJ}3>pFAALPF4@NT4TD zHKPG-XmrRVPFykcoiDlE?~eA_2t->-#w^Wp*09tp%s%01-=KAG?m7x}DPv#e_RNfJ zs0$wlw z(*FSn75pZ|Qd)fJ&*X_c$hw1fgFY6cGz{~)5DU8E>ZpfUwQJ8=wLp5>PN9IcIAXjn z=UWi>q~BZj2L>UNm%4qXi}&yEw>our@Ty9f_D96=(wy!WN`|4>t_h?RiH! 
z07Yb7{ym(=ES^eWXFBe!3%9zZq8Ypq|j@Y{wz`&Py~93+&k^?1Zb9zJ1In zCV=5<)>(K4aZbx&X_B5A>dSeYhRL7NOkVGh!~f82{t~;Wp7b(b6B(|m(q&L(-Fpvi zKM-f(Xy<$5e3Ce-bw8*Wc;a;UX`BbSj`@E?od4u*Zhy4>!p^8i8j?)Nak$F1#Eq)N zFjQ{zOHN(n)fIY*o*7I0VZ`Ze#C{u3HXC3lM+scj3wzeO6Ttey)^~%I4L8}wDNYBh zVk-AyDUgRVhv3~Yl;vgccX_~AT@dKsE`=)LzElbIRLSC~RkBR|N7=CVSW~hek*<*9 zlpdH#HCWCDG?OPE?PPkSHa!ZxNSJm`=*4zltwjZN>3G?uTCY1lWS{^{ILuwwBP(v5*Hu0qg4QbT|L z(OH@C;);TbVityhna`b9xal`pQdW|z63;{Ti4`@K4DX`x`A}XMv%%Td@iNB!G$2sq zpjihZ*rQ2&rn%L`>S z+AI;TB~v>eSlBHZ`ce$E>@+zkT$pQ>6equUaW~tU z1)45u?F&dXlpv^x3Vu7_ir>dcCDb4G%NmKo>Qg##r`-0`iy6I7_1is*>R4Q9RkUEH zidhC*Y`7z@bbJ2N{-#}lpR|)K%>$=JziCNxv%$U(DXb{&(#rC&EJft#%PuHCMTs=H zDyU6Nq?0hZKhivX;tQ8M8VKQdcW7|AaJ47vG^H*ps6$&Y3rEHAG8KcX4o@YY?ih*K ztbNagabl{3vG8^kf*{&9!CJNX10mE-zMHM#p>U#D*s`RBC_oF-Yp+?aYF>ggTmpKe{R80v8cJAczZ zcdk&^*wJ|48&R$DK?ma74bRE$Om zAUy~rEURprHvhio|MliB$UX5N!k@LA8y)k*=HvUqdtc#@GU62*Vvxy<-@Q;vchL=K zUA%4%eV7lUdAky9n(LHy-&S6m{NgT^KsWs^d8c=@l_d1_s*nJA{p++lq~tbk%~zfr zkNMlNLkyW8UVeU21X?{^0g-9SP%0h+IGKQuIh{*>rHs{N1$ylXk#}Y)J|UGXRXxVg zjm6Nd^l<)b+BfI{2Q@eK-!X?nP6h}O>_49=N-{=v%&P>VJXU7iwM?oCp7+YofQE0VCSV2%i*xQTtyR~UvLUVd9N5*P6Pbd&YoS*oBbW3PQ*aX`#R9Z z;X>!R#U;x+1IFTuEy#9VUhLH+#59yiARcB-(m}I=bvFFXuBt2TB%11=ICo;y;F+qS zsr#ICqygHgOI2y7Y51k(wpTQ=pi7&nWo2}HPZeRK^WD8PBadYgkCx5XF!X!3;*z{Y z=d1oqZ@R*pC1nFvdmxkg@c~}A$nazZEQM`c%1jPYBNVkBq<6_jymVsp6ljiDC9S!8 zqu-;i<;CuNV%6nv#cK6v{cG>$xTx1(-{a69>?=#As>Z)yf4*lnPSCV4E0{32p;%dr z573cH{pI&c(+*tcwfF$PG&=U9)o#h-A0j%#e4J``3>*&lyvOn1n+&+wox(y(H?Dkc z`S7xAMYc9YW=6#M$?A6xD#A-IlHdGLf|?UQJP%0XJQ91j>qj8NEIrU^2Ttu%l+8d0 zlo<|4VT)V?{MrTtMnAd*Aq}%ZC^)|Y0fOG(Ni&pn5I*)#K>(iT$tB{yqPHOLKW_tz zIm`i_l4I|;AfCVM;PO#60zuKklRJE24_gAivIz=B! 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/PyTorch/build-in/Classification/Sequencer2D/sequencer/requirements.txt b/PyTorch/build-in/Classification/Sequencer2D/sequencer/requirements.txt new file mode 100644 index 000000000..76482464f --- /dev/null +++ b/PyTorch/build-in/Classification/Sequencer2D/sequencer/requirements.txt @@ -0,0 +1,66 @@ +git+https://github.com/BorealisAI/advertorch.git@cc8ccf971fbe28d0086d56defca2255d8f5d7b4e +attrs==21.4.0 +boto3==1.21.3 +botocore==1.24.3 +brotlipy==0.7.0 +certifi==2021.10.8 +clearml==1.1.6 +click==8.0.4 +conda==4.10.3 +conda-build==3.21.5 +cycler==0.11.0 +deepspeed==0.4.5 +dnspython==2.1.0 +docker-pycreds==0.4.0 +fonttools==4.32.0 +furl==2.1.3 +future==0.18.2 +fvcore==0.1.5.post20220212 +gitdb==4.0.9 +GitPython==3.1.26 +importlib-metadata==4.11.1 +importlib-resources==5.4.0 +iopath==0.1.9 +jmespath==0.10.0 +jsonschema==4.4.0 +kiwisolver==1.4.2 +matplotlib==3.5.1 +mkl-fft==1.3.1 +mkl-service==2.4.0 +ninja==1.10.2.3 +olefile==0.46 +orderedmultidict==1.0.1 +packaging==21.3 +pathlib2==2.3.7.post1 +pathtools==0.1.2 +Pillow==8.4.0 +pkginfo==1.7.1 +portalocker==2.4.0 +promise==2.3 +protobuf==3.19.4 +pycosat==0.6.3 +PyJWT==2.1.0 +pyparsing==3.0.7 +pyrsistent==0.18.1 +python-dateutil==2.8.2 +python-etcd==0.4.5 +pytz==2021.3 +PyYAML==5.4.1 +s3transfer==0.5.1 +scipy==1.7.3 +sentry-sdk==1.5.5 +shortuuid==1.0.8 +smmap==5.0.0 +tabulate==0.8.9 +tensorboardX==1.8 +termcolor==1.1.0 +timm==0.5.4 +torch==1.10.0 +torchelastic==0.2.0 +torchtext==0.11.0 +torchvision==0.11.0 +triton==1.1.1 +wandb==0.12.10 +yacs==0.1.8 +yaspin==2.1.0 +zipp==3.7.0 \ No newline at end of file diff --git a/PyTorch/build-in/Classification/Sequencer2D/sequencer/train.py b/PyTorch/build-in/Classification/Sequencer2D/sequencer/train.py new file mode 100755 index 000000000..a955bf15c --- /dev/null +++ b/PyTorch/build-in/Classification/Sequencer2D/sequencer/train.py @@ -0,0 +1,870 @@ +#!/usr/bin/env python3 +""" ImageNet Training Script + 
+This is intended to be a lean and easily modifiable ImageNet training script that reproduces ImageNet +training results with some of the latest networks and training techniques. It favours canonical PyTorch +and standard Python style over trying to be able to 'do it all.' That said, it offers quite a few speed +and training result improvements over the usual PyTorch example scripts. Repurpose as you see fit. + +This script was started from an early version of the PyTorch ImageNet example +(https://github.com/pytorch/examples/tree/master/imagenet) + +NVIDIA CUDA specific speedups adopted from NVIDIA Apex examples +(https://github.com/NVIDIA/apex/tree/master/examples/imagenet) + +Hacked together by / Copyright 2020 Ross Wightman (https://github.com/rwightman) +""" + +import argparse +import time +import yaml +import os +import logging +from collections import OrderedDict + +from contextlib import suppress +from datetime import datetime + +import torch +import torch.nn as nn +import torchvision.utils +from torch.nn.parallel import DistributedDataParallel as NativeDDP + +from timm.data import create_loader, resolve_data_config, Mixup, FastCollateMixup, AugMixDataset +from timm.models import create_model, safe_model_name, resume_checkpoint, load_checkpoint, \ + convert_splitbn_model, model_parameters +from timm.utils import * +from timm.loss import * +from timm.optim import create_optimizer_v2, optimizer_kwargs +from timm.scheduler import create_scheduler +from timm.utils import ApexScaler, NativeScaler + +import models +import utils.timm.summary as sm +import utils.timm.checkpoint_saver as cs +from utils.timm.dataset_factory import create_dataset + +try: + from apex import amp + from apex.parallel import DistributedDataParallel as ApexDDP + from apex.parallel import convert_syncbn_model + + has_apex = True +except ImportError: + has_apex = False + +has_native_amp = False +try: + if getattr(torch.cuda.amp, 'autocast') is not None: + has_native_amp = True +except 
AttributeError: + pass + +try: + import wandb + + has_wandb = True +except ImportError: + has_wandb = False + +try: + import clearml + + has_clearml = True +except ImportError: + has_clearml = False + +torch.backends.cudnn.benchmark = True +_logger = logging.getLogger('train') + +# The first arg parser parses out only the --config argument, this argument is used to +# load a yaml file containing key-values that override the defaults for the main parser below +config_parser = parser = argparse.ArgumentParser(description='Training Config', add_help=False) +parser.add_argument('-c', '--config', default='', type=str, metavar='FILE', + help='YAML config file specifying default arguments') + +parser = argparse.ArgumentParser(description='PyTorch ImageNet Training') + +# Dataset parameters +parser.add_argument('data_dir', metavar='DIR', + help='path to dataset') +parser.add_argument('--dataset', '-d', metavar='NAME', default='', + help='dataset type (default: ImageFolder/ImageTar if empty)') +parser.add_argument('--train-split', metavar='NAME', default='train', + help='dataset train split (default: train)') +parser.add_argument('--val-split', metavar='NAME', default='validation', + help='dataset validation split (default: validation)') +parser.add_argument('--dataset-download', action='store_true', default=False, + help='Allow download of dataset for torch/ and tfds/ datasets that support it.') +parser.add_argument('--class-map', default='', type=str, metavar='FILENAME', + help='path to class to idx mapping file (default: "")') + +# Model parameters +parser.add_argument('--model', default='resnet50', type=str, metavar='MODEL', + help='Name of model to train (default: "resnet50"') +parser.add_argument('--pretrained', action='store_true', default=False, + help='Start with pretrained version of specified network (if avail)') +parser.add_argument('--initial-checkpoint', default='', type=str, metavar='PATH', + help='Initialize model from this checkpoint (default: none)') 
+parser.add_argument('--resume', default='', type=str, metavar='PATH', + help='Resume full model and optimizer state from checkpoint (default: none)') +parser.add_argument('--no-resume-opt', action='store_true', default=False, + help='prevent resume of optimizer state when resuming model') +parser.add_argument('--num-classes', type=int, default=None, metavar='N', + help='number of label classes (Model default if None)') +parser.add_argument('--gp', default=None, type=str, metavar='POOL', + help='Global pool type, one of (fast, avg, max, avgmax, avgmaxc). Model default if None.') +parser.add_argument('--img-size', type=int, default=None, metavar='N', + help='Image patch size (default: None => model default)') +parser.add_argument('--input-size', default=None, nargs=3, type=int, + metavar='N N N', help='Input all image dimensions (d h w, e.g. --input-size 3 224 224), uses model default if empty') +parser.add_argument('--crop-pct', default=None, type=float, + metavar='N', help='Input image center crop percent (for validation only)') +parser.add_argument('--mean', type=float, nargs='+', default=None, metavar='MEAN', + help='Override mean pixel value of dataset') +parser.add_argument('--std', type=float, nargs='+', default=None, metavar='STD', + help='Override std deviation of of dataset') +parser.add_argument('--interpolation', default='', type=str, metavar='NAME', + help='Image resize interpolation type (overrides model)') +parser.add_argument('-b', '--batch-size', type=int, default=128, metavar='N', + help='input batch size for training (default: 128)') +parser.add_argument('-vb', '--validation-batch-size', type=int, default=None, metavar='N', + help='validation batch size override (default: None)') + +# Optimizer parameters +parser.add_argument('--opt', default='sgd', type=str, metavar='OPTIMIZER', + help='Optimizer (default: "sgd"') +parser.add_argument('--opt-eps', default=None, type=float, metavar='EPSILON', + help='Optimizer Epsilon (default: None, use opt 
default)') +parser.add_argument('--opt-betas', default=None, type=float, nargs='+', metavar='BETA', + help='Optimizer Betas (default: None, use opt default)') +parser.add_argument('--momentum', type=float, default=0.9, metavar='M', + help='Optimizer momentum (default: 0.9)') +parser.add_argument('--weight-decay', type=float, default=2e-5, + help='weight decay (default: 2e-5)') +parser.add_argument('--clip-grad', type=float, default=None, metavar='NORM', + help='Clip gradient norm (default: None, no clipping)') +parser.add_argument('--clip-mode', type=str, default='norm', + help='Gradient clipping mode. One of ("norm", "value", "agc")') + +# Learning rate schedule parameters +parser.add_argument('--sched', default='cosine', type=str, metavar='SCHEDULER', + help='LR scheduler (default: "step"') +parser.add_argument('--lr', type=float, default=0.05, metavar='LR', + help='learning rate (default: 0.05)') +parser.add_argument('--lr-noise', type=float, nargs='+', default=None, metavar='pct, pct', + help='learning rate noise on/off epoch percentages') +parser.add_argument('--lr-noise-pct', type=float, default=0.67, metavar='PERCENT', + help='learning rate noise limit percent (default: 0.67)') +parser.add_argument('--lr-noise-std', type=float, default=1.0, metavar='STDDEV', + help='learning rate noise std-dev (default: 1.0)') +parser.add_argument('--lr-cycle-mul', type=float, default=1.0, metavar='MULT', + help='learning rate cycle len multiplier (default: 1.0)') +parser.add_argument('--lr-cycle-decay', type=float, default=0.5, metavar='MULT', + help='amount to decay each learning rate cycle (default: 0.5)') +parser.add_argument('--lr-cycle-limit', type=int, default=1, metavar='N', + help='learning rate cycle limit, cycles enabled if > 1') +parser.add_argument('--lr-k-decay', type=float, default=1.0, + help='learning rate k-decay for cosine/poly (default: 1.0)') +parser.add_argument('--warmup-lr', type=float, default=0.0001, metavar='LR', + help='warmup learning rate 
(default: 0.0001)') +parser.add_argument('--min-lr', type=float, default=1e-6, metavar='LR', + help='lower lr bound for cyclic schedulers that hit 0 (1e-5)') +parser.add_argument('--epochs', type=int, default=300, metavar='N', + help='number of epochs to train (default: 300)') +parser.add_argument('--epoch-repeats', type=float, default=0., metavar='N', + help='epoch repeat multiplier (number of times to repeat dataset epoch per train epoch).') +parser.add_argument('--start-epoch', default=None, type=int, metavar='N', + help='manual epoch number (useful on restarts)') +parser.add_argument('--decay-epochs', type=float, default=100, metavar='N', + help='epoch interval to decay LR') +parser.add_argument('--warmup-epochs', type=int, default=3, metavar='N', + help='epochs to warmup LR, if scheduler supports') +parser.add_argument('--cooldown-epochs', type=int, default=10, metavar='N', + help='epochs to cooldown LR at min_lr, after cyclic schedule ends') +parser.add_argument('--patience-epochs', type=int, default=10, metavar='N', + help='patience epochs for Plateau LR scheduler (default: 10') +parser.add_argument('--decay-rate', '--dr', type=float, default=0.1, metavar='RATE', + help='LR decay rate (default: 0.1)') + +# Augmentation & regularization parameters +parser.add_argument('--no-aug', action='store_true', default=False, + help='Disable all training augmentation, override other train aug args') +parser.add_argument('--scale', type=float, nargs='+', default=[0.08, 1.0], metavar='PCT', + help='Random resize scale (default: 0.08 1.0)') +parser.add_argument('--ratio', type=float, nargs='+', default=[3. / 4., 4. 
/ 3.], metavar='RATIO', + help='Random resize aspect ratio (default: 0.75 1.33)') +parser.add_argument('--hflip', type=float, default=0.5, + help='Horizontal flip training aug probability') +parser.add_argument('--vflip', type=float, default=0., + help='Vertical flip training aug probability') +parser.add_argument('--color-jitter', type=float, default=0.4, metavar='PCT', + help='Color jitter factor (default: 0.4)') +parser.add_argument('--aa', type=str, default=None, metavar='NAME', + help='Use AutoAugment policy. "v0" or "original". (default: None)'), +parser.add_argument('--aug-repeats', type=int, default=0, + help='Number of augmentation repetitions (distributed training only) (default: 0)') +parser.add_argument('--aug-splits', type=int, default=0, + help='Number of augmentation splits (default: 0, valid: 0 or >=2)') +parser.add_argument('--jsd-loss', action='store_true', default=False, + help='Enable Jensen-Shannon Divergence + CE loss. Use with `--aug-splits`.') +parser.add_argument('--bce-loss', action='store_true', default=False, + help='Enable BCE loss w/ Mixup/CutMix use.') +parser.add_argument('--bce-target-thresh', type=float, default=None, + help='Threshold for binarizing softened BCE targets (default: None, disabled)') +parser.add_argument('--reprob', type=float, default=0., metavar='PCT', + help='Random erase prob (default: 0.)') +parser.add_argument('--remode', type=str, default='pixel', + help='Random erase mode (default: "pixel")') +parser.add_argument('--recount', type=int, default=1, + help='Random erase count (default: 1)') +parser.add_argument('--resplit', action='store_true', default=False, + help='Do not random erase first (clean) augmentation split') +parser.add_argument('--mixup', type=float, default=0.0, + help='mixup alpha, mixup enabled if > 0. (default: 0.)') +parser.add_argument('--cutmix', type=float, default=0.0, + help='cutmix alpha, cutmix enabled if > 0. 
(default: 0.)') +parser.add_argument('--cutmix-minmax', type=float, nargs='+', default=None, + help='cutmix min/max ratio, overrides alpha and enables cutmix if set (default: None)') +parser.add_argument('--mixup-prob', type=float, default=1.0, + help='Probability of performing mixup or cutmix when either/both is enabled') +parser.add_argument('--mixup-switch-prob', type=float, default=0.5, + help='Probability of switching to cutmix when both mixup and cutmix enabled') +parser.add_argument('--mixup-mode', type=str, default='batch', + help='How to apply mixup/cutmix params. Per "batch", "pair", or "elem"') +parser.add_argument('--mixup-off-epoch', default=0, type=int, metavar='N', + help='Turn off mixup after this epoch, disabled if 0 (default: 0)') +parser.add_argument('--smoothing', type=float, default=0.1, + help='Label smoothing (default: 0.1)') +parser.add_argument('--train-interpolation', type=str, default='random', + help='Training interpolation (random, bilinear, bicubic default: "random")') +parser.add_argument('--drop', type=float, default=0.0, metavar='PCT', + help='Dropout rate (default: 0.)') +parser.add_argument('--drop-connect', type=float, default=None, metavar='PCT', + help='Drop connect rate, DEPRECATED, use drop-path (default: None)') +parser.add_argument('--drop-path', type=float, default=None, metavar='PCT', + help='Drop path rate (default: None)') +parser.add_argument('--drop-block', type=float, default=None, metavar='PCT', + help='Drop block rate (default: None)') + +# Batch norm parameters (only works with gen_efficientnet based models currently) +parser.add_argument('--bn-momentum', type=float, default=None, + help='BatchNorm momentum override (if not None)') +parser.add_argument('--bn-eps', type=float, default=None, + help='BatchNorm epsilon override (if not None)') +parser.add_argument('--sync-bn', action='store_true', + help='Enable NVIDIA Apex or Torch synchronized BatchNorm.') +parser.add_argument('--dist-bn', type=str, 
default='reduce', + help='Distribute BatchNorm stats between nodes after each epoch ("broadcast", "reduce", or "")') +parser.add_argument('--split-bn', action='store_true', + help='Enable separate BN layers per augmentation split.') + +# Model Exponential Moving Average +parser.add_argument('--model-ema', action='store_true', default=False, + help='Enable tracking moving average of model weights') +parser.add_argument('--model-ema-force-cpu', action='store_true', default=False, + help='Force ema to be tracked on CPU, rank=0 node only. Disables EMA validation.') +parser.add_argument('--model-ema-decay', type=float, default=0.9998, + help='decay factor for model weights moving average (default: 0.9998)') + +# Misc +parser.add_argument('--seed', type=int, default=42, metavar='S', + help='random seed (default: 42)') +parser.add_argument('--worker-seeding', type=str, default='all', + help='worker seed mode (default: all)') +parser.add_argument('--log-interval', type=int, default=50, metavar='N', + help='how many batches to wait before logging training status') +parser.add_argument('--recovery-interval', type=int, default=0, metavar='N', + help='how many batches to wait before writing recovery checkpoint') +parser.add_argument('--checkpoint-hist', type=int, default=10, metavar='N', + help='number of checkpoints to keep (default: 10)') +parser.add_argument('-j', '--workers', type=int, default=4, metavar='N', + help='how many training processes to use (default: 4)') +parser.add_argument('--save-images', action='store_true', default=False, + help='save images of input bathes every log interval for debugging') +parser.add_argument('--amp', action='store_true', default=False, + help='use NVIDIA Apex AMP or Native AMP for mixed precision training') +parser.add_argument('--apex-amp', action='store_true', default=False, + help='Use NVIDIA Apex AMP mixed precision') +parser.add_argument('--native-amp', action='store_true', default=False, + help='Use Native Torch AMP mixed 
precision') +parser.add_argument('--no-ddp-bb', action='store_true', default=False, + help='Force broadcast buffers for native DDP to off.') +parser.add_argument('--channels-last', action='store_true', default=False, + help='Use channels_last memory layout') +parser.add_argument('--pin-mem', action='store_true', default=False, + help='Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.') +parser.add_argument('--no-prefetcher', action='store_true', default=False, + help='disable fast prefetcher') +parser.add_argument('--output', default='', type=str, metavar='PATH', + help='path to output folder (default: none, current dir)') +parser.add_argument('--experiment', default='', type=str, metavar='NAME', + help='name of train experiment, name of sub-folder for output') +parser.add_argument('--eval-metric', default='top1', type=str, metavar='EVAL_METRIC', + help='Best metric (default: "top1"') +parser.add_argument('--tta', type=int, default=0, metavar='N', + help='Test/inference time augmentation (oversampling) factor. 0=None (default: 0)') +parser.add_argument("--local_rank", default=0, type=int) +parser.add_argument('--use-multi-epochs-loader', action='store_true', default=False, + help='use the multi-epochs-loader to save time at the beginning of every epoch') +parser.add_argument('--torchscript', dest='torchscript', action='store_true', + help='convert model torchscript for inference') +parser.add_argument('--fuser', default='', type=str, + help="Select jit fuser. 
One of ('', 'te', 'old', 'nvfuser')") +parser.add_argument('--log-wandb', action='store_true', default=False, + help='log training and validation metrics to wandb') +parser.add_argument('--log-clearml', action='store_true', default=False, + help='log training, validation metrics, and weights to clearml') +parser.add_argument('--task-name', default='', type=str, metavar='NAME', + help='name of train task') +parser.add_argument('--output-uri', default='', type=str, metavar='NAME', + help='uri to save weights of model') +parser.add_argument('--log-s3', action='store_true', default=False, + help='weights to s3') + + +def _parse_args(): + # Do we have a config file to parse? + args_config, remaining = config_parser.parse_known_args() + if args_config.config: + with open(args_config.config, 'r') as f: + cfg = yaml.safe_load(f) + parser.set_defaults(**cfg) + + # The main arg parser parses the rest of the args, the usual + # defaults will have been overridden if config file specified. + args = parser.parse_args(remaining) + + # Cache the args as a text string to save them in the output dir later + args_text = yaml.safe_dump(args.__dict__, default_flow_style=False) + return args, args_text + + +def main(): + setup_default_logging() + args, args_text = _parse_args() + + if args.local_rank == 0: + if args.log_wandb: + if has_wandb: + wandb.init(project=args.experiment, config=args) + else: + _logger.warning("You've requested to log metrics to wandb but package not found. " + "Metrics not being logged to wandb, try `pip install wandb`") + if args.log_clearml: + if has_clearml: + task = clearml.Task.init(project_name=args.experiment, + task_name=args.task_name, + output_uri=args.output_uri) + else: + _logger.warning("You've requested to log metrics to clearml but package not found. 
" + "Metrics not being logged to clearml, try `pip install clearml`") + + args.prefetcher = not args.no_prefetcher + args.distributed = False + if 'WORLD_SIZE' in os.environ: + args.distributed = int(os.environ['WORLD_SIZE']) > 1 + args.device = 'cuda:0' + args.world_size = 1 + args.rank = 0 # global rank + if args.distributed: + args.device = 'cuda:%d' % args.local_rank + torch.cuda.set_device(args.local_rank) + torch.distributed.init_process_group(backend='nccl', init_method='env://') + args.world_size = torch.distributed.get_world_size() + args.rank = torch.distributed.get_rank() + _logger.info('Training in distributed mode with multiple processes, 1 GPU per process. Process %d, total %d.' + % (args.rank, args.world_size)) + else: + _logger.info('Training with a single process on 1 GPUs.') + assert args.rank >= 0 + + # resolve AMP arguments based on PyTorch / Apex availability + use_amp = None + if args.amp: + # `--amp` chooses native amp before apex (APEX ver not actively maintained) + if has_native_amp: + args.native_amp = True + elif has_apex: + args.apex_amp = True + if args.apex_amp and has_apex: + use_amp = 'apex' + elif args.native_amp and has_native_amp: + use_amp = 'native' + elif args.apex_amp or args.native_amp: + _logger.warning("Neither APEX or native Torch AMP is available, using float32. 
" + "Install NVIDA apex or upgrade to PyTorch 1.6") + + random_seed(args.seed, args.rank) + + if args.fuser: + set_jit_fuser(args.fuser) + + model = create_model( + args.model, + pretrained=args.pretrained, + num_classes=args.num_classes, + drop_rate=args.drop, + drop_connect_rate=args.drop_connect, # DEPRECATED, use drop_path + drop_path_rate=args.drop_path, + drop_block_rate=args.drop_block, + global_pool=args.gp, + bn_momentum=args.bn_momentum, + bn_eps=args.bn_eps, + scriptable=args.torchscript, + checkpoint_path=args.initial_checkpoint) + if args.num_classes is None: + assert hasattr(model, 'num_classes'), 'Model must have `num_classes` attr if not set on cmd line/config.' + args.num_classes = model.num_classes # FIXME handle model default vs config num_classes more elegantly + + if args.local_rank == 0: + _logger.info( + f'Model {safe_model_name(args.model)} created, param count:{sum([m.numel() for m in model.parameters()])}') + + data_config = resolve_data_config(vars(args), model=model, verbose=args.local_rank == 0) + + # setup augmentation batch splits for contrastive loss or split bn + num_aug_splits = 0 + if args.aug_splits > 0: + assert args.aug_splits > 1, 'A split of 1 makes no sense' + num_aug_splits = args.aug_splits + + # enable split bn (separate bn stats per batch-portion) + if args.split_bn: + assert num_aug_splits > 1 or args.resplit + model = convert_splitbn_model(model, max(num_aug_splits, 2)) + + # move model to GPU, enable channels last layout if set + model.cuda() + if args.channels_last: + model = model.to(memory_format=torch.channels_last) + + # setup synchronized BatchNorm for distributed training + if args.distributed and args.sync_bn: + assert not args.split_bn + if has_apex and use_amp == 'apex': + # Apex SyncBN preferred unless native amp is activated + model = convert_syncbn_model(model) + else: + model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model) + if args.local_rank == 0: + _logger.info( + 'Converted model to use 
Synchronized BatchNorm. WARNING: You may have issues if using ' + 'zero initialized BN layers (enabled by default for ResNets) while sync-bn enabled.') + + if args.torchscript: + assert not use_amp == 'apex', 'Cannot use APEX AMP with torchscripted model' + assert not args.sync_bn, 'Cannot use SyncBatchNorm with torchscripted model' + model = torch.jit.script(model) + + optimizer = create_optimizer_v2(model, **optimizer_kwargs(cfg=args)) + + # setup automatic mixed-precision (AMP) loss scaling and op casting + amp_autocast = suppress # do nothing + loss_scaler = None + if use_amp == 'apex': + model, optimizer = amp.initialize(model, optimizer, opt_level='O1') + loss_scaler = ApexScaler() + if args.local_rank == 0: + _logger.info('Using NVIDIA APEX AMP. Training in mixed precision.') + elif use_amp == 'native': + amp_autocast = torch.cuda.amp.autocast + loss_scaler = NativeScaler() + if args.local_rank == 0: + _logger.info('Using native Torch AMP. Training in mixed precision.') + else: + if args.local_rank == 0: + _logger.info('AMP not enabled. 
Training in float32.') + + # optionally resume from a checkpoint + resume_epoch = None + if args.resume: + resume_epoch = resume_checkpoint( + model, args.resume, + optimizer=None if args.no_resume_opt else optimizer, + loss_scaler=None if args.no_resume_opt else loss_scaler, + log_info=args.local_rank == 0) + + # setup exponential moving average of model weights, SWA could be used here too + model_ema = None + if args.model_ema: + # Important to create EMA model after cuda(), DP wrapper, and AMP but before SyncBN and DDP wrapper + model_ema = ModelEmaV2( + model, decay=args.model_ema_decay, device='cpu' if args.model_ema_force_cpu else None) + if args.resume: + load_checkpoint(model_ema.module, args.resume, use_ema=True) + + # setup distributed training + if args.distributed: + if has_apex and use_amp == 'apex': + # Apex DDP preferred unless native amp is activated + if args.local_rank == 0: + _logger.info("Using NVIDIA APEX DistributedDataParallel.") + model = ApexDDP(model, delay_allreduce=True) + else: + if args.local_rank == 0: + _logger.info("Using native Torch DistributedDataParallel.") + model = NativeDDP(model, device_ids=[args.local_rank], broadcast_buffers=not args.no_ddp_bb) + # NOTE: EMA model does not need to be wrapped by DDP + + # setup learning rate schedule and starting epoch + lr_scheduler, num_epochs = create_scheduler(args, optimizer) + start_epoch = 0 + if args.start_epoch is not None: + # a specified start_epoch will always override the resume epoch + start_epoch = args.start_epoch + elif resume_epoch is not None: + start_epoch = resume_epoch + if lr_scheduler is not None and start_epoch > 0: + lr_scheduler.step(start_epoch) + + if args.local_rank == 0: + _logger.info('Scheduled epochs: {}'.format(num_epochs)) + + # create the train and eval datasets + dataset_train = create_dataset( + args.dataset, root=args.data_dir, split=args.train_split, is_training=True, + class_map=args.class_map, + download=args.dataset_download, + 
batch_size=args.batch_size, + repeats=args.epoch_repeats) + dataset_eval = create_dataset( + args.dataset, root=args.data_dir, split=args.val_split, is_training=False, + class_map=args.class_map, + download=args.dataset_download, + batch_size=args.batch_size) + + # setup mixup / cutmix + collate_fn = None + mixup_fn = None + mixup_active = args.mixup > 0 or args.cutmix > 0. or args.cutmix_minmax is not None + if mixup_active: + mixup_args = dict( + mixup_alpha=args.mixup, cutmix_alpha=args.cutmix, cutmix_minmax=args.cutmix_minmax, + prob=args.mixup_prob, switch_prob=args.mixup_switch_prob, mode=args.mixup_mode, + label_smoothing=args.smoothing, num_classes=args.num_classes) + if args.prefetcher: + assert not num_aug_splits # collate conflict (need to support deinterleaving in collate mixup) + collate_fn = FastCollateMixup(**mixup_args) + else: + mixup_fn = Mixup(**mixup_args) + + # wrap dataset in AugMix helper + if num_aug_splits > 1: + dataset_train = AugMixDataset(dataset_train, num_splits=num_aug_splits) + + # create data loaders w/ augmentation pipeiine + train_interpolation = args.train_interpolation + if args.no_aug or not train_interpolation: + train_interpolation = data_config['interpolation'] + loader_train = create_loader( + dataset_train, + input_size=data_config['input_size'], + batch_size=args.batch_size, + is_training=True, + use_prefetcher=args.prefetcher, + no_aug=args.no_aug, + re_prob=args.reprob, + re_mode=args.remode, + re_count=args.recount, + re_split=args.resplit, + scale=args.scale, + ratio=args.ratio, + hflip=args.hflip, + vflip=args.vflip, + color_jitter=args.color_jitter, + auto_augment=args.aa, + num_aug_repeats=args.aug_repeats, + num_aug_splits=num_aug_splits, + interpolation=train_interpolation, + mean=data_config['mean'], + std=data_config['std'], + num_workers=args.workers, + distributed=args.distributed, + collate_fn=collate_fn, + pin_memory=args.pin_mem, + use_multi_epochs_loader=args.use_multi_epochs_loader, + 
worker_seeding=args.worker_seeding, + ) + + loader_eval = create_loader( + dataset_eval, + input_size=data_config['input_size'], + batch_size=args.validation_batch_size or args.batch_size, + is_training=False, + use_prefetcher=args.prefetcher, + interpolation=data_config['interpolation'], + mean=data_config['mean'], + std=data_config['std'], + num_workers=args.workers, + distributed=args.distributed, + crop_pct=data_config['crop_pct'], + pin_memory=args.pin_mem, + ) + + # setup loss function + if args.jsd_loss: + assert num_aug_splits > 1 # JSD only valid with aug splits set + train_loss_fn = JsdCrossEntropy(num_splits=num_aug_splits, smoothing=args.smoothing) + elif mixup_active: + # smoothing is handled with mixup target transform which outputs sparse, soft targets + if args.bce_loss: + train_loss_fn = BinaryCrossEntropy(target_threshold=args.bce_target_thresh) + else: + train_loss_fn = SoftTargetCrossEntropy() + elif args.smoothing: + if args.bce_loss: + train_loss_fn = BinaryCrossEntropy(smoothing=args.smoothing, target_threshold=args.bce_target_thresh) + else: + train_loss_fn = LabelSmoothingCrossEntropy(smoothing=args.smoothing) + else: + train_loss_fn = nn.CrossEntropyLoss() + train_loss_fn = train_loss_fn.cuda() + validate_loss_fn = nn.CrossEntropyLoss().cuda() + + # setup checkpoint saver and eval metric tracking + eval_metric = args.eval_metric + best_metric = None + best_epoch = None + saver = None + output_dir = None + if args.rank == 0: + if args.experiment: + exp_name = args.experiment + else: + exp_name = '-'.join([ + datetime.now().strftime("%Y%m%d-%H%M%S"), + safe_model_name(args.model), + str(data_config['input_size'][-1]) + ]) + if args.task_name: + exp_name = os.path.join(exp_name, args.task_name) + output_dir = get_outdir(args.output if args.output else './output/train', exp_name) + decreasing = True if eval_metric == 'loss' else False + saver = cs.CheckpointSaver( + model=model, optimizer=optimizer, args=args, model_ema=model_ema, 
amp_scaler=loss_scaler, + checkpoint_dir=output_dir, recovery_dir=output_dir, decreasing=decreasing, max_history=args.checkpoint_hist, + log_clearml=args.log_clearml, log_s3=args.log_s3 + ) + with open(os.path.join(output_dir, 'args.yaml'), 'w') as f: + f.write(args_text) + + try: + for epoch in range(start_epoch, num_epochs): + if args.distributed and hasattr(loader_train.sampler, 'set_epoch'): + loader_train.sampler.set_epoch(epoch) + + train_metrics = train_one_epoch( + epoch, model, loader_train, optimizer, train_loss_fn, args, + lr_scheduler=lr_scheduler, saver=saver, output_dir=output_dir, + amp_autocast=amp_autocast, loss_scaler=loss_scaler, model_ema=model_ema, mixup_fn=mixup_fn) + + if args.distributed and args.dist_bn in ('broadcast', 'reduce'): + if args.local_rank == 0: + _logger.info("Distributing BatchNorm running means and vars") + distribute_bn(model, args.world_size, args.dist_bn == 'reduce') + + eval_metrics = validate(model, loader_eval, validate_loss_fn, args, amp_autocast=amp_autocast) + + if model_ema is not None and not args.model_ema_force_cpu: + if args.distributed and args.dist_bn in ('broadcast', 'reduce'): + distribute_bn(model_ema, args.world_size, args.dist_bn == 'reduce') + ema_eval_metrics = validate( + model_ema.module, loader_eval, validate_loss_fn, args, amp_autocast=amp_autocast, log_suffix=' (EMA)') + eval_metrics = ema_eval_metrics + + if lr_scheduler is not None: + # step LR for next epoch + lr_scheduler.step(epoch + 1, eval_metrics[eval_metric]) + + if output_dir is not None: + sm.update_summary( + epoch, train_metrics, eval_metrics, os.path.join(output_dir, 'summary.csv'), + write_header=best_metric is None, log_wandb=args.log_wandb and has_wandb, log_clearml=args.log_clearml and has_clearml) + + if saver is not None: + # save proper checkpoint with eval metric + save_metric = eval_metrics[eval_metric] + best_metric, best_epoch = saver.save_checkpoint(epoch, metric=save_metric) + + except KeyboardInterrupt: + pass + if 
best_metric is not None: + _logger.info('*** Best metric: {0} (epoch {1})'.format(best_metric, best_epoch)) + + +def train_one_epoch( + epoch, model, loader, optimizer, loss_fn, args, + lr_scheduler=None, saver=None, output_dir=None, amp_autocast=suppress, + loss_scaler=None, model_ema=None, mixup_fn=None): + if args.mixup_off_epoch and epoch >= args.mixup_off_epoch: + if args.prefetcher and loader.mixup_enabled: + loader.mixup_enabled = False + elif mixup_fn is not None: + mixup_fn.mixup_enabled = False + + second_order = hasattr(optimizer, 'is_second_order') and optimizer.is_second_order + batch_time_m = AverageMeter() + data_time_m = AverageMeter() + losses_m = AverageMeter() + + model.train() + + end = time.time() + last_idx = len(loader) - 1 + num_updates = epoch * len(loader) + for batch_idx, (input, target) in enumerate(loader): + last_batch = batch_idx == last_idx + data_time_m.update(time.time() - end) + if not args.prefetcher: + input, target = input.cuda(), target.cuda() + if mixup_fn is not None: + input, target = mixup_fn(input, target) + if args.channels_last: + input = input.contiguous(memory_format=torch.channels_last) + + with amp_autocast(): + output = model(input) + loss = loss_fn(output, target) + + if not args.distributed: + losses_m.update(loss.item(), input.size(0)) + + optimizer.zero_grad() + if loss_scaler is not None: + loss_scaler( + loss, optimizer, + clip_grad=args.clip_grad, clip_mode=args.clip_mode, + parameters=model_parameters(model, exclude_head='agc' in args.clip_mode), + create_graph=second_order) + else: + loss.backward(create_graph=second_order) + if args.clip_grad is not None: + dispatch_clip_grad( + model_parameters(model, exclude_head='agc' in args.clip_mode), + value=args.clip_grad, mode=args.clip_mode) + optimizer.step() + + if model_ema is not None: + model_ema.update(model) + + torch.cuda.synchronize() + num_updates += 1 + batch_time_m.update(time.time() - end) + if last_batch or batch_idx % args.log_interval == 0: + lrl 
= [param_group['lr'] for param_group in optimizer.param_groups] + lr = sum(lrl) / len(lrl) + + if args.distributed: + reduced_loss = reduce_tensor(loss.data, args.world_size) + losses_m.update(reduced_loss.item(), input.size(0)) + + if args.local_rank == 0: + _logger.info( + 'Train: {} [{:>4d}/{} ({:>3.0f}%)] ' + 'Loss: {loss.val:#.4g} ({loss.avg:#.3g}) ' + 'Time: {batch_time.val:.3f}s, {rate:>7.2f}/s ' + '({batch_time.avg:.3f}s, {rate_avg:>7.2f}/s) ' + 'LR: {lr:.3e} ' + 'Data: {data_time.val:.3f} ({data_time.avg:.3f})'.format( + epoch, + batch_idx, len(loader), + 100. * batch_idx / last_idx, + loss=losses_m, + batch_time=batch_time_m, + rate=input.size(0) * args.world_size / batch_time_m.val, + rate_avg=input.size(0) * args.world_size / batch_time_m.avg, + lr=lr, + data_time=data_time_m)) + if args.log_clearml: + clearml.Logger.current_logger().report_scalar("train_iter", "avg_loss", iteration=batch_idx + len(loader) * epoch, value=losses_m.avg) + + if args.save_images and output_dir: + torchvision.utils.save_image( + input, + os.path.join(output_dir, 'train-batch-%dPlease specify the project name..jpg' % batch_idx), + padding=0, + normalize=True) + + if saver is not None and args.recovery_interval and ( + last_batch or (batch_idx + 1) % args.recovery_interval == 0): + saver.save_recovery(epoch, batch_idx=batch_idx) + + if lr_scheduler is not None: + lr_scheduler.step_update(num_updates=num_updates, metric=losses_m.avg) + + end = time.time() + # end for + + if hasattr(optimizer, 'sync_lookahead'): + optimizer.sync_lookahead() + + return OrderedDict([('loss', losses_m.avg)]) + + +def validate(model, loader, loss_fn, args, amp_autocast=suppress, log_suffix=''): + batch_time_m = AverageMeter() + losses_m = AverageMeter() + top1_m = AverageMeter() + top5_m = AverageMeter() + + model.eval() + + end = time.time() + last_idx = len(loader) - 1 + with torch.no_grad(): + for batch_idx, (input, target) in enumerate(loader): + last_batch = batch_idx == last_idx + if not 
args.prefetcher: + input = input.cuda() + target = target.cuda() + if args.channels_last: + input = input.contiguous(memory_format=torch.channels_last) + + with amp_autocast(): + output = model(input) + if isinstance(output, (tuple, list)): + output = output[0] + + # augmentation reduction + reduce_factor = args.tta + if reduce_factor > 1: + output = output.unfold(0, reduce_factor, reduce_factor).mean(dim=2) + target = target[0:target.size(0):reduce_factor] + + loss = loss_fn(output, target) + acc1, acc5 = accuracy(output, target, topk=(1, 5)) + + if args.distributed: + reduced_loss = reduce_tensor(loss.data, args.world_size) + acc1 = reduce_tensor(acc1, args.world_size) + acc5 = reduce_tensor(acc5, args.world_size) + else: + reduced_loss = loss.data + + torch.cuda.synchronize() + + losses_m.update(reduced_loss.item(), input.size(0)) + top1_m.update(acc1.item(), output.size(0)) + top5_m.update(acc5.item(), output.size(0)) + + batch_time_m.update(time.time() - end) + end = time.time() + if args.local_rank == 0 and (last_batch or batch_idx % args.log_interval == 0): + log_name = 'Test' + log_suffix + _logger.info( + '{0}: [{1:>4d}/{2}] ' + 'Time: {batch_time.val:.3f} ({batch_time.avg:.3f}) ' + 'Loss: {loss.val:>7.4f} ({loss.avg:>6.4f}) ' + 'Acc@1: {top1.val:>7.4f} ({top1.avg:>7.4f}) ' + 'Acc@5: {top5.val:>7.4f} ({top5.avg:>7.4f})'.format( + log_name, batch_idx, last_idx, batch_time=batch_time_m, + loss=losses_m, top1=top1_m, top5=top5_m)) + + metrics = OrderedDict([('loss', losses_m.avg), ('top1', top1_m.avg), ('top5', top5_m.avg)]) + + return metrics + + +if __name__ == '__main__': + main() diff --git a/PyTorch/build-in/Classification/Sequencer2D/sequencer/utils/__init__.py b/PyTorch/build-in/Classification/Sequencer2D/sequencer/utils/__init__.py new file mode 100644 index 000000000..f04000a2f --- /dev/null +++ b/PyTorch/build-in/Classification/Sequencer2D/sequencer/utils/__init__.py @@ -0,0 +1 @@ +from .timm import * \ No newline at end of file diff --git 
# Copyright (c) 2022. Yuki Tatsunami
# Licensed under the Apache License, Version 2.0 (the "License");
"""Shared helpers: dotted attribute access, n-tuple coercion, RNN
train-mode forcing, and channel-wise input normalization."""

from itertools import repeat
import functools
import collections

import torch
from torch import nn


def rsetattr(obj, attr, val):
    """Set a possibly dotted attribute path, e.g. rsetattr(m, 'a.b.c', v)."""
    pre, _, post = attr.rpartition('.')
    # Resolve everything up to the last dot, then plain setattr on the leaf.
    return setattr(rgetattr(obj, pre) if pre else obj, post, val)


def rgetattr(obj, attr, *args):
    """Get a possibly dotted attribute path; an optional extra positional
    argument is used as the default when any segment is missing."""
    def _getattr(obj, attr):
        return getattr(obj, attr, *args)

    return functools.reduce(_getattr, [obj] + attr.split('.'))


# From PyTorch internals
def _ntuple(n):
    """Return a parser coercing its input to an n-tuple.

    Non-string iterables are materialized as a tuple; scalars AND strings
    are repeated n times.  Excluding str from the iterable fast-path fixes
    the original bug where to_2tuple("ab") returned "ab" unchanged — this
    matches the upstream timm implementation.
    """
    def parse(x):
        if isinstance(x, collections.abc.Iterable) and not isinstance(x, (str, bytes)):
            return tuple(x)
        return tuple(repeat(x, n))

    return parse


to_1tuple = _ntuple(1)
to_2tuple = _ntuple(2)
to_3tuple = _ntuple(3)
to_4tuple = _ntuple(4)
to_ntuple = _ntuple


def train_rnn(model):
    """Force every recurrent module into train mode (e.g. to keep RNN
    dropout behaviour consistent while the rest of the model is in eval).

    Uses modules() rather than children() so the fix also applies when
    `model` itself is an nn.LSTM/GRU/RNN — the original children()-based
    recursion silently skipped that case.
    """
    for m in model.modules():  # modules() yields model itself, then all descendants
        if isinstance(m, (nn.LSTM, nn.GRU, nn.RNN)):
            m.train()


def normalize_fn(tensor, mean, std):
    """Channel-wise (x - mean) / std.

    Assumes NCHW layout with mean/std shaped (C,) — broadcast over
    batch and spatial dims.  Non-inplace: returns a new tensor.
    """
    mean = mean[None, :, None, None]
    std = std[None, :, None, None]
    return tensor.sub(mean).div(std)


class NormalizeByChannelMeanStd(nn.Module):
    """nn.Module wrapper around normalize_fn.

    mean/std are registered as buffers so they follow the module across
    .to(device)/.cuda() calls and are saved in state_dict, but receive
    no gradients.
    """

    def __init__(self, mean, std):
        super(NormalizeByChannelMeanStd, self).__init__()
        if not isinstance(mean, torch.Tensor):
            mean = torch.tensor(mean)
        if not isinstance(std, torch.Tensor):
            std = torch.tensor(std)
        self.register_buffer("mean", mean)
        self.register_buffer("std", std)

    def forward(self, tensor):
        return normalize_fn(tensor, self.mean, self.std)

    def extra_repr(self):
        return 'mean={}, std={}'.format(self.mean, self.std)


class WithNone:
    """No-op context manager (equivalent to contextlib.nullcontext()).

    __exit__ returns None, so exceptions are never suppressed.
    """

    def __enter__(self):
        pass

    def __exit__(self, exc_type, exc_value, traceback):
        pass
diff --git a/PyTorch/build-in/Classification/Sequencer2D/sequencer/utils/timm/__init__.py b/PyTorch/build-in/Classification/Sequencer2D/sequencer/utils/timm/__init__.py new file mode 100644 index 000000000..269d3443c --- /dev/null +++ b/PyTorch/build-in/Classification/Sequencer2D/sequencer/utils/timm/__init__.py @@ -0,0 +1,2 @@ +from .checkpoint_saver import * +from .summary import * \ No newline at end of file diff --git a/PyTorch/build-in/Classification/Sequencer2D/sequencer/utils/timm/checkpoint_saver.py b/PyTorch/build-in/Classification/Sequencer2D/sequencer/utils/timm/checkpoint_saver.py new file mode 100644 index 000000000..ea657b0a0 --- /dev/null +++ b/PyTorch/build-in/Classification/Sequencer2D/sequencer/utils/timm/checkpoint_saver.py @@ -0,0 +1,163 @@ + +""" Checkpoint Saver + +Track top-n training checkpoints and maintain recovery checkpoints on specified intervals. + +Hacked together by / Copyright 2020 Ross Wightman +""" + +import glob +import operator +import os +import logging + +import torch + +from timm.utils.model import unwrap_model, get_state_dict + + +_logger = logging.getLogger(__name__) + + +class CheckpointSaver: + def __init__( + self, + model, + optimizer, + args=None, + model_ema=None, + amp_scaler=None, + checkpoint_prefix='checkpoint', + recovery_prefix='recovery', + checkpoint_dir='', + recovery_dir='', + decreasing=False, + max_history=10, + unwrap_fn=unwrap_model, + log_clearml=False, + log_s3=False, + ): + + # objects to save state_dicts of + self.model = model + self.optimizer = optimizer + self.args = args + self.model_ema = model_ema + self.amp_scaler = amp_scaler + + # state + self.checkpoint_files = [] # (filename, metric) tuples in order of decreasing betterness + self.best_epoch = None + self.best_metric = None + self.curr_recovery_file = '' + self.last_recovery_file = '' + + # config + self.checkpoint_dir = checkpoint_dir + self.recovery_dir = recovery_dir + self.save_prefix = checkpoint_prefix + self.recovery_prefix = 
recovery_prefix + self.extension = '.pth.tar' + self.decreasing = decreasing # a lower metric is better if True + self.cmp = operator.lt if decreasing else operator.gt # True if lhs better than rhs + self.max_history = max_history + self.unwrap_fn = unwrap_fn + self.log_s3 = log_clearml and log_s3 + assert self.max_history >= 1 + + if self.log_s3: + from clearml import Task + self.task = Task.current_task() + + def save_checkpoint(self, epoch, metric=None): + assert epoch >= 0 + tmp_save_path = os.path.join(self.checkpoint_dir, 'tmp' + self.extension) + last_save_path = os.path.join(self.checkpoint_dir, 'last' + self.extension) + self._save(tmp_save_path, epoch, metric) + if os.path.exists(last_save_path): + os.unlink(last_save_path) # required for Windows support. + os.rename(tmp_save_path, last_save_path) + worst_file = self.checkpoint_files[-1] if self.checkpoint_files else None + if (len(self.checkpoint_files) < self.max_history + or metric is None or self.cmp(metric, worst_file[1])): + if len(self.checkpoint_files) >= self.max_history: + self._cleanup_checkpoints(1) + filename = '-'.join([self.save_prefix, str(epoch)]) + self.extension + save_path = os.path.join(self.checkpoint_dir, filename) + os.link(last_save_path, save_path) + self.checkpoint_files.append((save_path, metric)) + self.checkpoint_files = sorted( + self.checkpoint_files, key=lambda x: x[1], + reverse=not self.decreasing) # sort in descending order if a lower metric is not better + + checkpoints_str = "Current checkpoints:\n" + for c in self.checkpoint_files: + checkpoints_str += ' {}\n'.format(c) + _logger.info(checkpoints_str) + + if metric is not None and (self.best_metric is None or self.cmp(metric, self.best_metric)): + self.best_epoch = epoch + self.best_metric = metric + best_save_path = os.path.join(self.checkpoint_dir, 'model_best' + self.extension) + if os.path.exists(best_save_path): + os.unlink(best_save_path) + os.link(last_save_path, best_save_path) + if self.log_s3: + 
self.task.update_output_model(best_save_path) + if self.log_s3: + self.task.update_output_model(last_save_path) + + return (None, None) if self.best_metric is None else (self.best_metric, self.best_epoch) + + def _save(self, save_path, epoch, metric=None): + save_state = { + 'epoch': epoch, + 'arch': type(self.model).__name__.lower(), + 'state_dict': get_state_dict(self.model, self.unwrap_fn), + 'optimizer': self.optimizer.state_dict(), + 'version': 2, # version < 2 increments epoch before save + } + if self.args is not None: + save_state['arch'] = self.args.model + save_state['args'] = self.args + if self.amp_scaler is not None: + save_state[self.amp_scaler.state_dict_key] = self.amp_scaler.state_dict() + if self.model_ema is not None: + save_state['state_dict_ema'] = get_state_dict(self.model_ema, self.unwrap_fn) + if metric is not None: + save_state['metric'] = metric + torch.save(save_state, save_path) + + def _cleanup_checkpoints(self, trim=0): + trim = min(len(self.checkpoint_files), trim) + delete_index = self.max_history - trim + if delete_index < 0 or len(self.checkpoint_files) <= delete_index: + return + to_delete = self.checkpoint_files[delete_index:] + for d in to_delete: + try: + _logger.debug("Cleaning checkpoint: {}".format(d)) + os.remove(d[0]) + except Exception as e: + _logger.error("Exception '{}' while deleting checkpoint".format(e)) + self.checkpoint_files = self.checkpoint_files[:delete_index] + + def save_recovery(self, epoch, batch_idx=0): + assert epoch >= 0 + filename = '-'.join([self.recovery_prefix, str(epoch), str(batch_idx)]) + self.extension + save_path = os.path.join(self.recovery_dir, filename) + self._save(save_path, epoch) + if os.path.exists(self.last_recovery_file): + try: + _logger.debug("Cleaning recovery: {}".format(self.last_recovery_file)) + os.remove(self.last_recovery_file) + except Exception as e: + _logger.error("Exception '{}' while removing {}".format(e, self.last_recovery_file)) + self.last_recovery_file = 
self.curr_recovery_file + self.curr_recovery_file = save_path + + def find_recovery(self): + recovery_path = os.path.join(self.recovery_dir, self.recovery_prefix) + files = glob.glob(recovery_path + '*' + self.extension) + files = sorted(files) + return files[0] if len(files) else '' diff --git a/PyTorch/build-in/Classification/Sequencer2D/sequencer/utils/timm/dataset_factory.py b/PyTorch/build-in/Classification/Sequencer2D/sequencer/utils/timm/dataset_factory.py new file mode 100644 index 000000000..dbac8b6b3 --- /dev/null +++ b/PyTorch/build-in/Classification/Sequencer2D/sequencer/utils/timm/dataset_factory.py @@ -0,0 +1,158 @@ +""" Dataset Factory + +Hacked together by / Copyright 2021, Ross Wightman +""" +import os + +from timm.data import IterableImageDataset, ImageDataset +from torchvision.datasets import CIFAR100, CIFAR10, MNIST, QMNIST, KMNIST, FashionMNIST, ImageNet, ImageFolder + +try: + from torchvision.datasets import Places365 + has_places365 = True +except ImportError: + has_places365 = False +try: + from torchvision.datasets import INaturalist + has_inaturalist = True +except ImportError: + has_inaturalist = False + + +from datasets import Flowers102, StanfordCars + +_TORCH_BASIC_DS = dict( + cifar10=CIFAR10, + cifar100=CIFAR100, + mnist=MNIST, + qmist=QMNIST, + kmnist=KMNIST, + fashion_mnist=FashionMNIST, +) +_TRAIN_SYNONYM = {'train', 'training'} +_EVAL_SYNONYM = {'val', 'valid', 'validation', 'eval', 'evaluation'} + + +def _search_split(root, split): + # look for sub-folder with name of split in root and use that if it exists + split_name = split.split('[')[0] + try_root = os.path.join(root, split_name) + if os.path.exists(try_root): + return try_root + + def _try(syn): + for s in syn: + try_root = os.path.join(root, s) + if os.path.exists(try_root): + return try_root + return root + if split_name in _TRAIN_SYNONYM: + root = _try(_TRAIN_SYNONYM) + elif split_name in _EVAL_SYNONYM: + root = _try(_EVAL_SYNONYM) + return root + + +def create_dataset( 
+ name, + root, + split='validation', + search_split=True, + class_map=None, + load_bytes=False, + is_training=False, + download=False, + batch_size=None, + repeats=0, + **kwargs +): + """ Dataset factory method + + In parenthesis after each arg are the type of dataset supported for each arg, one of: + * folder - default, timm folder (or tar) based ImageDataset + * torch - torchvision based datasets + * TFDS - Tensorflow-datasets wrapper in IterabeDataset interface via IterableImageDataset + * all - any of the above + + Args: + name: dataset name, empty is okay for folder based datasets + root: root folder of dataset (all) + split: dataset split (all) + search_split: search for split specific child fold from root so one can specify + `imagenet/` instead of `/imagenet/val`, etc on cmd line / config. (folder, torch/folder) + class_map: specify class -> index mapping via text file or dict (folder) + load_bytes: load data, return images as undecoded bytes (folder) + download: download dataset if not present and supported (TFDS, torch) + is_training: create dataset in train mode, this is different from the split. + For Iterable / TDFS it enables shuffle, ignored for other datasets. (TFDS) + batch_size: batch size hint for (TFDS) + repeats: dataset repeats per iteration i.e. 
epoch (TFDS) + **kwargs: other args to pass to dataset + + Returns: + Dataset object + """ + name = name.lower() + if name.startswith('torch/'): + name = name.split('/', 2)[-1] + torch_kwargs = dict(root=root, download=download, **kwargs) + if name in _TORCH_BASIC_DS: + ds_class = _TORCH_BASIC_DS[name] + use_train = split in _TRAIN_SYNONYM + ds = ds_class(train=use_train, **torch_kwargs) + elif name == 'flowers': + if split in _TRAIN_SYNONYM: + split = 'train' + elif split in _EVAL_SYNONYM: + split = 'test' + ds = Flowers102(split=split, **torch_kwargs) + elif name == 'cars': + if split in _TRAIN_SYNONYM: + split = 'train' + elif split in _EVAL_SYNONYM: + split = 'test' + ds = StanfordCars(split=split, **torch_kwargs) + elif name == 'inaturalist' or name == 'inat': + assert has_inaturalist, 'Please update to PyTorch 1.10, torchvision 0.11+ for Inaturalist' + target_type = 'full' + split_split = split.split('/') + if len(split_split) > 1: + target_type = split_split[0].split('_') + if len(target_type) == 1: + target_type = target_type[0] + split = split_split[-1] + if split in _TRAIN_SYNONYM: + split = '2021_train' + elif split in _EVAL_SYNONYM: + split = '2021_valid' + ds = INaturalist(version=split, target_type=target_type, **torch_kwargs) + elif name == 'places365': + assert has_places365, 'Please update to a newer PyTorch and torchvision for Places365 dataset.' 
+ if split in _TRAIN_SYNONYM: + split = 'train-standard' + elif split in _EVAL_SYNONYM: + split = 'val' + ds = Places365(split=split, **torch_kwargs) + elif name == 'imagenet': + if split in _EVAL_SYNONYM: + split = 'val' + ds = ImageNet(split=split, **torch_kwargs) + elif name == 'image_folder' or name == 'folder': + # in case torchvision ImageFolder is preferred over timm ImageDataset for some reason + if search_split and os.path.isdir(root): + # look for split specific sub-folder in root + root = _search_split(root, split) + ds = ImageFolder(root, **kwargs) + else: + assert False, f"Unknown torchvision dataset {name}" + elif name.startswith('tfds/'): + ds = IterableImageDataset( + root, parser=name, split=split, is_training=is_training, + download=download, batch_size=batch_size, repeats=repeats, **kwargs) + else: + # FIXME support more advance split cfg for ImageFolder/Tar datasets in the future + if search_split and os.path.isdir(root): + # look for split specific sub-folder in root + root = _search_split(root, split) + ds = ImageDataset(root, parser=name, class_map=class_map, load_bytes=load_bytes, **kwargs) + return ds diff --git a/PyTorch/build-in/Classification/Sequencer2D/sequencer/utils/timm/summary.py b/PyTorch/build-in/Classification/Sequencer2D/sequencer/utils/timm/summary.py new file mode 100644 index 000000000..9ed3cd36e --- /dev/null +++ b/PyTorch/build-in/Classification/Sequencer2D/sequencer/utils/timm/summary.py @@ -0,0 +1,28 @@ +# Copyright (c) 2022. 
Yuki Tatsunami +# Licensed under the Apache License, Version 2.0 (the "License"); + +import csv +from collections import OrderedDict + + +def update_summary(epoch, train_metrics, eval_metrics, filename, write_header=False, log_wandb=False, log_clearml=False): + rowd = OrderedDict(epoch=epoch) + rowd.update([('train_' + k, v) for k, v in train_metrics.items()]) + rowd.update([('eval_' + k, v) for k, v in eval_metrics.items()]) + if log_wandb: + import wandb + wandb.log(rowd) + if log_clearml: + from clearml import Logger + for k, v in train_metrics.items(): + Logger.current_logger().report_scalar( + "train", k, iteration=epoch, value=v) + for k, v in eval_metrics.items(): + Logger.current_logger().report_scalar( + "eval", k, iteration=epoch, value=v) + + with open(filename, mode='a') as cf: + dw = csv.DictWriter(cf, fieldnames=rowd.keys()) + if write_header: # first iteration (epoch == 1 can't be used) + dw.writeheader() + dw.writerow(rowd) diff --git a/PyTorch/build-in/Classification/Sequencer2D/sequencer/validate.py b/PyTorch/build-in/Classification/Sequencer2D/sequencer/validate.py new file mode 100755 index 000000000..a16f6c296 --- /dev/null +++ b/PyTorch/build-in/Classification/Sequencer2D/sequencer/validate.py @@ -0,0 +1,384 @@ +#!/usr/bin/env python3 +""" ImageNet Validation Script +This is intended to be a lean and easily modifiable ImageNet validation script for evaluating pretrained +models or training checkpoints against ImageNet or similarly organized image datasets. It prioritizes +canonical PyTorch, standard Python style, and good performance. Repurpose as you see fit. 
+Hacked together by Ross Wightman (https://github.com/rwightman) +""" +import argparse +import os +import csv +import glob +import time +import logging +import torch +import torch.nn as nn +import torch.nn.parallel +from collections import OrderedDict +from contextlib import suppress + +from advertorch.attacks import GradientSignAttack, LinfPGDAttack +from timm.models import create_model, apply_test_time_pool, load_checkpoint, is_model, list_models +from timm.data import create_dataset, create_loader, resolve_data_config, RealLabelsImagenet +from timm.utils import accuracy, AverageMeter, natural_key, setup_default_logging, set_jit_legacy, random_seed + +import models +from utils.helpers import NormalizeByChannelMeanStd, WithNone, train_rnn + +has_apex = False +try: + from apex import amp + + has_apex = True +except ImportError: + pass + +has_native_amp = False +try: + if getattr(torch.cuda.amp, 'autocast') is not None: + has_native_amp = True +except AttributeError: + pass + +torch.backends.cudnn.benchmark = True +_logger = logging.getLogger('validate') + +parser = argparse.ArgumentParser(description='PyTorch ImageNet Validation') +parser.add_argument('data', metavar='DIR', + help='path to dataset') +parser.add_argument('--dataset', '-d', metavar='NAME', default='', + help='dataset type (default: ImageFolder/ImageTar if empty)') +parser.add_argument('--split', metavar='NAME', default='validation', + help='dataset split (default: validation)') +parser.add_argument('--dataset-download', action='store_true', default=False, + help='Allow download of dataset for torch/ and tfds/ datasets that support it.') +parser.add_argument('--model', '-m', metavar='NAME', default='dpn92', + help='model architecture (default: dpn92)') +parser.add_argument('-j', '--workers', default=4, type=int, metavar='N', + help='number of data loading workers (default: 2)') +parser.add_argument('-b', '--batch-size', default=256, type=int, + metavar='N', help='mini-batch size (default: 256)') 
+parser.add_argument('--img-size', default=None, type=int, + metavar='N', help='Input image dimension, uses model default if empty') +parser.add_argument('--input-size', default=None, nargs=3, type=int, + metavar='N N N', help='Input all image dimensions (d h w, e.g. --input-size 3 224 224), uses model default if empty') +parser.add_argument('--crop-pct', default=None, type=float, + metavar='N', help='Input image center crop pct') +parser.add_argument('--mean', type=float, nargs='+', default=None, metavar='MEAN', + help='Override mean pixel value of dataset') +parser.add_argument('--std', type=float, nargs='+', default=None, metavar='STD', + help='Override std deviation of of dataset') +parser.add_argument('--interpolation', default='', type=str, metavar='NAME', + help='Image resize interpolation type (overrides model)') +parser.add_argument('--num-classes', type=int, default=None, + help='Number classes in dataset') +parser.add_argument('--class-map', default='', type=str, metavar='FILENAME', + help='path to class to idx mapping file (default: "")') +parser.add_argument('--gp', default=None, type=str, metavar='POOL', + help='Global pool type, one of (fast, avg, max, avgmax, avgmaxc). 
Model default if None.') +parser.add_argument('--log-freq', default=10, type=int, + metavar='N', help='batch logging frequency (default: 10)') +parser.add_argument('--checkpoint', default='', type=str, metavar='PATH', + help='path to latest checkpoint (default: none)') +parser.add_argument('--pretrained', dest='pretrained', action='store_true', + help='use pre-trained model') +parser.add_argument('--num-gpu', type=int, default=1, + help='Number of GPUS to use') +parser.add_argument('--test-pool', dest='test_pool', action='store_true', + help='enable test time pool') +parser.add_argument('--no-prefetcher', action='store_true', default=False, + help='disable fast prefetcher') +parser.add_argument('--pin-mem', action='store_true', default=False, + help='Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.') +parser.add_argument('--channels-last', action='store_true', default=False, + help='Use channels_last memory layout') +parser.add_argument('--amp', action='store_true', default=False, + help='Use AMP mixed precision. 
Defaults to Apex, fallback to native Torch AMP.') +parser.add_argument('--apex-amp', action='store_true', default=False, + help='Use NVIDIA Apex AMP mixed precision') +parser.add_argument('--native-amp', action='store_true', default=False, + help='Use Native Torch AMP mixed precision') +parser.add_argument('--tf-preprocessing', action='store_true', default=False, + help='Use Tensorflow preprocessing pipeline (require CPU TF installed') +parser.add_argument('--use-ema', dest='use_ema', action='store_true', + help='use ema version of weights if present') +parser.add_argument('--torchscript', dest='torchscript', action='store_true', + help='convert model torchscript for inference') +parser.add_argument('--legacy-jit', dest='legacy_jit', action='store_true', + help='use legacy jit mode for pytorch 1.5/1.5.1/1.6 to get back fusion performance') +parser.add_argument('--results-file', default='', type=str, metavar='FILENAME', + help='Output csv file for validation results (summary)') +parser.add_argument('--real-labels', default='', type=str, metavar='FILENAME', + help='Real labels JSON file for imagenet evaluation') +parser.add_argument('--valid-labels', default='', type=str, metavar='FILENAME', + help='Valid label indices txt file for validation of partial label space') +parser.add_argument('--attack-type', default=None, type=str, choices=[None, 'fgsm', 'pgd']) +parser.add_argument('--adv-eps', default=1., type=float) +parser.add_argument('--adv-steps', default=5, type=int) +parser.add_argument('--adv-step-size', default=.5, type=float) +parser.add_argument('--seed', type=int, default=42, metavar='S', help='random seed (default: 42)') + +def validate(args): + # might as well try to validate something + args.pretrained = args.pretrained or not args.checkpoint + args.prefetcher = not args.no_prefetcher + amp_autocast = suppress # do nothing + if args.amp: + if has_native_amp: + args.native_amp = True + elif has_apex: + args.apex_amp = True + else: + 
_logger.warning("Neither APEX or Native Torch AMP is available.") + assert not args.apex_amp or not args.native_amp, "Only one AMP mode should be set." + if args.native_amp: + amp_autocast = torch.cuda.amp.autocast + _logger.info('Validating in mixed precision with native PyTorch AMP.') + elif args.apex_amp: + _logger.info('Validating in mixed precision with NVIDIA APEX AMP.') + else: + _logger.info('Validating in float32. AMP not enabled.') + + if args.legacy_jit: + set_jit_legacy() + + # create model + model = create_model( + args.model, + pretrained=args.pretrained, + num_classes=args.num_classes, + in_chans=3, + global_pool=args.gp, + scriptable=args.torchscript) + if args.num_classes is None: + assert hasattr(model, 'num_classes'), 'Model must have `num_classes` attr if not set on cmd line/config.' + args.num_classes = model.num_classes + + if args.checkpoint: + load_checkpoint(model, args.checkpoint, args.use_ema) + + param_count = sum([m.numel() for m in model.parameters()]) + _logger.info('Model %s created, param count: %d' % (args.model, param_count)) + + data_config = resolve_data_config(vars(args), model=model, use_test_size=True, verbose=True) + test_time_pool = False + if args.test_pool: + model, test_time_pool = apply_test_time_pool(model, data_config, use_test_size=True) + + if args.attack_type: + normalize = NormalizeByChannelMeanStd( + mean=data_config['mean'], + std=data_config['std'] + ) + model = nn.Sequential(normalize, model) + + if args.torchscript: + torch.jit.optimized_execution(True) + model = torch.jit.script(model) + + model = model.cuda() + if args.apex_amp: + model = amp.initialize(model, opt_level='O1') + + if args.channels_last: + model = model.to(memory_format=torch.channels_last) + + if args.num_gpu > 1: + model = torch.nn.DataParallel(model, device_ids=list(range(args.num_gpu))) + + criterion = nn.CrossEntropyLoss().cuda() + + dataset = create_dataset( + root=args.data, name=args.dataset, split=args.split, + 
download=args.dataset_download, load_bytes=args.tf_preprocessing, class_map=args.class_map) + + if args.valid_labels: + with open(args.valid_labels, 'r') as f: + valid_labels = {int(line.rstrip()) for line in f} + valid_labels = [i in valid_labels for i in range(args.num_classes)] + else: + valid_labels = None + + if args.real_labels: + real_labels = RealLabelsImagenet(dataset.filenames(basename=True), real_json=args.real_labels) + else: + real_labels = None + + crop_pct = 1.0 if test_time_pool else data_config['crop_pct'] + loader = create_loader( + dataset, + input_size=data_config['input_size'], + batch_size=args.batch_size, + use_prefetcher=args.prefetcher, + interpolation=data_config['interpolation'], + mean=data_config['mean'] if args.attack_type is None else (0, 0, 0), + std=data_config['std'] if args.attack_type is None else (1, 1, 1), + num_workers=args.workers, + crop_pct=crop_pct, + pin_memory=args.pin_mem, + tf_preprocessing=args.tf_preprocessing) + + batch_time = AverageMeter() + losses = AverageMeter() + top1 = AverageMeter() + top5 = AverageMeter() + + model.eval() + with WithNone() if args.attack_type else torch.no_grad(): + # warmup, reduce variability of first batch time, especially for comparing torchscript vs non + input = torch.randn((args.batch_size,) + tuple(data_config['input_size'])).cuda() + if args.channels_last: + input = input.contiguous(memory_format=torch.channels_last) + model(input) + end = time.time() + + if args.attack_type == "fgsm": + loss_fn = nn.CrossEntropyLoss(reduction="sum") + adversary = GradientSignAttack( + model, loss_fn=loss_fn, eps=args.adv_eps / 255., + clip_min=0., clip_max=1., targeted=False) + elif args.attack_type == "pgd": + loss_fn = nn.CrossEntropyLoss(reduction="sum") + adversary = LinfPGDAttack( + model, loss_fn=loss_fn, eps=args.adv_eps / 255., + nb_iter=args.adv_steps, eps_iter=args.adv_step_size / 255., rand_init=True, + clip_min=0., clip_max=1., targeted=False) + random_seed(args.seed, 0) + + for 
batch_idx, (input, target) in enumerate(loader): + if args.no_prefetcher: + target = target.cuda() + input = input.cuda() + if args.attack_type: + train_rnn(model) + input = adversary.perturb(input, target) + model.eval() + input = input.detach() + if args.channels_last: + input = input.contiguous(memory_format=torch.channels_last) + # compute output + with amp_autocast(): + output = model(input) + + if valid_labels is not None: + output = output[:, valid_labels] + loss = criterion(output, target) + + if real_labels is not None: + real_labels.add_result(output) + + # measure accuracy and record loss + acc1, acc5 = accuracy(output.detach(), target, topk=(1, 5)) + losses.update(loss.item(), input.size(0)) + top1.update(acc1.item(), input.size(0)) + top5.update(acc5.item(), input.size(0)) + + # measure elapsed time + batch_time.update(time.time() - end) + end = time.time() + + if batch_idx % args.log_freq == 0: + _logger.info( + 'Test: [{0:>4d}/{1}] ' + 'Time: {batch_time.val:.3f}s ({batch_time.avg:.3f}s, {rate_avg:>7.2f}/s) ' + 'Loss: {loss.val:>7.4f} ({loss.avg:>6.4f}) ' + 'Acc@1: {top1.val:>7.3f} ({top1.avg:>7.3f}) ' + 'Acc@5: {top5.val:>7.3f} ({top5.avg:>7.3f})'.format( + batch_idx, len(loader), batch_time=batch_time, + rate_avg=input.size(0) / batch_time.avg, + loss=losses, top1=top1, top5=top5)) + + if real_labels is not None: + # real labels mode replaces topk values at the end + top1a, top5a = real_labels.get_accuracy(k=1), real_labels.get_accuracy(k=5) + else: + top1a, top5a = top1.avg, top5.avg + results = OrderedDict( + top1=round(top1a, 4), top1_err=round(100 - top1a, 4), + top5=round(top5a, 4), top5_err=round(100 - top5a, 4), + param_count=round(param_count / 1e6, 2), + img_size=data_config['input_size'][-1], + cropt_pct=crop_pct, + interpolation=data_config['interpolation']) + + _logger.info(' * Acc@1 {:.3f} ({:.3f}) Acc@5 {:.3f} ({:.3f})'.format( + results['top1'], results['top1_err'], results['top5'], results['top5_err'])) + + return results + + +def 
main(): + setup_default_logging() + args = parser.parse_args() + model_cfgs = [] + model_names = [] + if os.path.isdir(args.checkpoint): + # validate all checkpoints in a path with same model + checkpoints = glob.glob(args.checkpoint + '/*.pth.tar') + checkpoints += glob.glob(args.checkpoint + '/*.pth') + model_names = list_models(args.model) + model_cfgs = [(args.model, c) for c in sorted(checkpoints, key=natural_key)] + else: + if args.model == 'all': + # validate all models in a list of names with pretrained checkpoints + args.pretrained = True + model_names = list_models(pretrained=True, exclude_filters=['*_in21k', '*_in22k', '*_dino']) + model_cfgs = [(n, '') for n in model_names] + elif not is_model(args.model): + # model name doesn't exist, try as wildcard filter + model_names = list_models(args.model) + model_cfgs = [(n, '') for n in model_names] + + if not model_cfgs and os.path.isfile(args.model): + with open(args.model) as f: + model_names = [line.rstrip() for line in f] + model_cfgs = [(n, None) for n in model_names if n] + + if len(model_cfgs): + results_file = args.results_file or './results-all.csv' + _logger.info('Running bulk validation on these pretrained models: {}'.format(', '.join(model_names))) + results = [] + try: + start_batch_size = args.batch_size + for m, c in model_cfgs: + batch_size = start_batch_size + args.model = m + args.checkpoint = c + result = OrderedDict(model=args.model) + r = {} + while not r and batch_size >= args.num_gpu: + torch.cuda.empty_cache() + try: + args.batch_size = batch_size + print('Validating with batch size: %d' % args.batch_size) + r = validate(args) + except RuntimeError as e: + if batch_size <= args.num_gpu: + print("Validation failed with no ability to reduce batch size. 
Exiting.") + raise e + batch_size = max(batch_size // 2, args.num_gpu) + print("Validation failed, reducing batch size by 50%") + result.update(r) + if args.checkpoint: + result['checkpoint'] = args.checkpoint + results.append(result) + except KeyboardInterrupt as e: + pass + results = sorted(results, key=lambda x: x['top1'], reverse=True) + if len(results): + write_results(results_file, results) + else: + validate(args) + + +def write_results(results_file, results): + with open(results_file, mode='w') as cf: + dw = csv.DictWriter(cf, fieldnames=results[0].keys()) + dw.writeheader() + for r in results: + dw.writerow(r) + cf.flush() + + +if __name__ == '__main__': + main() diff --git a/PyTorch/build-in/Classification/Sequencer2D/sequencer/validate_c.py b/PyTorch/build-in/Classification/Sequencer2D/sequencer/validate_c.py new file mode 100755 index 000000000..78a98bfe9 --- /dev/null +++ b/PyTorch/build-in/Classification/Sequencer2D/sequencer/validate_c.py @@ -0,0 +1,343 @@ +#!/usr/bin/env python3 + +# Copyright (c) 2022. 
Yuki Tatsunami +# Licensed under the Apache License, Version 2.0 (the "License"); + +import argparse +import os +import csv +import glob +import logging + +import numpy as np +import torch +import torch.nn.parallel +from collections import OrderedDict +from contextlib import suppress + +import torchvision +from timm.data.loader import PrefetchLoader, create_loader +from timm.models import create_model, apply_test_time_pool, load_checkpoint, is_model, list_models +from timm.data import create_dataset, resolve_data_config, IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from timm.utils import natural_key, setup_default_logging, set_jit_legacy + +import models + +has_apex = False +try: + from apex import amp + + has_apex = True +except ImportError: + pass + +has_native_amp = False +try: + if getattr(torch.cuda.amp, 'autocast') is not None: + has_native_amp = True +except AttributeError: + pass + +torch.backends.cudnn.benchmark = True +_logger = logging.getLogger('validate') + +distortions = dict( + gaussian_noise=0.886428, + shot_noise=0.894468, + impulse_noise=0.922640, + defocus_blur=0.819880, + glass_blur=0.826268, + motion_blur=0.785948, + zoom_blur=0.798360, + snow=0.866816, + frost=0.826572, + fog=0.819324, + brightness=0.564592, + contrast=0.853204, + elastic_transform=0.646056, + pixelate=0.717840, + jpeg_compression=0.606500, + # speckle_noise=0.845388, + # gaussian_blur=0.787108, + # spatter=0.717512, + # saturate=0.658248, +) + +parser = argparse.ArgumentParser(description='PyTorch ImageNet Validation') +parser.add_argument('data', metavar='DIR', + help='path to dataset') +parser.add_argument('--dataset', '-d', metavar='NAME', default='', + help='dataset type (default: ImageFolder/ImageTar if empty)') +parser.add_argument('--split', metavar='NAME', default='validation', + help='dataset split (default: validation)') +parser.add_argument('--dataset-download', action='store_true', default=False, + help='Allow download of dataset for torch/ and tfds/ datasets that 
support it.') +parser.add_argument('--model', '-m', metavar='NAME', default='dpn92', + help='model architecture (default: dpn92)') +parser.add_argument('-j', '--workers', default=4, type=int, metavar='N', + help='number of data loading workers (default: 2)') +parser.add_argument('-b', '--batch-size', default=256, type=int, + metavar='N', help='mini-batch size (default: 256)') +parser.add_argument('--img-size', default=None, type=int, + metavar='N', help='Input image dimension, uses model default if empty') +parser.add_argument('--input-size', default=None, nargs=3, type=int, + metavar='N N N', help='Input all image dimensions (d h w, e.g. --input-size 3 224 224), uses model default if empty') +parser.add_argument('--mean', type=float, nargs='+', default=None, metavar='MEAN', + help='Override mean pixel value of dataset') +parser.add_argument('--std', type=float, nargs='+', default=None, metavar='STD', + help='Override std deviation of of dataset') +parser.add_argument('--interpolation', default='', type=str, metavar='NAME', + help='Image resize interpolation type (overrides model)') +parser.add_argument('--num-classes', type=int, default=None, + help='Number classes in dataset') +parser.add_argument('--class-map', default='', type=str, metavar='FILENAME', + help='path to class to idx mapping file (default: "")') +parser.add_argument('--gp', default=None, type=str, metavar='POOL', + help='Global pool type, one of (fast, avg, max, avgmax, avgmaxc). 
Model default if None.') +parser.add_argument('--log-freq', default=10, type=int, + metavar='N', help='batch logging frequency (default: 10)') +parser.add_argument('--checkpoint', default='', type=str, metavar='PATH', + help='path to latest checkpoint (default: none)') +parser.add_argument('--pretrained', dest='pretrained', action='store_true', + help='use pre-trained model') +parser.add_argument('--num-gpu', type=int, default=1, + help='Number of GPUS to use') +parser.add_argument('--test-pool', dest='test_pool', action='store_true', + help='enable test time pool') +parser.add_argument('--no-prefetcher', action='store_true', default=False, + help='disable fast prefetcher') +parser.add_argument('--pin-mem', action='store_true', default=False, + help='Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.') +parser.add_argument('--channels-last', action='store_true', default=False, + help='Use channels_last memory layout') +parser.add_argument('--amp', action='store_true', default=False, + help='Use AMP mixed precision. 
Defaults to Apex, fallback to native Torch AMP.') +parser.add_argument('--apex-amp', action='store_true', default=False, + help='Use NVIDIA Apex AMP mixed precision') +parser.add_argument('--native-amp', action='store_true', default=False, + help='Use Native Torch AMP mixed precision') +parser.add_argument('--tf-preprocessing', action='store_true', default=False, + help='Use Tensorflow preprocessing pipeline (require CPU TF installed') +parser.add_argument('--use-ema', dest='use_ema', action='store_true', + help='use ema version of weights if present') +parser.add_argument('--torchscript', dest='torchscript', action='store_true', + help='convert model torchscript for inference') +parser.add_argument('--legacy-jit', dest='legacy_jit', action='store_true', + help='use legacy jit mode for pytorch 1.5/1.5.1/1.6 to get back fusion performance') +parser.add_argument('--results-file', default='', type=str, metavar='FILENAME', + help='Output csv file for validation results (summary)') +parser.add_argument('--valid-labels', default='', type=str, metavar='FILENAME', + help='Valid label indices txt file for validation of partial label space') + + +def validate(args): + # might as well try to validate something + args.pretrained = args.pretrained or not args.checkpoint + args.prefetcher = not args.no_prefetcher + amp_autocast = suppress # do nothing + if args.amp: + if has_native_amp: + args.native_amp = True + elif has_apex: + args.apex_amp = True + else: + _logger.warning("Neither APEX or Native Torch AMP is available.") + assert not args.apex_amp or not args.native_amp, "Only one AMP mode should be set." + if args.native_amp: + amp_autocast = torch.cuda.amp.autocast + _logger.info('Validating in mixed precision with native PyTorch AMP.') + elif args.apex_amp: + _logger.info('Validating in mixed precision with NVIDIA APEX AMP.') + else: + _logger.info('Validating in float32. 
AMP not enabled.') + + if args.legacy_jit: + set_jit_legacy() + + # create model + model = create_model( + args.model, + pretrained=args.pretrained, + num_classes=args.num_classes, + in_chans=3, + global_pool=args.gp, + scriptable=args.torchscript) + if args.num_classes is None: + assert hasattr(model, 'num_classes'), 'Model must have `num_classes` attr if not set on cmd line/config.' + args.num_classes = model.num_classes + + if args.checkpoint: + load_checkpoint(model, args.checkpoint, args.use_ema) + + param_count = sum([m.numel() for m in model.parameters()]) + _logger.info('Model %s created, param count: %d' % (args.model, param_count)) + + data_config = resolve_data_config(vars(args), model=model, use_test_size=True, verbose=True) + test_time_pool = False + if args.test_pool: + model, test_time_pool = apply_test_time_pool(model, data_config, use_test_size=True) + + if args.torchscript: + torch.jit.optimized_execution(True) + model = torch.jit.script(model) + + model = model.cuda() + if args.apex_amp: + model = amp.initialize(model, opt_level='O1') + + if args.channels_last: + model = model.to(memory_format=torch.channels_last) + + if args.num_gpu > 1: + model = torch.nn.DataParallel(model, device_ids=list(range(args.num_gpu))) + + results = OrderedDict() + un_ces = [] + ces = [] + for distortion_name, distortion_alex_value in distortions.items(): + errs = [] + for severity in range(1, 6): + correct = 0 + dataset = create_dataset( + root=os.path.join(args.data, distortion_name, str(severity)), name=args.dataset, split=args.split, + download=args.dataset_download, load_bytes=args.tf_preprocessing, class_map=args.class_map) + + if args.valid_labels: + with open(args.valid_labels, 'r') as f: + valid_labels = {int(line.rstrip()) for line in f} + valid_labels = [i in valid_labels for i in range(args.num_classes)] + else: + valid_labels = None + + loader = create_loader( + dataset, + input_size=data_config['input_size'], + batch_size=args.batch_size, + 
use_prefetcher=args.prefetcher, + interpolation=data_config['interpolation'], + mean=data_config['mean'], + std=data_config['std'], + num_workers=args.workers, + crop_pct=1., + pin_memory=args.pin_mem, + tf_preprocessing=args.tf_preprocessing) + + model.eval() + with torch.no_grad(): + # warmup, reduce variability of first batch time, especially for comparing torchscript vs non + input = torch.randn((args.batch_size,) + tuple(data_config['input_size'])).cuda() + if args.channels_last: + input = input.contiguous(memory_format=torch.channels_last) + model(input) + for batch_idx, (input, target) in enumerate(loader): + if args.no_prefetcher: + target = target.cuda() + input = input.cuda() + if args.channels_last: + input = input.contiguous(memory_format=torch.channels_last) + + # compute output + with amp_autocast(): + output = model(input) + + if valid_labels is not None: + output = output[:, valid_labels] + + pred = output.data.max(1)[1] + correct += pred.eq(target).sum().cpu().detach().numpy() + + errs.append(1 - 1. 
* correct / len(dataset)) + + un_ce = np.mean(errs) + ce = un_ce / distortion_alex_value + results[distortion_name] = round(ce.item(), 4) + ces.append(ce) + un_ces.append(un_ce) + _logger.info('Distortion: {:20s} | CE un-normalized (%): {:.3f} | CE (%): {:.3f}'.format(distortion_name, 100 * un_ce, 100 * ce)) + + mce = 100 * np.mean(ces) + un_mce = 100 * np.mean(un_ces) + results["mCE_un_normalized"] = un_mce + results["mCE"] = mce + results["param_count"] = round(param_count / 1e6, 2) + results["img_size"] = data_config['input_size'][-1] + _logger.info('mCE un-normalized (%): {:.3f} | mCE (%): {:.3f}'.format(un_mce, mce)) + + return results + + +def main(): + setup_default_logging() + args = parser.parse_args() + model_cfgs = [] + model_names = [] + if os.path.isdir(args.checkpoint): + # validate all checkpoints in a path with same model + checkpoints = glob.glob(args.checkpoint + '/*.pth.tar') + checkpoints += glob.glob(args.checkpoint + '/*.pth') + model_names = list_models(args.model) + model_cfgs = [(args.model, c) for c in sorted(checkpoints, key=natural_key)] + else: + if args.model == 'all': + # validate all models in a list of names with pretrained checkpoints + args.pretrained = True + model_names = list_models(pretrained=True, exclude_filters=['*_in21k', '*_in22k', '*_dino']) + model_cfgs = [(n, '') for n in model_names] + elif not is_model(args.model): + # model name doesn't exist, try as wildcard filter + model_names = list_models(args.model) + model_cfgs = [(n, '') for n in model_names] + + if not model_cfgs and os.path.isfile(args.model): + with open(args.model) as f: + model_names = [line.rstrip() for line in f] + model_cfgs = [(n, None) for n in model_names if n] + + if len(model_cfgs): + results_file = args.results_file or './results-all.csv' + _logger.info('Running bulk validation on these pretrained models: {}'.format(', '.join(model_names))) + results = [] + try: + start_batch_size = args.batch_size + for m, c in model_cfgs: + batch_size = 
start_batch_size + args.model = m + args.checkpoint = c + result = OrderedDict(model=args.model) + r = {} + while not r and batch_size >= args.num_gpu: + torch.cuda.empty_cache() + try: + args.batch_size = batch_size + print('Validating with batch size: %d' % args.batch_size) + r = validate(args) + except RuntimeError as e: + if batch_size <= args.num_gpu: + print("Validation failed with no ability to reduce batch size. Exiting.") + raise e + batch_size = max(batch_size // 2, args.num_gpu) + print("Validation failed, reducing batch size by 50%") + result.update(r) + if args.checkpoint: + result['checkpoint'] = args.checkpoint + results.append(result) + except KeyboardInterrupt as e: + pass + results = sorted(results, key=lambda x: x['top1'], reverse=True) + if len(results): + write_results(results_file, results) + else: + validate(args) + + +def write_results(results_file, results): + with open(results_file, mode='w') as cf: + dw = csv.DictWriter(cf, fieldnames=results[0].keys()) + dw.writeheader() + for r in results: + dw.writerow(r) + cf.flush() + + +if __name__ == '__main__': + main() diff --git a/PyTorch/build-in/Classification/Sequencer2D/sequencer/visualize_erf.py b/PyTorch/build-in/Classification/Sequencer2D/sequencer/visualize_erf.py new file mode 100755 index 000000000..7f276020e --- /dev/null +++ b/PyTorch/build-in/Classification/Sequencer2D/sequencer/visualize_erf.py @@ -0,0 +1,48 @@ +#!/usr/bin/env python3 + +# Copyright (c) 2022. 
Yuki Tatsunami +# Licensed under the Apache License, Version 2.0 (the "License"); + +import argparse +import os +import glob +import logging + +import numpy as np +from matplotlib import pyplot as plt +from timm.utils import setup_default_logging + +from erf.scaler import MinMaxScaler + +parser = argparse.ArgumentParser(description='PyTorch ImageNet ERF Visualizer') + +parser.add_argument('--result-npy-dir', default='./erf_results/224/npy', type=str, + help='path to save npys of ERF') +parser.add_argument('--result-png-dir', default='./erf_results/224/img', type=str, + help='path to save plotted images (png) of ERF') +parser.add_argument('--result-pdf-dir', default='./erf_results/224/pdf', type=str, + help='path to save plotted images (pdf) of ERF') + + +def main(): + setup_default_logging() + args = parser.parse_args() + + os.makedirs(args.result_png_dir, exist_ok=True) + os.makedirs(args.result_pdf_dir, exist_ok=True) + + npy_paths = glob.glob(os.path.join(args.result_npy_dir, "*.npy")) + npys = [np.load(p) for p in npy_paths] + scores = np.stack(npys, axis=0) + scaler = MinMaxScaler() + scores = scaler(scores) + for p, s in zip(npy_paths, scores): + file_base = os.path.basename(p).rsplit('.', 1)[0] + png_path = os.path.join(args.result_png_dir, f'{file_base}.png') + pdf_path = os.path.join(args.result_pdf_dir, f'{file_base}.pdf') + + plt.imsave(png_path, s, cmap='pink', format="png") + plt.imsave(pdf_path, s, cmap='pink', format="pdf") + +if __name__ == '__main__': + main() diff --git a/PyTorch/build-in/Classification/Sequencer2D/sequencer2D.py b/PyTorch/build-in/Classification/Sequencer2D/sequencer2D.py new file mode 100644 index 000000000..ed2ce6a22 --- /dev/null +++ b/PyTorch/build-in/Classification/Sequencer2D/sequencer2D.py @@ -0,0 +1,78 @@ +# model_factory.py + +# sequencer +from model.vanilla_sequencer import ( + v_sequencer_s, + v_sequencer_s_h, + v_sequencer_s_pe, +) + +from model.two_dim_sequencer import ( + sequencer2d_m, + sequencer2d_l, + 
    sequencer2d_s_392,
    sequencer2d_m_392,
    sequencer2d_l_392,
    sequencer2d_s_unidirectional,
    sequencer2d_s_add,
    sequencer2d_s_h2x,
    sequencer2d_s_without_fc,
    sequencer2d_vertical,
    sequencer2d_s_horizontal,
    gru_sequencer2d_s,
    rnn_sequencer2d_s,
    sequencer2d_l_d4_3x,
)

# Registry mapping a model-name string to its constructor function.
_MODEL_TABLE = {
    # vanilla sequencer
    "v_sequencer_s": v_sequencer_s,
    "v_sequencer_s_h": v_sequencer_s_h,
    "v_sequencer_s_pe": v_sequencer_s_pe,

    # 2d sequencer
    "sequencer2d_m": sequencer2d_m,
    "sequencer2d_l": sequencer2d_l,
    "sequencer2d_s_392": sequencer2d_s_392,
    "sequencer2d_m_392": sequencer2d_m_392,
    "sequencer2d_l_392": sequencer2d_l_392,
    "sequencer2d_s_unidirectional": sequencer2d_s_unidirectional,
    "sequencer2d_s_add": sequencer2d_s_add,
    "sequencer2d_s_h2x": sequencer2d_s_h2x,
    "sequencer2d_s_without_fc": sequencer2d_s_without_fc,
    "sequencer2d_vertical": sequencer2d_vertical,
    "sequencer2d_s_horizontal": sequencer2d_s_horizontal,
    "gru_sequencer2d_s": gru_sequencer2d_s,
    "rnn_sequencer2d_s": rnn_sequencer2d_s,
    "sequencer2d_l_d4_3x": sequencer2d_l_d4_3x,

}


def Model(num_classes=100, model_name=None, **kwargs):
    """
    Unified model entry point (NO timm dependency).

    Models are always built with ``pretrained=False``.

    Args:
        num_classes (int): number of output classes (may be passed positionally)
        model_name (str, optional): key in ``_MODEL_TABLE``;
            defaults to 'sequencer2d_s_392' when None
        **kwargs: forwarded unchanged to the model constructor

    Returns:
        torch.nn.Module

    Raises:
        ValueError: if ``model_name`` is not a key of ``_MODEL_TABLE``.
    """
    if model_name is None:
        model_name = "sequencer2d_s_392"

    if model_name not in _MODEL_TABLE:
        raise ValueError(
            f"Unknown model '{model_name}'. "
            f"Available models: {list(_MODEL_TABLE.keys())}"
        )

    return _MODEL_TABLE[model_name](
        pretrained=False,
        num_classes=num_classes,
        **kwargs
    )
z%`ilckF#AZ;0TTk973;7mkBH7{sLXf?tji;W4HSx2J*x$DBSo}hYu>Pef>Psi1w4J z38MoF|E0H&L=x@*GD)&g;g*)=+oFS_B~y~7nXu0$c1c_ZGkLM|PaF9vBBgmnqsg0$%y6%L_(h$ZIC6XsvZCx~A{APN4A;gwKifUEe7Pb0Z~8kGW@VrKhQs`yfn z96vJLf-0~mgM_Pf5&O`e#$BZ=u?)|eH@~D(zUW0q z312;HXPIi`v038q%RAD&QJQW{oTzUw4*Bs)=iTM&res{>0>J9x5JuMK^nPmDbhk@h z-*G49rE@cBXnZ;D{aVylicV*#McXwt9TeOl7CedTOf@wnM1-pkiFgD5)F`u`ZWKq}JU_3u`cdUv6H23Osf!l*An@bKa~iiGp!IdTIOzS} z>E?~R7h3M#*puA|)8W*|2lqTzkG}$5sVkrZ!;T!%alt~yJIOOU5gcv!)&f%5YR;~G zjmVj=m^XP}`DwdPj2(ESzaxYNJ%o>4bDi`soA5fb@ha_3wMwrJVdyR3-#kq6ZW6Em ztXj`&z|2qo)M@^W7ach{CxDy=nqTOhE`Tn(HqNigfcSv?|Npy1$+ z%E9etGao3WsDaQOMRtVS)k)!8R?f8=9BY!X*_>3oXi8LzZ*<+GC}CcvNlNZ}iv3BV zjVqLLOb)|$03SJ4ZmCJ34e`>(dFIjAe#M*BRVG`BQO{pslG8dKlL@XNt-Uc%lxeRa zZoXRovVuqBt!Iv>ZfLR_4lQF4-oRmUU zRe?WV+yR~>zWh*w>Nt=r@GUlHqku8k2u^-9|FKvu*Fd@;c6*Yft4}gdH&!;VR&V-2 zxmZu(3PvPH+z8Z|OqvGv{rkvlj&CDYQ8#n;5#8gF2UAiy?=Ym#G7IFXXmDY!6$}|_ za_U%b!mSzbHq`(g;x&&7HO@&1B5^+8luPs^@>C(-a1B>m=W+0G69igG)B|IQ}{95qYnn3 zr!2#6cXlCA!>&(Di&bQJr?2hxfG&9Kfr~ci8_MFr6|uX<4dCM)AT$Fv*capj5*Nw8 zNIU_9;16gqKtsh@phZ;k8#UU)K$rwP#FX$30ARQSEZd@F%%{(r4@7L;uHEdU-vQoY zv@V>oZ>GbLNbdlIiZ^u9xOad|{U>*TulHJDNr26to6Y|D4jXrXC8R^I+xzt_aDY4l z3nMnxJ-pzSP2FLcLu4NK!$5JIpW5C0`4)9|0GRA+>N~(=aOfQ%5Fd0T`0t+od(QtY zE&tE%P^7E%RPchy`w>Z1nlG3iEjrHGWcuSy3ORXS$!;*dX*WKEN7*g)qmL>>;?p=h z82e%>2ig*>S;i_@j*4V{si}Ucn`Eice^QR_&Kd_i8zy&vU#cpZKi5q|7>}A{8e38+ zN3VM?CuM%2siJws6_Pyo=6gIVj3fFH_2(e*2RWn2QwE0iAFzu) z4IKfyoDn?bSJt0k*H+C>Xzo8?*E(nk`%vT-%NGxh^k1mU=1t3{@et_n{?Dk(@lQ}! 
zI!sOy2LZa~gAq%p`7`JuUv$1AIimAK)aW}h4rKc5bv4%PH$Q70V2fCOO>{SoRNjFZ-LBe)U zAd^$BDU~I!Xm3TS?8e8B@$*_q-4i2D_FwA9(}|pyl2aH3kIZ?>){qp^g?cYN z!a#X5I;7hx!C@*sBB4rY>sXpiru00Vfpx%J^DDybrd<&g8pdg?v={fYOP{_9;F{=e zo4PhJxZOxnG@-5A>onTiNjYk{33P1P<;VwZSqcpg{gyfV+t&yy6R=Jzld-che?CPn z-nuxl7cO=os4up$bqSIc9gt!-SE__$Z2K#`p1P=A8Dh}T(jDN_<{hBp&;Np+yTCnQcYv5E zNaC@~_nU$ApDu|g-jrcqmnpx|y#vU-hihlw0Vt~S?WF0dTf%?~1pbwCZq0ZVOnxxB z*I!qkFT#n$T~wFNqb_lD;y@@jGJW&^ee;CIG82CRjicGgw_8U2H#OBuoO#$hr5ar= zb&_is3yk;09H5r=zBI+f#&*bEvxP_MDqGe3Fwvsqlea$HD4(0gcHsrN|cHOs6E>$U-1k9`94avCxSEH!)UY za-hMS#^YS2=SbaTfb;&Jh9+E=aZ}s|r`wLfI(T5(fw-nLUn$*&2Q>=BtE)iJ(Hf|9 z?QZog<*eP$5nyBI3#~4>*g|z>t7rf!OSYqM7v;nRd0)QIAA$VbQV)-(f1>WS^&Mar z1XDG>)^PH?q)}N3kO}TPzVPfc6VQZEsCC4ZDOwa78+e=Vo%dUxa0q>hEW)l>#J^6U zLkS}FblH-nsrhNB3ght2o8m6hQ-8T{=kqUyAjuMH>asB^CDYc~AHL2??_&&hYS!V+ zgYN*XBARC2>H`LXo2RfUh8%nXi?5!PadgxX5#Qgm5+Rj2w`ac@B1PGXGnhxZ1H^hQ zm0hFFfHreWlR>caxZ4f5bwwk}9f0ZJyZJd*^&MbXRhF9g9~54yjfgwIDvIMB;DxEg z9YB>H99cI=c?al1pMig?_kt(LnNkCYH0A$+@`w&Q4Gnuh7`bt*~<* z_&xxB9OGU+3IHd`$M9{PAFR)rlrg{KEoyX$OYar3gbDjG*^GEJY%&9)OLU59zAh=! 
zrrMCJII0M*1=L)ZUR@?KljW1M^fhr^ESw&T_i*5S_uV)zt&(#&HkVcDd;2^V7qfFR z_(041qrSN5Czsa#z0phIZ87Gk*M1;=)(Blzw=$TxbG}PYa`+|f4UXqzMTNY}h`kx} z;jV(C!n~A1yE=u^Di*t6Wb%6l3qh7p4~++X6ZOQQH6QTiXXmOr9_?nc$ZSMHRg~LR zi#Mk=Mh3xeh3rJncdRE%C)Jd1^h>9(Eb>b#`#r66Nus%pEGC`&@U&-6OCkua_20L* z*o-;GaO)Bqad=!l;gmM9s7%8IC(0f6gQFPdB$AW2M1k+A-fU)<->O-AcQcRjxF0+) zZb=xbfY!dY!EZr|s;rTTU>%B3GE!aeo%1rvY2lH0yVH|$K)TPxc`Zc=N{^3h4(lH( zn@6myVlrHs@gdv$Yq>JmL_EJrzGro3w+4ezn!Gl!?Dx2gE}YOasE zK3&m#rp8;@(wSD-i?Kdc+TP3xd;X6}!mpPiVuYM`o|^BAoDfUN-T{toNv|Uip*szE zZGU6+NCm|fsJ@mQhe}3}1dY9NSt-|uNYXah#G_93eb0z#CMdQcJU<4jI5l;b zTOes-Oz*bI3HTB%qaS3?o@XsQtE!LAZJH$%CFh`e6Dj4LsS{k?@V+XkIhlkttDO5r z7hAmX$}#Ut!M1?09pE_@ z)XOK`_E;cdYNjEruG8VjKw9u<#crCU(LLGk4j|t8R8cb$)tf#Y5F#eVWQpt_w?Jj3e}hr zJV}LzroR(Cud`!#j4w-WZQ~@cGo>2^Qx}AloMDzZlIpBvyo7-e%28pk{|*q3z|f^?YAhUO%y7?JB_VSv<93Fb&7x`Ys&BbSXS6eMbhOLZc9xD-eF&P zV#F^=KYVA$?D!!gi?_7%^e_T)HJ{59%G2$kXS;hWdTN7GzG8a=tgNekcRZ~M{4k+p zsRfQ03RZP?#>s8;JXEOoP7(6NEbzXgJX8h(IX*kv+EiIr{=VDv8o}(=&(v1QQMaiP zb(K+K^OtuC5yyvs(=pij%JzkJ4@&%{U6z7^6s9vyy z=lSDVz^EwdcCBf~i*TKn@eJWj&I^nnPz>~FzufT(u>yOgH6`Mqy;kxEkK;`b*+LvKF={hgtW4Rih-JUBSHGM>}M zewXJ}*B6|26s6VBJUM^+ACZrG(hBCruEUzl{2e_CY_Im_I@0r$`Ax(~lU^%+UW1bp z-2prr(&12}agQ?6A6p~#u?B{Q)3Um0$Z&^}hCs3< zFXI_!<2`H7Q*DcwhoQ`Jb<}cfXXvSh6#sNk|GzAf#SY;hsIb*oEqGdO!L)lP0p307UJBA~816`w%FAchU^neKYUD>X z=r%2yX}y<^;ViaZ&-0gk=n^$-q+h1Dt$#vjm;S>A$Wh1yKKAKLw>z2UZ+LZ&X{pjJ zkXImj&Z~8vv2H|CF3ZksE9kpkl4^X?+A2ZYV9*wX4V44~CneCAO5bOr`wk#I0tdnc zeU3qckd~x7Km_2G8Q0;%?6PfNNHK1V5A|7PyCZjoXI03}FY76t z@x4KhDyv7JJP)==E~=vyiH5uVDEP}Ye|(U>37b7xy#u7%frJpAPQ(Xm&IzWt*$~8g z!0P@Dms_!NiM^-Dgw0+}f>A;;dqoUp2xG^MhaS6+mbch>p~ib?@(-w^hIaLEep6%h zy8#Jwuh^MxMQg|XX9NZfAY60|qwTi?0OSc-4B7jLPV@Zt9An@EmvHoOXs#xVscF`+ zl0U3QsnpgdsRG?~TYoUpKW{Ier9+|eos?VRqPc!D!c^T{Ye>D}V-d>iqcw-iN6~Q8 zhc2Gtv55x!UsFQ!(^9e^mQog{<_-{LCo>OP2U32OIaa?x4DWe^^DPmyhCtA>j|$M~ z_5kl`ERgWeK7=3*YswP=eoOhmq?B#Paz`F;d-D1j-1}8`{!6w)n|~(h#7mKzFsnmO)UI4y>SHd zP;T`i`cW4{ZNnGNhp^8~V>CbZ%pc>}juP`#TRDpsO8PHwNY9VEEp;5gWZheu)?R8b 
z%A9oefjDSpge|56AO1P3Fp2*^GZCABtT5KUvpB?dVyiKM3n{{@_`afyB>_5*^fGVL zGx{qkIPU5zc)Y2&JeZ;P8j-7t%b)%Hqm`>g1=Zh5&_vm-`Fn%TwA-zYxz>@Cmh#aw_nxEQTRkhKU`38t56#&oDW9I-O3)WRwCD)~ zdbmfSpx82^M5)vl6=do@E(!;r1Bf8cFvM0smw*}2l)m~MV5AF}4R-6e0~{kEoc?lf z>K!0AdQ$Q4!mQ(M~`_LuoLX|CEg7Y3lD{87Pn;Kt}KB{Xpv+n59C!b?{l}&vM zy5BPdiC`anEPP<+2m(1JgR3YSzThg8Go~SuBA~k~H1<*cjn`jV?ATmKjh)w~l1@6R zktSx86vxX%T4s-m>129qUTlXGa@D#RTmhPS_Vp|BZ{K9I^fpc4x;n0sB8m-cr2(+l05oD#U_q`7uZRu~)6UlY|o z>VJR8miVu2g!`AWYVwyX)2~b_&r=ihJXDq(AYdJ9sMrLMKMbUmca@pB8vG!wfoJHS zHg0OKJo8@(N2Vno?ZDB;`bmVyC1PhfdZHjLePCQ$+*eg4fhu(V8jWzazIy9%g z>a^x4ZZsd%JCOzK(&n2F)KgPx!23rn-h8l&>gT{)w&(^2bbjyX)eY+HOIvz9(Oh32 z|H>*ZEBxBmFKK5dNh?C7MpenS@!autUPkB3U=|LSCdX38cRHyWwJol`i<)_t^iZClN=J?n^tqa-di1p6n zJRgmT+tzK4f6t$(pKk6`7Pob=>)g9n=qghMO4_*~mSiiKMUoCF6!xH&!K%Fj^xC$B zbS?#g@Qk*ApO4JaT?ma~!D?!g=T!B0myUQx^tVbkK)0u3rBFv{${A&p@2p*C7`4)G z^&Ow&G-d5vrsyPpJa^8U52Tz0|Xov!TL#=3`^p zJ%tT4` z<)c+~$(ajM@PsIlo7W0Yweb|GL7hh>I0_@6d)xd#aR;@Bi2_b7RT`Jppp7EDb(yXO zV6q>M8Hao|{%w_en(c~PWXAO}L<##I-2-pBi^6YXE>WDRd)e?7{N=*J-X!+Pagg~N z;?gnFi3(F=rAj&WcZz0jb;dil+F%-O=06As1XCaN{@~jpN?p04S%BB?a`;BC`8u5Q zl?**TKQ6H)7uSxzS!a4mb+XUdQ^BYNgVFeCK{z6Ycbavr+ z8FOXbSmH9?duI;~-0c|q#0a-jA`7QEP1IXN8Hpr)wdC~$4A(O&;U zG?^Cok+$(fX(#wXs&>b;$t_Lf4iJq~a~tA*aLf6wUi?>S^4#KW%v?KCuVV`bd{V%L z)cfUE94UvX2u@3{TK^TKW>*Gc6@6+*b(H@4W$w{O|K<(lA*4%ZZpq%Ls}_ zj?8~Sed;36^%CfV+RJc8RxEt+VD4*ND_tzhb&13nG%?peEpK-IiSTX6Lf>1Pc$R|B z+~=baPez6hu0Uq*x(l`Kn)DXR&bB2?hnFVZzc}PcC|_nrdqi|95bJ+4%DDmuQ_ijI zy^=5HIzG0q+3C&Y57)pLx~LA(^Y{D~gSqf4zvCuHVeHn)uYLnTaAQBW%?H z`^qnAeWrs{f3^Be=A8Wl7A}c&W-`2en*)m4(N9pN0S^lt%CQ~R5-E;$U+Y5p8t#e2 z;)^|}_lXSYon=d!I|1Ay?hTX)rjMspb0ZpSW4;r>79WVa?P&baNKTl3U;Um(=#+PA zcKd3+rO>Ej;gP7@WWv@vs$Bp0BsN~G0Cx{#O)CAE`>U=r&>hh z3l*f&73qSUHr`Vx*rm`&J4(ajbxqCSv2a+tIN#g)>%onM;eZ$NM7puR6$=x80srXr z$UmrNGMOn);`Dkkb5ttM&aSOXw|JGP?vye%`4~$g*uw^Uk5XZT?qxWy&XvAc;T<63 zwe0YLIP+d(LZp9ZU(1(L%9-8>0mFBOIb8X(Z%7$$RnKMQ;J&(e)Wz~y#@kAm{^I)_ zZz5}Pf@F*undRB)o&D4MZQ<75^-jFHBr|1kublqh*oq_mHqbj*uLMX 
zE_%yflUh@R5dWo!{{zI)f09i|1Kta8S>c6q;?b7VN{Y?DBfW#T-@E#1X=3Kl=#1le zjtVL5aS#2)%<;-e(Oa>#mn=f}W(VGs-smpNfjli78M^zMl9VHA2x%Lqr2Jd(d^dT7 zcGSc@YizmAdr^GCIE%17SVoGbV6TlAW;$N#j_xvG+J6~Xm+={F26fcm@5ohwI%V$X z?1Ibk{d}tIb+$&T(i^86s;Zlsm3o>+1fB7x+qvdS^fjX?#CUZpb`|bjX_vZ7(JF^` zJLXTfmRr__I{`q@iSz=4Idys<^Xo-NV04BiK&=o!_;D6{_iU!G$E;*5x?8!;W7sqn*@ z?1=*cb>elFS5R|X%+0b2nb~Tn>wOW)DSEQMh1&bCYr>0GuruGebqxvy*K0h00kt8i z&(vS-z} z^n53ZdfxA&n_Cttct8cSxmHyjojzYyt#fj+7$XnQjYp}*ma~-yuv;Diy0&GdX!Rye zZ*f_3T2kW%K-^4`ED6e#s7sni){BH+^S6AVkx)zS)62G@eM=TQJv|IGLnSn&Q#y*} z8{^`sZJebD^%-qp5**3C`jPw_zvvLpn-$xI6YN^XwXIkj+Z%DmE9snG`nm`xTav_a zuG%5JMsj4TvU%ct&sTs9tma=ppk>Mb88`jSJGZQcAi_)NQ_t4>Ad0C zB3@(ZjdZY`o}Ko&S5;b)r*X@Wdz^<3dScGe@ny~9c8g~j72TqRwpnju z^<-z9b`e0~UeIWHJW-Zw3^cNw5K;ZSO2xY!@#B4M2^ZopxJH^3RvYT~ z*Xc~8JaF)q;$zC|`OeEDQx)-Uiu#mdfZ|Bewy$nNh4n<@5a+On#kfX+RD{Pj``OB= zl~j=c+k?wQRe5wgmFT$c4-(E0Y;$?urpO$)dbyx<#2=n>~ zVP}6+lH%V^xcl4fp_u;_z7_oDuX3}1O$7G)RapCP!M+Vi=Q{u$rStXut%WN?9Cwdj z7^c6)_J2EaDt!1Qv$ndsss4qdLw2F(3gaffeKsi@YnBZ&S)Bkf&La;Qa^-?chGnu< zasN-2hoFs;baj_~Jg66+92GvNCPR)rZ&5ZtUuG$lR=pYmNgB~h+&%m(5YD~3_St1N z4#B&p3lxe-TsJ6=!g1GA(gyC+0)2cDxwJol-M`O4{@;Owq|2!pS*}Q=tt1a3BsqmK zO6^gGIuA^GW>dwNn8X29W~0~q^V^QXQ_6y+>tr9JF%P2D&mJZhJu3XxPi7g9`{Sj{ zJGo7Hw~>OCUiIvY(u+HQ@zi_?QC*R?*&h60r!H{?M;w^SC4@x}PtTW`e$Ln|eSCXw zZvhz!X;PpzR;_Lcgzvj!V`T12XRHPgyViDrAg`&te5i~}F6uc)zGMlv3CwI9a1Z3z z+?<#WUo7>GJG|<>aNF2K+)AJ-t5UFqaPz8}WaLUlB=enIMeFIb&rPveVk5^q`h$e{{{{gD)0}fe5yJ8gcELf^4+XI02hlyRH{i&k=c3@KHsA7LL5}3% zzD&=@Ogn&g96Dt$WA8a} zmvAisCp0hKiM*MSQE8BWy3$voW7V^-H7dmFAz>~qSzI{73F6;3U0U5j%b!9<8KvYD0cES z=M*&LB1?sASj)U8vML)zitYdouz-Qn^m%c|Prjl{Et*EqPg%>;))ld_TBZp4E9coC zfq4GuXh%TuQ8DPn7LNZ~$>$`p@_q;&zfP2kRN!3PX^E-~$-F5nN#cc#ll#Yy-6dT6 zBwAK>FC$W?z?&0mFpoGoR8{}xWZM8+<*>pb)O8pCs|?tJH!27mjTZ9VA*%E`((*XjES&YOuqo-qnI? 
zWfmFJ`}F>sa8LFZ zbtldEWr#V~LO+~@P5#?`e!aNyn{#t!*et?+WFj1dzb_K_E9yTmGivv@uS(| zjjQ@AYE--BQV?Dhe^plTynzrY2HW zoQx7{5?#CK1L=bB=)#N!nn!7tMFYBhxN@Sg6|3Sqsqhzar8|HH!!O3xs!2K96xULG z0%(G*E^b6W6ap;jCnso&=I7-$Afe(6*@ zR{ha}hzbD_PDyC({}Wr2_r|<%b`wt0)C^&*%xgbkdb~qWRRjs~d#hy-bq-7&Ib(Bt zt({{q0|WCsX!owXtoXdhR2A(h)yAt%m2Hnw;5FT2nMx?fk?`(q7@mkV z-$fnSfcF)}8rO#EAdxLrF3ebKBr|N^5WE)K{yZXa_k?v;=h%yXMG$F0$wZ-p7i)13 z`xWM1(s8k>If320*x}%LLSjC2uPxDacDA!XfLxUw!-4GGP#8d`dl4yS?5c?fX<&zj z9a?j!JJ6C>Tdx;hnsHw-nM1hEgo?)GjlQHnHXa_#>AUK}x+FfHu*T&U{bgzk2`7kR zA}dfdio6y4!Er54b9Ck@d~)>2sapME=k#*tf#itz;_aZ5_@wi%3Op`1>U*JQFLQ}J zN_fnrIk9|TJD~AIqwOS09SCl02yP9u3^npsaKqF z3_PHH-Xsl~Nuk?71NUxBuC`DT+78$g`Z~c)rJWD!Chg97x2o-c=$_o_#<9v$YKB>1 z9}qUQ)6yhobvCyFfoTeUS0nnT_{flA4>H1)Iu5Z`-pkj&-=va824bJgdkQ}6s%Y+H ze_WHf3ltw4R}!sK(RN?Yc|S?NY@iTa`~wy{^?5v3)$dSxr(+gfdL=R-zAnQ|*)QkG zNvzFdi$p0el&Hf(p;9pqN0Q^#itEj=LaF4U^skE(wf4iY;S+<$)&@!ZQG()S{iK^E zGWYDc3camIl-)MknxA|xgMHr2kWK`6s+b_h=f!2NG^|J5=y#K%7nRc;kWs>Pjgxy(}orTMNmJgsT;B zAU3WsL*vQD1FuOfZGHX3RZO1t;tpB?7H5*npOV9kKUQr8AWoKxTq3JzX~x#? 
zY~2AcoWD%iRmc3;I+UZXH(nOP4xI|bFuUbTs*xvRwiMn+2piu&1FZa6n^r~!;5a`Q zhFL}GPrLWO%{oS3(SkG%ON}y1rtSc?TiP>GXP@WDPhWJE02OvpIeNsi?fI)#ox1nU ztn{@?P}s=m!h<-XM}9Q2l-&Vd_Gz4(o{*NidL!g643Xl;18?#mA<*D@i|0lj=^EUp zJ#|r%PPWyldDuSPAp^R9d04(pF1E5jP8;FzEGa&2T9M0Jbh{?%43<2m%SHRlR9~U0 z*(3nY?22I#skrFMz9*+9!9S{IiWr2_i+{rIL;BA){}^YML)a9g`rbVFu^*HoWtcn` zP@>*C7K@G{Hx|AUHWXbO5&oRCb((CEa7JiNFQJ)fw9#!TeUFPr12_}Py;fHj^~{<- zK?iq=4=0ldP#E_Wg|QHDTB@yIG3vsfDOqaYA=H1E4t1O78)JqJPxMT} zWjruQh5~&kYpu_}9?UR+Hz~wzlZ=dla!(SiHZ`0_i3h&DZ<`m4-LZ3&QMW)Ame%uV zb?*OgL!|Hc>~`x%+50mnBA4fY&9A&3M7rhYe|_8fj|9$stShl4sxS+|Z zTO4Qe6O)<&-n4+eqry+0^uTlVJEK>-qV3(AWea(!AnZhGdK!aVzeuh1I6H&im8Sfk>}i4hJ5^(jTwPY?MByh2=DAH|%d-K~nAjo>)ztLU-#$48=0Mq+d1=0-h|N*jdL@<^QYC4IT9F+N~D)L9+wH}1S?*y*Nd;+=vqmo z9D2v8Kc3W+BkYh_gV46g;ASHNBF57J*}$TyP9GcgUhH7H;L)ioTKk=XIFq9va2x7} zqvx)30jyAYB0U9!zW=8LaQ+y0z?xNMHF;X=0TX0t&@Al7|3Y`aZI@b^?zy{^(tv0N z4m*c{!4Z0D#_ebGZc)E{zHiaSsuB%ooU#VvQ2@eGs8Mm9RSaCOFyazW?AA|(cd0S zB`-62T?+Si%Lh{yYcOip*jQDw47{bG#?y|{H0oMb8lMz>ey^)tou)W%j(cLEhy!0) zMR*+uSv@8-OhbG1)ZCC z(QZs`(?Y|a1@SenT)~r7Y=cCHQSHJuCHW{ZPXAV{qUb5 z(tnINJA|}3SOA)FhdKdiKg!r4(4&W`E9uyXLy~sBerB`EUR?cszQKH-Dl+U62XNfwoH>&s)240Co6|pfRJsUD zPm;*^tjA1Z?EAS)H2D5)@bE`)pMUEf_%XqTzVWG-wZ{G8yCp>pPYK+pg0EdZM`duSO|MUpapDISNUp_sx_x%u zwwLx(&Vv%`cWaO4=*;JYC4!A0dRlpxpR)GW5RVo$?YGc$t?f2HQRFI}HU`FW4aE2n z`ObwtB*j2oDdOGFPd`>O7-=-kH=R0YsSVa>2o3ilxgbDgT>%S>>BaAVd=b#P9*Fs< zD^@1{{W1rS{$YG|Md59hRm~Gt6ntbzq~f(=K3Q>ueHgBgg`cDI#I^r!sAh0dJe$$= z!fB0pERV5w;*-YOW3Dq*L*@?8Pz)Ijqfx|JGzC4HPNB%xY}v`gP~ndu1I$!P0`Q@Y z!z}~0j2#-%#@G{8eEWpmhNrF(Rdja%%W!nKbyR?wV65Ku*`=Kg3~M&*BH8JdBwfm> zg|IfO;uQ2?dOM84ZPg5y22)yZ@U>0ZXu*Lo(PNhspX~At3{2NvL<&56=$|hCZ+%bk zS98~Y|05p%fuer@Q+B^1w)SUq)jv?5+^FTf`<$c9$P$BQP?f@rsnX^n+KD>V9Vx%> zBC6gOiKM<-y!?`!ULAF-l>~CwzW%@=RyzD3es&YKo*tKabDpl(ClRyEh^HWd^Mq=` z1be>(g?@XoO~JPCHrlK8jG7my2`(MhO!NMv+(WSFbmcqA0$TD*6+Z$)$hh)q;%hYW zX72z+T(_%m1p6mFPCZ}hPWNQuhWZuV+vh@!A9>~X75F02L=>vQ$y%m7*{7iOx5b)z zx`ZEh9Q6@V^M{@geVHsZ8|fGUSiq%|$nz`>dIrW#MNPdu^rrKi>ldTVg9CQ}j)48t 
zs!pTsu@s~5#nJ0s`@(OC{-_05mlDUh`INM}u*B!dy8vX^l!L0JPK*3WOmngtd37Z*>%%AG3{@f!-lxq`9v4}H;SFFkQ{ zqItz>a%`yd-u@=kp@dOrYWm2^PH4QZnCL-ftTNRi`}dbAYC80{OjepV=iN6?yMX3P7--bng9%{ITz4iyhZ_qtXXRHF(->7;L)gbIh6IsT+%1#fp6l3VDUWzY6iW3_Txf7W>Rx}p!2UFJZYBdm1^LOl}w^8Alfr1nx z^@y)~5iS@Yo#YBfJndtLCrC|XKexH&)XsEa^iT+vm1K6}XXmUl2RXY@?+IIiT;u3# zdMO@B7h)GQq*_7@j;@AD3Jbjb`)BNVz_$HwB!kh7Ew*L1TQ+t|q~cZm#Pa6nK|Y^u zb0w~o1QV|9E~xv7AFDY(egD){MRilJze7)JR;2t)dJaf{M?IzNL6$bLKha@PjkUEh zLltq0(Q;fA!PaZZru)_*S-gl11v&E#no<{IuG_Kqj9iqiF&>nJxms{r;J%0z&(4)j z2SMUDuueYNje4it-xEIk*AD~!Yk&6`GCwbn?^Ht0pL&*VrmIn@;{R*!x}%!RyL1o~ z=_*nL0s=vlCdAM|iV~WDU;t^NMi2-gK#&#$#EuY}fS~j)ZO{NAAXR#k8VJ4jK%`1@ zGiUeP-I;Iqn>lml%$eQeU++2h@rqzODKz}uT4+! zH!rJcoG6#=pFseRZl|Y*mB{a^Tv0W~%u(u5OTK)u<#VO3%T1^BEy{vTfkgvUsUfN2 za;>X59a$9np@r_z7_m!F)h230i1`ODuI%1^H?@JGt1s+v-5^`(fn&o{eYBf{hYy!h zN=nyeI-h6Yo{76<18=`+XHlDCQap45VLT(ebsuD4cYTR+jV0SiUWJo0Kkez?Rs z_2oNY_vwHiW?~x|HKOZyehJtn@x=el0m8_ri+Bx>Mxmuk?$nn9Te5oDo8F@xe4{(} z{SZRxteVBCC(e7nge2%0RF)t%$xN(KP6@DB{j6QivlQr4cWNKqmff2PSPP9PCEscG zK^1eOXVx;o^G(<5I_TO_PsQaaI{yc;c2w$tz;h%VmhK@LJ+aVvX%iY*=fl zZ?PEq?Gu-z#Lljj?Sbb!;lc0nh%rljsBLTV`_QeAnT~-6C5NN52gLAWY8t&naEx!& z3zdex;_Ma4G~UEk1HYdHR;olxHR27xJ+*S{LKcoj?&NBp59;0Sn82OvO;aIp~|yK0_jVKkTQ`0KML6GQ%BQ6k3AdcNseJ5 z7`-c9#N$m4``|bz*!qL{m5_Hflglmr@edP%A`nPEi}C1NM!nm)5aX+G?p!Sky7cK) zQO~Td*{uKxbRoT!zoxcimgp|7&da#_S+={sEU($TYab~R} zXvD)tZLLJ(-~us5=^as0GBXeP<<-nGF%v)!UCM9YNQ`Sut$2xZV1z zq#0jd#6nI|-vTayDgxkt6g^?YR%*l3y7q)IWJkF!pQuy3&&AqHT*m-gllS#?i@Hi zufE0YLNM_^MP(Na=28_uOyXNiVLb&dumXL}OJPnzebF1=P#_%P5!{(NnTgfco!+h8 zmS`CjrcBrrUZiSIMp*2gp`TcN+PPmRvyo_YDE9U{fN{GHPA2J9k%!fG`ty@sK=y{x zOI}X1U-nFEs8cv|7%jXMfA2;G_}1r0K~hU2H$=kMZPwIdlRog{j7RZ?Ox1+#Je+(` z6fja2K*?EA;mWeObp_2*f4K8b!K_R~mp=`kU+3jq)F^CyHRo)Lj4Lf0$}$3k)p#>< zGvEW-^5I>SAzUq21JR-sd_EiU^E3Bf^?JwgR;ntauCOLWl@w~|5Yh}V6TQ8tOG}%w z2z9FmI2yxb4eVQ?SosQh?(k@>Ad>Dt(l*>oEkifcDaNus_}0QUlWKWn3YEObdi?Cy z<5Ui#P@v5IAL}sqr?&0igE;t%r@zIu@2uL<{5F}z*=%AmsXg0^z`Q*mt`!UwifLdjD 
z9mf-6!{A#=9P;n2F1yiZP8u01Gy!g#+h_q?4`oX!M@S;ggc5v@P;Xu)LLUjXkn+Qo zg}!9|z&C!xkUg9YxMY6F{!pA+YwX-VIT}EDKTrK?^e;_n6Y||vb3|$zTMdvp;fALG z8w{BHwK1J4vpl9N@O7ymU>r-;+6*<<)q%pojMe$t_%C-!lh)oY*slNQBD2hw8JRDW7j|LpkQ_TCkWkLdc|1i!E!y^v{i?KIp2;6Q zx&N2jWXCq{@>+i))aS)@af}M4j=GrM)r?fh3)CxN^Apfp6nmm%a;0!&v6h-ihpsro z8ySTixdj^A)xwjRt9A@F3cqa<)z-9qr~={HKVN)3Y9-qS{T)E(m9+0fP&!Xb#k7BA z4g2M?+_cm{#m-;aer)w09{itt<+C4C7^77V?4AJ3|@UGE`lWY)d%AYW?A@RR1& zdO<&9z0TZO>i8=4vs*%eitRwrE?UmG z z`MuN0MNYvONzHSi_JV=T+|C0U)lEYoflwLs6W-^(F3xEaN-OY~IEjm7Dl;OF&YLL}P0%FRU#zPeOIu73&I>)ZWJC|4{qE5~wv*QP%M zBN~fDu@&NoU6C2da% zP7d$Lrj_AKdb81>EUt`5V%e#K_7F|0DocCwMfcPz@Qwi(&RUT5!7%wY?2Zqg*+}P8 zTzR|6?CvCRcJ}jtJ_7*SbxL=fd73P&xM1vX$X?QYv3qlMlWFfo@}6w@t(L;+t2!d) zPS|pD&ujM{vTQ4g0muFioYU_3vYvV2=b8t+|Zg`Vgj!EPFYu4Lm zS;hO7;>y=9C;0fFg>A;P?|Uf?+c;8MQW>VXzr8^YU&7rx2AB%>NoHn&jV*rIX}oT5 z>#)P%#5Yfw8bzJ1$dPkW^9oVu%hH3~eDHiPOVc4I9@Xfj%m#cZ)+DXSj$R-i?lj8y zQ(vLW9Hw>8VKo9?Sz7X1!pkWQa2Xcybw54FMEe*4q7}+`8QP< z;~O3kAwPGd9cw+ku0Lnt_>tS?zNsN&r8#4I&=`1=Z%aW=DJH`w%~45z(frHX5l@6- z+>Fp7>9nrMiHZcTiPJ80Yr`^2HXX$iJF;=hWPhTU9md|aiGX{+no`Y-x3fDt?y4~_ z7P~KKiQ3X0@bALRJuF-qb6M9WU8W-Rs`BtNjjbVYKKN-GuBUeRlKu*fR9!E(2dFy` zNt8(2GhGf)h>Yf3ObtEV^f(rgllO^Tv$RVH|8)*uh9|^!;0a~7yRYpqGE@TqrvU%} zO?UBczs(n@O`c}gN~?QswaB&AV>Ln&%J@XBHSo`hAZy&pXYMLa#V?D~e;(zwWgpT; z%H?Mds$&bfNZf&*=H@0|Y5Zq2xEm`SXfIw53OKPdxvm>!QuXm;QBO)7XUjy&ed==w zO2kL-DjYzGbZ3D(kYZ9MXV+$v9%kFr@GhWjMs)9AQr`xHa!w^QX)v|Q074>_&5gXq zcgTqh-vK4X4ydV8x$awaAcO>kom%^T>kS%z9|U&w$r|=nHbU5d#Tm%BW=K;A=f^?9 zbDbud@KrNr);`g*Uul-qS=J0z`bM8Z%wyTmvZ0&pu_S?2y&}D)ywN@NPP}YaA5cKI z_{#>~V^8|<8V0+-i&mDAXlpFz1Vt@6ZT_z!A^+e#|J{xpjXNJ3BX-Qb-URhb;~5-c z1=A#SN2J4CICz1&oh8b_%i@nHS8b#g0B%Pc(I-N9UiM!tag zOp0e1>OK+;DbFjXwXtPz_O@Hcu)C_3+*E_VKyk@W_^9hF{K zP*#thtZYH<#H$yOoP5a@o?WXaiOWVHBPnV97?D@N&*Sgzt*i4=r@VAYieh=+zMKHl zDNp0#;0EIytTUb|ihMHk@j-0dq}D0kRaVt}QmgY%(Q(L35A@l)8iI1kyxMJ6^rAo( z_seo?tHHAA6plR4_@{Fua~|}S_e>avsjtNIl;=m2Tmvc?-(l4`dWT3(mR?~OSD^`# 
z?J*fh#7TUxPW7WyFnKhu=7n;D+_PkcRLZkxRLVjrF+SK3_4t5U)VjWQv56!5Vp{~?RuIpLY^{Pt3)d;;IpCv&UQ2sXl z3~d4CI=x+n)0Wamcty|L`km1xT`1J-Mo?ZJjS1Ubn%v^f9k@YxPl1X$(bxy^;C-_j oeH-h1n_Hl_ihF*|EP?Si)7PKPR{F8upUeLG;m?2V&+mi(2IpsQ9RL6T literal 0 HcmV?d00001 diff --git a/PyTorch/build-in/Classification/Sequencer2D/sequencer2D_loss.txt b/PyTorch/build-in/Classification/Sequencer2D/sequencer2D_loss.txt new file mode 100644 index 000000000..b03ac20a0 --- /dev/null +++ b/PyTorch/build-in/Classification/Sequencer2D/sequencer2D_loss.txt @@ -0,0 +1,29 @@ +=== CUDA === +4.605500 4.604200 4.575900 4.704500 4.634000 4.522500 4.800900 4.807600 4.625900 4.705800 +4.870400 4.584700 4.773200 5.285000 5.075000 4.986000 4.843400 5.315900 4.963500 5.185100 +4.813800 4.679400 4.381800 4.726500 5.508500 5.427500 4.998200 4.788300 5.352800 4.815200 +5.005500 4.868500 5.653600 5.161500 5.161500 5.158900 4.890200 5.703200 4.891300 4.804300 +5.163300 4.468000 4.668200 5.157900 4.791100 5.295500 4.785100 5.287600 4.769000 5.002200 +4.607700 4.756000 4.464200 4.796000 4.427400 4.580400 4.422300 4.376400 4.660700 4.478500 +4.610200 4.409400 4.520500 4.131200 4.373200 4.561000 4.492000 4.512700 4.431700 4.509900 +4.408100 4.576500 4.327800 4.515700 4.364100 4.435900 4.538600 4.352800 4.364800 4.448300 +4.398600 4.398600 4.390100 4.132400 4.325700 4.314900 4.226500 4.415600 4.375900 4.281100 +4.583200 4.348900 4.366900 4.351000 4.382300 4.401900 4.349200 4.420900 4.401300 4.013700 + +=== SDAA === +4.605500 4.605100 4.602800 4.696800 4.589000 4.522900 4.831200 4.835000 4.672600 4.792900 +4.908600 4.605800 4.800500 5.228600 4.969200 4.998500 4.902800 5.385300 4.973600 5.261600 +4.767300 4.740800 4.446100 4.766200 5.479500 5.351100 4.987500 4.836500 5.347800 4.788500 +5.073200 4.795000 5.593800 5.038200 5.095500 5.117600 4.916400 5.822300 4.931600 4.759500 +5.274200 4.597200 4.595800 5.024100 4.709400 5.352800 4.876000 5.402900 4.869000 5.047900 +4.653400 4.883700 4.440900 4.846500 4.612600 4.783900 4.461900 
4.499700 4.517700 4.536100 +4.661900 4.476400 4.500400 4.225600 4.343800 4.452300 4.432100 4.423600 4.391900 4.565900 +4.413300 4.627400 4.254300 4.585600 4.431200 4.431100 4.494100 4.345000 4.356800 4.368900 +4.397400 4.424700 4.323700 4.229000 4.253100 4.351700 4.308600 4.452100 4.401300 4.298300 +4.579000 4.402500 4.454300 4.313800 4.367200 4.409500 4.298600 4.480800 4.438000 4.073200 + +=== RESULT === +MeanRelativeError: 0.0033275151217559127 +MeanAbsoluteError: 0.014867000000000035 +Rule,mean_relative_error 0.0033275151217559127 +pass mean_relative_error=0.0033275151217559127 <= 0.05 or mean_absolute_error=0.014867000000000035 <= 0.0002 diff --git a/PyTorch/build-in/Classification/Sequencer2D/utils/__init__.py b/PyTorch/build-in/Classification/Sequencer2D/utils/__init__.py new file mode 100644 index 000000000..f04000a2f --- /dev/null +++ b/PyTorch/build-in/Classification/Sequencer2D/utils/__init__.py @@ -0,0 +1 @@ +from .timm import * \ No newline at end of file diff --git a/PyTorch/build-in/Classification/Sequencer2D/utils/helpers.py b/PyTorch/build-in/Classification/Sequencer2D/utils/helpers.py new file mode 100644 index 000000000..88f5dadbe --- /dev/null +++ b/PyTorch/build-in/Classification/Sequencer2D/utils/helpers.py @@ -0,0 +1,76 @@ +# Copyright (c) 2022. 
# Yuki Tatsunami
# Licensed under the Apache License, Version 2.0 (the "License");

from itertools import repeat
import functools
import collections

import torch
from torch import nn


def rsetattr(obj, attr, val):
    """Recursive ``setattr`` supporting dotted attribute paths, e.g. ``"a.b.c"``."""
    pre, _, post = attr.rpartition('.')
    return setattr(rgetattr(obj, pre) if pre else obj, post, val)


def rgetattr(obj, attr, *args):
    """Recursive ``getattr`` supporting dotted attribute paths.

    An optional single extra positional argument acts as the default,
    mirroring the built-in ``getattr``.
    """
    def _getattr(obj, attr):
        return getattr(obj, attr, *args)

    return functools.reduce(_getattr, [obj] + attr.split('.'))


# From PyTorch internals
def _ntuple(n):
    """Build a parser that repeats a scalar ``n`` times and passes
    non-string iterables through unchanged."""
    def parse(x):
        # BUGFIX: str is Iterable, but must be treated as a scalar here --
        # otherwise to_2tuple('same') would return 'same' instead of
        # ('same', 'same'). This matches upstream timm's _ntuple behavior.
        if isinstance(x, collections.abc.Iterable) and not isinstance(x, str):
            return x
        return tuple(repeat(x, n))

    return parse


to_1tuple = _ntuple(1)
to_2tuple = _ntuple(2)
to_3tuple = _ntuple(3)
to_4tuple = _ntuple(4)
to_ntuple = _ntuple


def train_rnn(model):
    """Recursively put every ``nn.LSTM``/``nn.GRU``/``nn.RNN`` submodule of
    *model* into train mode.

    NOTE(review): presumably needed because cuDNN RNN backward requires the
    module to be in training mode -- confirm against callers.
    """
    for m in model.children():
        if isinstance(m, (nn.LSTM, nn.GRU, nn.RNN)):
            m.train()
        else:
            train_rnn(m)


def normalize_fn(tensor, mean, std):
    """Per-channel normalization ``(tensor - mean) / std``.

    Assumes *tensor* is NCHW and *mean*/*std* are 1-D per-channel tensors
    (they are broadcast to shape ``(1, C, 1, 1)``).
    """
    mean = mean[None, :, None, None]
    std = std[None, :, None, None]
    return tensor.sub(mean).div(std)


class NormalizeByChannelMeanStd(nn.Module):
    """``nn.Module`` wrapper around :func:`normalize_fn`.

    Mean and std are registered as buffers so they follow the module across
    ``.to()``/``.cuda()`` and are included in ``state_dict``.
    """

    def __init__(self, mean, std):
        super().__init__()
        if not isinstance(mean, torch.Tensor):
            mean = torch.tensor(mean)
        if not isinstance(std, torch.Tensor):
            std = torch.tensor(std)
        self.register_buffer("mean", mean)
        self.register_buffer("std", std)

    def forward(self, tensor):
        return normalize_fn(tensor, self.mean, self.std)

    def extra_repr(self):
        return 'mean={}, std={}'.format(self.mean, self.std)


class WithNone:
    """No-op context manager, usable where an optional context (e.g. an
    autocast scope) may be absent."""

    def __enter__(self):
        pass

    def __exit__(self, exc_type, exc_value, traceback):
        pass
""" Checkpoint Saver

Track top-n training checkpoints and maintain recovery checkpoints on specified intervals.

Hacked together by / Copyright 2020 Ross Wightman
"""

import glob
import logging
import operator
import os

import torch

from timm.utils.model import unwrap_model, get_state_dict


_logger = logging.getLogger(__name__)


class CheckpointSaver:
    """Maintain the top-``max_history`` checkpoints ranked by a metric.

    Also keeps a rolling 'last' checkpoint, a 'model_best' hard link, and
    rolling recovery checkpoints, with optional upload of best/last files
    via ClearML when both clearml and s3 logging are enabled.
    """

    def __init__(
            self,
            model,
            optimizer,
            args=None,
            model_ema=None,
            amp_scaler=None,
            checkpoint_prefix='checkpoint',
            recovery_prefix='recovery',
            checkpoint_dir='',
            recovery_dir='',
            decreasing=False,
            max_history=10,
            unwrap_fn=unwrap_model,
            log_clearml=False,
            log_s3=False,
    ):
        # objects whose state_dicts get serialized
        self.model = model
        self.optimizer = optimizer
        self.args = args
        self.model_ema = model_ema
        self.amp_scaler = amp_scaler

        # bookkeeping: (filename, metric) tuples, best-first
        self.checkpoint_files = []
        self.best_epoch = None
        self.best_metric = None
        self.curr_recovery_file = ''
        self.last_recovery_file = ''

        # configuration
        self.checkpoint_dir = checkpoint_dir
        self.recovery_dir = recovery_dir
        self.save_prefix = checkpoint_prefix
        self.recovery_prefix = recovery_prefix
        self.extension = '.pth.tar'
        self.decreasing = decreasing  # a lower metric is better if True
        self.cmp = operator.lt if decreasing else operator.gt  # True if lhs better than rhs
        self.max_history = max_history
        self.unwrap_fn = unwrap_fn
        # uploads happen only when BOTH clearml and s3 logging were requested
        self.log_s3 = log_clearml and log_s3
        assert self.max_history >= 1

        if self.log_s3:
            from clearml import Task
            self.task = Task.current_task()

    def save_checkpoint(self, epoch, metric=None):
        """Save 'last', and keep it in the top-n list when good enough.

        Returns (best_metric, best_epoch), or (None, None) if no metric yet.
        """
        assert epoch >= 0
        tmp_path = os.path.join(self.checkpoint_dir, 'tmp' + self.extension)
        last_path = os.path.join(self.checkpoint_dir, 'last' + self.extension)
        self._save(tmp_path, epoch, metric)
        if os.path.exists(last_path):
            os.unlink(last_path)  # required for Windows support.
        os.rename(tmp_path, last_path)

        worst = self.checkpoint_files[-1] if self.checkpoint_files else None
        keep = (
            len(self.checkpoint_files) < self.max_history
            or metric is None
            or self.cmp(metric, worst[1])
        )
        if keep:
            if len(self.checkpoint_files) >= self.max_history:
                self._cleanup_checkpoints(1)
            filename = '-'.join([self.save_prefix, str(epoch)]) + self.extension
            save_path = os.path.join(self.checkpoint_dir, filename)
            os.link(last_path, save_path)  # hard link, no extra copy on disk
            self.checkpoint_files.append((save_path, metric))
            # sort in descending order if a lower metric is not better
            self.checkpoint_files = sorted(
                self.checkpoint_files, key=lambda x: x[1],
                reverse=not self.decreasing)

            checkpoints_str = "Current checkpoints:\n"
            for c in self.checkpoint_files:
                checkpoints_str += ' {}\n'.format(c)
            _logger.info(checkpoints_str)

            if metric is not None and (self.best_metric is None or self.cmp(metric, self.best_metric)):
                self.best_epoch = epoch
                self.best_metric = metric
                best_path = os.path.join(self.checkpoint_dir, 'model_best' + self.extension)
                if os.path.exists(best_path):
                    os.unlink(best_path)
                os.link(last_path, best_path)
                if self.log_s3:
                    self.task.update_output_model(best_path)
        if self.log_s3:
            self.task.update_output_model(last_path)

        if self.best_metric is None:
            return None, None
        return self.best_metric, self.best_epoch

    def _save(self, save_path, epoch, metric=None):
        """Serialize model/optimizer (and optional EMA/scaler) state to disk."""
        save_state = {
            'epoch': epoch,
            'arch': type(self.model).__name__.lower(),
            'state_dict': get_state_dict(self.model, self.unwrap_fn),
            'optimizer': self.optimizer.state_dict(),
            'version': 2,  # version < 2 increments epoch before save
        }
        if self.args is not None:
            save_state['arch'] = self.args.model
            save_state['args'] = self.args
        if self.amp_scaler is not None:
            save_state[self.amp_scaler.state_dict_key] = self.amp_scaler.state_dict()
        if self.model_ema is not None:
            save_state['state_dict_ema'] = get_state_dict(self.model_ema, self.unwrap_fn)
        if metric is not None:
            save_state['metric'] = metric
        torch.save(save_state, save_path)

    def _cleanup_checkpoints(self, trim=0):
        """Delete the worst checkpoints so at most ``max_history - trim`` remain."""
        trim = min(len(self.checkpoint_files), trim)
        delete_index = self.max_history - trim
        if delete_index < 0 or len(self.checkpoint_files) <= delete_index:
            return
        doomed = self.checkpoint_files[delete_index:]
        for entry in doomed:
            try:
                _logger.debug("Cleaning checkpoint: {}".format(entry))
                os.remove(entry[0])
            except Exception as e:
                _logger.error("Exception '{}' while deleting checkpoint".format(e))
        self.checkpoint_files = self.checkpoint_files[:delete_index]

    def save_recovery(self, epoch, batch_idx=0):
        """Write a recovery checkpoint, removing the one before last."""
        assert epoch >= 0
        filename = '-'.join([self.recovery_prefix, str(epoch), str(batch_idx)]) + self.extension
        save_path = os.path.join(self.recovery_dir, filename)
        self._save(save_path, epoch)
        if os.path.exists(self.last_recovery_file):
            try:
                _logger.debug("Cleaning recovery: {}".format(self.last_recovery_file))
                os.remove(self.last_recovery_file)
            except Exception as e:
                _logger.error("Exception '{}' while removing {}".format(e, self.last_recovery_file))
        self.last_recovery_file = self.curr_recovery_file
        self.curr_recovery_file = save_path

    def find_recovery(self):
        """Return the first (oldest-sorted) recovery checkpoint path, or ''."""
        recovery_path = os.path.join(self.recovery_dir, self.recovery_prefix)
        files = glob.glob(recovery_path + '*' + self.extension)
        files = sorted(files)
        return files[0] if len(files) else ''
""" Dataset Factory

Hacked together by / Copyright 2021, Ross Wightman
"""
import os

from timm.data import IterableImageDataset, ImageDataset
from torchvision.datasets import CIFAR100, CIFAR10, MNIST, QMNIST, KMNIST, FashionMNIST, ImageNet, ImageFolder

try:
    from torchvision.datasets import Places365
    has_places365 = True
except ImportError:
    has_places365 = False
try:
    from torchvision.datasets import INaturalist
    has_inaturalist = True
except ImportError:
    has_inaturalist = False


from datasets import Flowers102, StanfordCars

# Map of lowercase dataset name -> torchvision dataset class.
# Fix: 'qmnist' was previously only reachable via the typo 'qmist';
# the misspelled alias is kept for backward compatibility.
_TORCH_BASIC_DS = dict(
    cifar10=CIFAR10,
    cifar100=CIFAR100,
    mnist=MNIST,
    qmnist=QMNIST,
    qmist=QMNIST,  # deprecated misspelling, kept so existing configs keep working
    kmnist=KMNIST,
    fashion_mnist=FashionMNIST,
)
_TRAIN_SYNONYM = {'train', 'training'}
_EVAL_SYNONYM = {'val', 'valid', 'validation', 'eval', 'evaluation'}


def _search_split(root, split):
    """Resolve ``root`` to a split-specific sub-folder when one exists.

    Strips any TFDS-style slice suffix (e.g. ``train[:90%]``) and falls back
    to the known train/eval synonyms before giving up and returning ``root``.
    """
    split_name = split.split('[')[0]
    try_root = os.path.join(root, split_name)
    if os.path.exists(try_root):
        return try_root

    def _try(syn):
        for s in syn:
            try_root = os.path.join(root, s)
            if os.path.exists(try_root):
                return try_root
        return root

    if split_name in _TRAIN_SYNONYM:
        root = _try(_TRAIN_SYNONYM)
    elif split_name in _EVAL_SYNONYM:
        root = _try(_EVAL_SYNONYM)
    return root


def create_dataset(
        name,
        root,
        split='validation',
        search_split=True,
        class_map=None,
        load_bytes=False,
        is_training=False,
        download=False,
        batch_size=None,
        repeats=0,
        **kwargs
):
    """ Dataset factory method

    In parenthesis after each arg are the type of dataset supported for each arg, one of:
      * folder - default, timm folder (or tar) based ImageDataset
      * torch - torchvision based datasets
      * TFDS - Tensorflow-datasets wrapper in IterableDataset interface via IterableImageDataset
      * all - any of the above

    Args:
        name: dataset name, empty is okay for folder based datasets
        root: root folder of dataset (all)
        split: dataset split (all)
        search_split: search for split specific child fold from root so one can specify
            `imagenet/` instead of `/imagenet/val`, etc on cmd line / config. (folder, torch/folder)
        class_map: specify class -> index mapping via text file or dict (folder)
        load_bytes: load data, return images as undecoded bytes (folder)
        download: download dataset if not present and supported (TFDS, torch)
        is_training: create dataset in train mode, this is different from the split.
            For Iterable / TDFS it enables shuffle, ignored for other datasets. (TFDS)
        batch_size: batch size hint for (TFDS)
        repeats: dataset repeats per iteration i.e. epoch (TFDS)
        **kwargs: other args to pass to dataset

    Returns:
        Dataset object
    """
    name = name.lower()
    if name.startswith('torch/'):
        name = name.split('/', 2)[-1]
        torch_kwargs = dict(root=root, download=download, **kwargs)
        if name in _TORCH_BASIC_DS:
            ds_class = _TORCH_BASIC_DS[name]
            use_train = split in _TRAIN_SYNONYM
            ds = ds_class(train=use_train, **torch_kwargs)
        elif name == 'flowers':
            if split in _TRAIN_SYNONYM:
                split = 'train'
            elif split in _EVAL_SYNONYM:
                split = 'test'
            ds = Flowers102(split=split, **torch_kwargs)
        elif name == 'cars':
            if split in _TRAIN_SYNONYM:
                split = 'train'
            elif split in _EVAL_SYNONYM:
                split = 'test'
            ds = StanfordCars(split=split, **torch_kwargs)
        elif name == 'inaturalist' or name == 'inat':
            assert has_inaturalist, 'Please update to PyTorch 1.10, torchvision 0.11+ for Inaturalist'
            target_type = 'full'
            split_split = split.split('/')
            if len(split_split) > 1:
                # e.g. 'kingdom_phylum/train' -> target_type list, split tail
                target_type = split_split[0].split('_')
                if len(target_type) == 1:
                    target_type = target_type[0]
                split = split_split[-1]
            if split in _TRAIN_SYNONYM:
                split = '2021_train'
            elif split in _EVAL_SYNONYM:
                split = '2021_valid'
            ds = INaturalist(version=split, target_type=target_type, **torch_kwargs)
        elif name == 'places365':
            assert has_places365, 'Please update to a newer PyTorch and torchvision for Places365 dataset.'
            if split in _TRAIN_SYNONYM:
                split = 'train-standard'
            elif split in _EVAL_SYNONYM:
                split = 'val'
            ds = Places365(split=split, **torch_kwargs)
        elif name == 'imagenet':
            if split in _EVAL_SYNONYM:
                split = 'val'
            ds = ImageNet(split=split, **torch_kwargs)
        elif name == 'image_folder' or name == 'folder':
            # in case torchvision ImageFolder is preferred over timm ImageDataset for some reason
            if search_split and os.path.isdir(root):
                # look for split specific sub-folder in root
                root = _search_split(root, split)
            ds = ImageFolder(root, **kwargs)
        else:
            assert False, f"Unknown torchvision dataset {name}"
    elif name.startswith('tfds/'):
        ds = IterableImageDataset(
            root, parser=name, split=split, is_training=is_training,
            download=download, batch_size=batch_size, repeats=repeats, **kwargs)
    else:
        # FIXME support more advance split cfg for ImageFolder/Tar datasets in the future
        if search_split and os.path.isdir(root):
            # look for split specific sub-folder in root
            root = _search_split(root, split)
        ds = ImageDataset(root, parser=name, class_map=class_map, load_bytes=load_bytes, **kwargs)
    return ds
# Copyright (c) 2022. Yuki Tatsunami
# Licensed under the Apache License, Version 2.0 (the "License");

import csv
from collections import OrderedDict


def update_summary(epoch, train_metrics, eval_metrics, filename, write_header=False, log_wandb=False, log_clearml=False):
    """Append one epoch's train/eval metrics as a row to a CSV summary file.

    Args:
        epoch: epoch index, written to the ``epoch`` column.
        train_metrics: mapping of metric name -> value; columns ``train_<name>``.
        eval_metrics: mapping of metric name -> value; columns ``eval_<name>``.
        filename: CSV file to append to.
        write_header: write the header row (use on the first call only).
        log_wandb: also log the row to Weights & Biases (imported lazily).
        log_clearml: also report per-metric scalars to ClearML (imported lazily).
    """
    rowd = OrderedDict(epoch=epoch)
    rowd.update([('train_' + k, v) for k, v in train_metrics.items()])
    rowd.update([('eval_' + k, v) for k, v in eval_metrics.items()])
    if log_wandb:
        import wandb
        wandb.log(rowd)
    if log_clearml:
        from clearml import Logger
        for k, v in train_metrics.items():
            Logger.current_logger().report_scalar(
                "train", k, iteration=epoch, value=v)
        for k, v in eval_metrics.items():
            Logger.current_logger().report_scalar(
                "eval", k, iteration=epoch, value=v)

    # Fix: open with newline='' as required by the csv module docs; without
    # it the writer emits a blank line between rows on Windows.
    with open(filename, mode='a', newline='') as cf:
        dw = csv.DictWriter(cf, fieldnames=rowd.keys())
        if write_header:  # first iteration (epoch == 1 can't be used)
            dw.writeheader()
        dw.writerow(rowd)
def ensure_cublas_workspace(config=":4096:8"):
    """Set CUBLAS_WORKSPACE_CONFIG for reproducible cuBLAS kernels.

    Strongly prefer exporting this env var in the shell / at the very top of
    the entry script BEFORE ``import torch``; setting it at runtime may be
    too late if torch was already imported, so the function prints a reminder.
    """
    already = os.environ.get("CUBLAS_WORKSPACE_CONFIG")
    if already:
        print(f"[seed_utils] CUBLAS_WORKSPACE_CONFIG 已存在:{already}")
    else:
        os.environ["CUBLAS_WORKSPACE_CONFIG"] = config
        print(f"[seed_utils] 已设置 CUBLAS_WORKSPACE_CONFIG={config} (注意:请在 import torch 前设置以保证生效)")


def set_global_seed(seed: int = 42, set_threads: bool = True):
    """Seed every RNG (python/numpy/torch) and enable deterministic mode.

    For full effect, call ensure_cublas_workspace() (or export
    CUBLAS_WORKSPACE_CONFIG in the shell) before torch is first imported.
    """
    ensure_cublas_workspace()  # sets the env var and prints a reminder
    os.environ["PYTHONHASHSEED"] = str(seed)

    if set_threads:
        os.environ["OMP_NUM_THREADS"] = "1"
        os.environ["MKL_NUM_THREADS"] = "1"

    random.seed(seed)
    np.random.seed(seed)

    # Import torch late so the env vars above can still take effect.
    import torch
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(seed)
        torch.cuda.manual_seed_all(seed)
    # Force determinism (PyTorch raises on non-deterministic ops when enabled).
    try:
        torch.use_deterministic_algorithms(True)
    except Exception as e:
        print("[seed_utils] 设置 deterministic 模式时出错:", e)
        print("[seed_utils] 请确认 CUBLAS_WORKSPACE_CONFIG 已在 import torch 之前设置。")

    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False

    if set_threads:
        torch.set_num_threads(1)
        # Fix: set_num_interop_threads raises RuntimeError if inter-op
        # parallel work has already started; guard it so seeding still
        # succeeds instead of crashing the whole run.
        try:
            torch.set_num_interop_threads(1)
        except RuntimeError as e:
            print("[seed_utils] 设置 deterministic 模式时出错:", e)

    print(f"[seed_utils] 全局 seed 已设置为 {seed}")


set_global_seed(2025)

# Generic training template (prefers a local Model import; supports DDP /
# single-device, AMP, resume, logging, checkpointing).
# Save as train_template_localmodel.py
import torch
import torch.nn as nn
import torch.optim as optim
import torch.backends.cudnn as cudnn
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision.models as tv_models

import torch.distributed as dist
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.utils.data import DataLoader
from torch.utils.data.distributed import DistributedSampler

from torch.sdaa import amp  # Tecang SDAA accelerator AMP
# from torch.cuda import amp  # swap in for CUDA devices
class AverageMeter(object):
    """Tracks the latest value and the running average of a metric."""

    def __init__(self, name='Meter', fmt=':.4f'):
        self.name = name
        self.fmt = fmt
        self.reset()

    def reset(self):
        """Clear all statistics."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Record ``val`` observed ``n`` times."""
        self.val = val
        self.sum += val * n
        self.count += n
        # max(1, count) guards the (unreachable after an update) divide-by-zero
        self.avg = self.sum / max(1, self.count)

    def __str__(self):
        fmtstr = '{name} {val' + self.fmt + '} (avg {avg' + self.fmt + '})'
        return fmtstr.format(name=self.name, val=self.val, avg=self.avg)


def accuracy(output, target, topk=(1,)):
    """Compute precision@k for each k in ``topk``.

    Args:
        output: (N, C) logits/scores.
        target: (N,) integer class labels.
        topk: tuple of k values.

    Returns:
        List of 0-dim tensors, one per k, each a percentage in [0, 100].
    """
    with torch.no_grad():
        maxk = max(topk)
        batch_size = target.size(0)

        # output: (N, C) -> pred: (maxk, N)
        _, pred = output.topk(maxk, 1, True, True)
        pred = pred.t()  # (maxk, N)
        correct = pred.eq(target.view(1, -1).expand_as(pred))  # (maxk, N) bool

        res = []
        for k in topk:
            # Flatten the first k rows, count hits (0-dim tensor), then
            # convert to a percentage; returned as tensors for compatibility.
            correct_k = correct[:k].reshape(-1).float().sum()
            res.append(correct_k.mul_(100.0 / batch_size))
        return res


def save_checkpoint(state, is_best, save_dir, filename='checkpoint.pth'):
    """Persist ``state`` to ``save_dir``; duplicate it as model_best.pth when best."""
    save_path = os.path.join(save_dir, filename)
    torch.save(state, save_path)
    if is_best:
        best_path = os.path.join(save_dir, 'model_best.pth')
        torch.save(state, best_path)


def set_seed(seed, deterministic=False):
    """Seed python/numpy/torch RNGs; optionally force deterministic cuDNN."""
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    if deterministic:
        cudnn.deterministic = True
        cudnn.benchmark = False
    else:
        cudnn.deterministic = False
        cudnn.benchmark = True


# ----------------------------
# Argument parser
# ----------------------------
def parse_args():
    """Build and parse the CLI arguments for the training template."""
    parser = argparse.ArgumentParser(description='Generic PyTorch training template (DDP/AMP) with LocalModel priority')
    parser.add_argument('--name', default='run', type=str, help='experiment name (log/checkpoints dir)')
    parser.add_argument('--seed', default=42, type=int, help='random seed')
    parser.add_argument('--arch', default='None', type=str, help='model name')
    parser.add_argument('--deterministic', action='store_true', help='set cudnn deterministic (may be slower)')
    parser.add_argument('--dataset', default='cifar10', choices=['cifar10', 'cifar100', 'imagenet', 'custom'], help='which dataset')
    parser.add_argument('--datapath', default='./data', type=str, help='dataset root / imagenet root / custom root')
    parser.add_argument('--imagenet_dir', default='./imagenet', type=str, help='if dataset=imagenet, path to imagenet root')
    parser.add_argument('--custom_eval_dir', default=None, help='if dataset=custom, provide val dir')
    parser.add_argument('--num_workers', default=4, type=int, help='dataloader workers per process')
    parser.add_argument('--epochs', default=200, type=int)
    parser.add_argument('--steps', default=0, type=int, help='max steps to run (if >0, training will stop when global_step reaches this).')
    parser.add_argument('--batch_size', default=128, type=int)
    parser.add_argument('--model_name', default='resnet18', help='torchvision model name or python path e.g. mypkg.mymodule.Model (used if no local Model)')
    parser.add_argument('--num_classes', default=None, type=int, help='override num classes (auto-detect for common sets)')
    parser.add_argument('--pretrained', action='store_true', help='use torchvision pretrained weights when available')
    parser.add_argument('--optimizer', default='sgd', choices=['sgd', 'adam', 'adamw'], help='optimizer')
    parser.add_argument('--lr', '--learning_rate', default=0.1, type=float)
    parser.add_argument('--momentum', default=0.9, type=float)
    parser.add_argument('--weight_decay', default=5e-4, type=float)
    parser.add_argument('--nesterov', action='store_true')
    parser.add_argument('--scheduler', default='multistep', choices=['multistep', 'step', 'cosine', 'none'], help='lr scheduler')
    parser.add_argument('--milestones', default='100,150', type=str, help='milestones for multistep (comma sep)')
    parser.add_argument('--step_size', default=30, type=int, help='step size for StepLR or cosine max epochs')
    parser.add_argument('--gamma', default=0.1, type=float)
    parser.add_argument('--scheduler_step_per_batch', action='store_true', help='call scheduler.step() per batch (for some schedulers)')
    parser.add_argument('--resume', default='', type=str, help='path to checkpoint to resume from')
    parser.add_argument('--start_epoch', default=0, type=int)
    parser.add_argument('--print_freq', default=100, type=int)
    parser.add_argument('--save_freq', default=10, type=int, help='save checkpoint every N epochs (rank0 only)')
    parser.add_argument('--amp', action='store_true', default=True, help='use automatic mixed precision (AMP)')
    # Fix: with store_true + default=True, AMP could never be disabled from
    # the CLI; --no_amp writes False into the same dest (backward compatible).
    parser.add_argument('--no_amp', dest='amp', action='store_false', help='disable automatic mixed precision (AMP)')
    parser.add_argument('--grad_accum_steps', default=1, type=int, help='gradient accumulation steps')
    parser.add_argument('--local_rank', default=None, type=int, help='local rank passed by torchrun (if any). Use -1 or None for non-distributed')
    parser.add_argument('--cutmix_prob', default=0.0, type=float)
    parser.add_argument('--beta', default=1.0, type=float)
    parser.add_argument('--seed_sampler', default=False, action='store_true', help='set sampler epoch seeds to make deterministic distributed shuffling')
    args = parser.parse_args()
    args.milestones = [int(x) for x in args.milestones.split(',')] if args.milestones else []
    return args


# ----------------------------
# build model (local Model takes priority)
# ----------------------------
def build_model_with_local_priority(args, device=None):
    """Import module named by ``args.arch`` and instantiate its ``Model``.

    Aborts with a clear error if the module or class is missing. ``device``
    is currently unused (the caller moves the model).
    """
    try:
        # dynamic import, e.g. args.arch = "rexnet"
        mod = importlib.import_module(args.arch)
        Model = getattr(mod, "Model")  # fetch Model class from the module
    except Exception as e:
        raise RuntimeError(
            f"无法导入模型模块 '{args.arch}' 或未找到类 Model。"
            f"\n错误信息:{e}"
        )

    # resolve number of classes from the dataset choice
    if args.dataset == 'cifar10':
        num_classes = 10
    elif args.dataset == 'cifar100':
        num_classes = 100
    else:
        print(f"[ERROR] 不支持的数据集类型:{args.dataset},无法确定类别数。程序终止。")
        sys.exit(1)

    try:
        model = Model(num_classes)
    except Exception as e:
        raise RuntimeError(
            f"Model() 实例化失败,请检查模型构造函数。\n错误信息:{e}"
        )

    return model


# ----------------------------
# Data loader factory
# ----------------------------
def build_dataloaders(args, rank, world_size):
    """Build train/val DataLoaders for cifar10/cifar100/imagenet/custom.

    Returns (train_loader, val_loader, num_classes, train_sampler); the
    sampler is a DistributedSampler only when dist is initialized and
    world_size > 1, otherwise None.
    """
    if args.dataset == 'cifar10' or args.dataset == 'cifar100':
        mean = (0.4914, 0.4822, 0.4465)
        # NOTE(review): both branches look like CIFAR-10-style std values;
        # CIFAR-100 std is usually ~(0.2675, 0.2565, 0.2761) — confirm intent.
        std = (0.2470, 0.2435, 0.2616) if args.dataset == 'cifar10' else (0.2023, 0.1994, 0.2010)

        # ImageNet-style augmentation (in use since 2025/12/3, starting with
        # the visformer model): upscale to 256, then random-crop to 224.
        train_transform = transforms.Compose([
            transforms.Resize(256),
            transforms.RandomCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize(mean, std),
        ])
        test_transform = transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean, std),
        ])
        root = args.datapath
        if args.dataset == 'cifar10':
            train_set = datasets.CIFAR10(root=root, train=True, download=False, transform=train_transform)
            val_set = datasets.CIFAR10(root=root, train=False, download=False, transform=test_transform)
            num_classes = 10
        else:
            train_set = datasets.CIFAR100(root=root, train=True, download=False, transform=train_transform)
            val_set = datasets.CIFAR100(root=root, train=False, download=False, transform=test_transform)
            num_classes = 100

    elif args.dataset == 'imagenet':
        train_dir = os.path.join(args.imagenet_dir, 'train')
        val_dir = os.path.join(args.imagenet_dir, 'val')
        train_transform = transforms.Compose([
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
        ])
        test_transform = transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
        ])
        train_set = datasets.ImageFolder(train_dir, train_transform)
        val_set = datasets.ImageFolder(val_dir, test_transform)
        num_classes = args.num_classes or 1000

    elif args.dataset == 'custom':
        train_dir = os.path.join(args.datapath, 'train')
        val_dir = args.custom_eval_dir or os.path.join(args.datapath, 'val')
        train_transform = transforms.Compose([
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
        ])
        test_transform = transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
        ])
        train_set = datasets.ImageFolder(train_dir, train_transform)
        val_set = datasets.ImageFolder(val_dir, test_transform)
        num_classes = len(train_set.classes)
    else:
        raise ValueError("Unknown dataset")

    if dist.is_initialized() and world_size > 1:
        train_sampler = DistributedSampler(train_set, num_replicas=world_size, rank=rank, shuffle=True)
    else:
        train_sampler = None

    train_loader = DataLoader(train_set,
                              batch_size=args.batch_size,
                              shuffle=(train_sampler is None),
                              num_workers=args.num_workers,
                              pin_memory=True,
                              sampler=train_sampler,
                              drop_last=False)
    # NOTE(review): validation has no DistributedSampler, so under DDP every
    # rank evaluates the full val set — confirm this is intended.
    val_loader = DataLoader(val_set,
                            batch_size=args.batch_size,
                            shuffle=False,
                            num_workers=args.num_workers,
                            pin_memory=True)

    return train_loader, val_loader, num_classes, train_sampler
model(images) + loss = criterion(outputs, targets) / args.grad_accum_steps + + if args.amp: + scaler.scale(loss).backward() + else: + loss.backward() + + # 每当累积步满足 grad_accum_steps 就 step + if (i + 1) % args.grad_accum_steps == 0: + if args.amp: + scaler.step(optimizer) + scaler.update() + else: + optimizer.step() + optimizer.zero_grad() + if scheduler is not None and args.scheduler_step_per_batch: + scheduler.step() + + with torch.no_grad(): + acc1, acc5 = accuracy(outputs, targets, topk=(1,5)) + losses.update(loss.item() * args.grad_accum_steps, images.size(0)) + top1.update(acc1.item(), images.size(0)) + top5.update(acc5.item(), images.size(0)) + + batch_time.update(time.time() - end) + end = time.time() + + # increment global step AFTER processing this batch + global_step += 1 + + # per-step print (controlled by print_freq) + if ((global_step % args.print_freq == 0) or (i == iters - 1)) and ((dist.get_rank() if dist.is_initialized() else 0) == 0): + lr = optimizer.param_groups[0]['lr'] + print(f"Epoch[{epoch}]:step[{i+1}/{iters}] step_train_loss {losses.val:.4f} acc1 {top1.val:.2f} acc5 {top5.val:.2f}") + + # collect per-step log + step_logs.append({ + 'epoch': epoch, + 'batch_idx': i, + 'global_step': global_step, + 'lr': optimizer.param_groups[0]['lr'], + 'loss': losses.val, + 'loss_avg': losses.avg, + 'acc1': top1.val, + 'acc1_avg': top1.avg, + 'acc5': top5.val, + 'acc5_avg': top5.avg, + 'time': batch_time.val + }) + + # if reached max_global_steps inside epoch, break (handled at loop start next iter) + if (max_global_steps is not None) and (global_step >= max_global_steps): + if (dist.get_rank() if dist.is_initialized() else 0) == 0: + print(f"[Info] 达到 max_global_steps={max_global_steps},将在 epoch 内提前停止。") + break + + # --- flush remaining grads if needed (handle gradient accumulation leftovers) --- + processed_batches = global_step - global_step_start # 实际处理的 batch 数 + if args.grad_accum_steps > 1 and (processed_batches % args.grad_accum_steps) != 0: + # 
only step if there are gradients + grads_present = any((p.grad is not None and p.requires_grad) for p in model.parameters()) + if grads_present: + if args.amp: + try: + scaler.step(optimizer) + scaler.update() + except Exception as e: + # 防御性:若 scaler.step 因某些原因失败,尝试普通 step(只在极端情况下) + print("[Warning] scaler.step 失败,尝试普通 optimizer.step():", e) + optimizer.step() + else: + optimizer.step() + optimizer.zero_grad() + if scheduler is not None and args.scheduler_step_per_batch: + scheduler.step() + if (dist.get_rank() if dist.is_initialized() else 0) == 0: + print(f"[Info] flushed remaining gradients after early stop (processed_batches={processed_batches}, grad_accum={args.grad_accum_steps}).") + + if scheduler is not None and not args.scheduler_step_per_batch: + scheduler.step() + + return OrderedDict([('loss', losses.avg), ('acc1', top1.avg), ('acc5', top5.avg)]), step_logs, global_step + +def validate(args, model, val_loader, criterion, device, max_batches=None): + """ + Validate on the val_loader. + If max_batches is not None, only process up to that many batches (useful for quick checks). + Returns an OrderedDict with loss/acc1/acc5 (averaged over processed samples). 
+ """ + losses = AverageMeter('Loss') + top1 = AverageMeter('Acc@1') + top5 = AverageMeter('Acc@5') + + model.eval() + processed_batches = 0 + processed_samples = 0 + with torch.no_grad(): + for i, (images, targets) in enumerate(tqdm(val_loader)): + images = images.to(device, non_blocking=True) + targets = targets.to(device, non_blocking=True) + outputs = model(images) + loss = criterion(outputs, targets) + acc1, acc5 = accuracy(outputs, targets, topk=(1,5)) + batch_n = images.size(0) + losses.update(loss.item(), batch_n) + top1.update(acc1.item(), batch_n) + top5.update(acc5.item(), batch_n) + + processed_batches += 1 + processed_samples += batch_n + + if (max_batches is not None) and (processed_batches >= max_batches): + break + + # 如果没处理任何样本,避免除0(不太可能,但防御性) + if processed_samples == 0: + return OrderedDict([('loss', 0.0), ('acc1', 0.0), ('acc5', 0.0)]) + return OrderedDict([('loss', losses.avg), ('acc1', top1.avg), ('acc5', top5.avg)]) + +# ---------------------------- +# Main +# ---------------------------- +def main(): + args = parse_args() + + # handle local_rank from env if not provided + local_rank_env = os.environ.get('LOCAL_RANK', None) + if args.local_rank is None and local_rank_env is not None: + args.local_rank = int(local_rank_env) + + distributed = (args.local_rank is not None and args.local_rank != -1) + if distributed: + dist.init_process_group(backend='nccl', init_method='env://') + rank = dist.get_rank() + world_size = dist.get_world_size() + else: + rank = 0 + world_size = 1 + + if distributed: + torch.cuda.set_device(args.local_rank) + device = torch.device('cuda', args.local_rank) + else: + device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') + + set_seed(args.seed + (rank if distributed else 0), deterministic=args.deterministic) + + save_dir = os.path.join('models', args.name) + if rank == 0: + os.makedirs(save_dir, exist_ok=True) + with open(os.path.join(save_dir, 'args.json'), 'w') as f: + json.dump(vars(args), f, indent=2) 
+ if distributed: + dist.barrier() + + train_loader, val_loader, auto_num_classes, train_sampler = build_dataloaders(args, rank, world_size) + if args.num_classes is None: + args.num_classes = auto_num_classes + + # 使用本地 Model 优先(LocalModel 已在文件顶部尝试导入) + model = build_model_with_local_priority(args, device) + model.to(device) + + if distributed: + model = DDP(model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True) + + criterion = nn.CrossEntropyLoss().to(device) + params = [p for p in model.parameters() if p.requires_grad] + if args.optimizer == 'sgd': + optimizer = optim.SGD(params, lr=args.lr, momentum=args.momentum, + weight_decay=args.weight_decay, nesterov=args.nesterov) + elif args.optimizer == 'adam': + optimizer = optim.Adam(params, lr=args.lr, weight_decay=args.weight_decay) + elif args.optimizer == 'adamw': + optimizer = optim.AdamW(params, lr=args.lr, weight_decay=args.weight_decay) + else: + raise ValueError('Unknown optimizer') + + scheduler = None + if args.scheduler == 'multistep': + scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=args.milestones, gamma=args.gamma) + elif args.scheduler == 'step': + scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=args.step_size, gamma=args.gamma) + elif args.scheduler == 'cosine': + scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=args.epochs) + elif args.scheduler == 'none': + scheduler = None + + scaler = amp.GradScaler() if args.amp else None + + start_epoch = args.start_epoch + best_acc = 0.0 + if args.resume: + if os.path.isfile(args.resume): + ckpt = torch.load(args.resume, map_location='cpu') + model_state = ckpt.get('state_dict', ckpt) + if isinstance(model, DDP): + model.module.load_state_dict(model_state) + else: + model.load_state_dict(model_state) + if 'optimizer' in ckpt: + optimizer.load_state_dict(ckpt['optimizer']) + start_epoch = ckpt.get('epoch', start_epoch) + best_acc = ckpt.get('best_acc', best_acc) + print(f"=> 
resumed from {args.resume}, start_epoch={start_epoch}") + else: + print(f"=> resume path {args.resume} not found") + + log_columns = ['epoch', 'lr', 'loss', 'acc1', 'acc5', 'val_loss', 'val_acc1', 'val_acc5'] + log_df = pd.DataFrame(columns=log_columns) + # step-level log + step_log_columns = ['epoch', 'batch_idx', 'global_step', 'lr', 'loss', 'loss_avg', 'acc1', 'acc1_avg', 'acc5', 'acc5_avg', 'time'] + step_log_df = pd.DataFrame(columns=step_log_columns) + + total_epochs = args.epochs + # global_step计数器(训练过程中跨epoch持续) + global_step = 0 + + epoch = start_epoch + # loop until either epoch criteria or step criteria met + while True: + if train_sampler is not None: + if args.seed_sampler: + train_sampler.set_epoch(epoch + args.seed) + else: + train_sampler.set_epoch(epoch) + + if rank == 0: + print(f"==== Epoch {epoch}/{total_epochs - 1} ====") + + # 如果传入了 args.steps (>0),则把剩余允许的 step 数传给 train_one_epoch, + # 否则 max_global_steps=None(按整 epoch 执行完) + if args.steps and args.steps > 0: + max_global_steps = args.steps + else: + max_global_steps = None + + train_log, step_logs, global_step = train_one_epoch( + args, epoch, model, criterion, optimizer, train_loader, device, scaler, + scheduler, train_sampler, global_step_start=global_step, max_global_steps=max_global_steps + ) + + # 如果启用了按 steps 的模式且已经达到上限,标记需要在做一次验证后退出 + if max_global_steps is not None and global_step >= max_global_steps: + if rank == 0: + print(f"[Main] 达到 max_global_steps={max_global_steps}(global_step={global_step}),将在完成验证后退出训练。") + # 我们不 return 立刻退出;后面的 validate / 保存逻辑会执行一次,然后 main 返回/结束 + end_due_to_steps = True + else: + end_due_to_steps = False + + # 验证并记录 epoch 级别日志(如果在 step 模式下很可能在中间某个 epoch 提前结束,但我们仍做一次 validate) + val_log = validate(args, model, val_loader, criterion, device, args.batch_size) + current_lr = optimizer.param_groups[0]['lr'] + + if rank == 0: + # epoch summary print, 格式与示例对齐 + print(f"Epoch[{epoch}]: epoch_train_loss {train_log['loss']:.4f} acc1 {train_log['acc1']:.2f} acc5 
{train_log['acc5']:.2f} | " + f"val_loss {val_log['loss']:.4f} acc1 {val_log['acc1']:.2f} acc5 {val_log['acc5']:.2f} lr {current_lr:.6f}") + row = { + 'epoch': epoch, + 'lr': current_lr, + 'loss': train_log['loss'], + 'acc1': train_log['acc1'], + 'acc5': train_log['acc5'], + 'val_loss': val_log['loss'], + 'val_acc1': val_log['acc1'], + 'val_acc5': val_log['acc5'], + } + new_row_df = pd.DataFrame([row]) + log_df = pd.concat([log_df, new_row_df], ignore_index=True) + log_df.to_csv(os.path.join(save_dir, 'log.csv'), index=False) + + is_best = val_log['acc1'] > best_acc + if is_best: + best_acc = val_log['acc1'] + if (epoch % args.save_freq == 0) or is_best or ( (max_global_steps is None) and (epoch == total_epochs - 1) ) : + state = { + 'epoch': epoch, + 'state_dict': model.module.state_dict() if isinstance(model, DDP) else model.state_dict(), + 'best_acc': best_acc, + 'optimizer': optimizer.state_dict(), + 'args': vars(args) + } + save_checkpoint(state, is_best, save_dir, filename=f'checkpoint_epoch_{epoch}.pth') + + # 如果是因为 steps 模式达到上限,则在完成 validation / 保存后退出训练 + if end_due_to_steps: + if rank == 0: + print(f"[Main] 已在 steps 模式下完成最后一次验证并保存,训练结束(global_step={global_step})。") + break + + # increment epoch + epoch += 1 + + # stopping conditions: + # 1) if steps mode enabled and reached steps -> stop + if args.steps and args.steps > 0: + if global_step >= args.steps: + if rank == 0: + print(f"[Main] 已达到指定 steps={args.steps}(global_step={global_step}),训练结束。") + break + + # 2) if steps not used, stop when epoch >= epochs + else: + if epoch >= total_epochs: + if rank == 0: + print(f"[Main] 已达到指定 epochs={total_epochs}(epoch={epoch}),训练结束。") + break + + if dist.is_initialized(): + dist.barrier() + if rank == 0: + print("Training finished. 
Best val acc1: {:.2f}".format(best_acc)) + +if __name__ == '__main__': + main() \ No newline at end of file From aac716812eb750d27d829d5b48d6aa606ceadaf5 Mon Sep 17 00:00:00 2001 From: wangwl Date: Wed, 7 Jan 2026 06:03:54 +0000 Subject: [PATCH 2/3] fix: cleanup code and update --- .../Classification/Sequencer2D/coverage.txt | 3 - .../Classification/Sequencer2D/readme | 65 ++ .../Sequencer2D/requirements_exact.txt | 89 ++ .../Sequencer2D/sequencer/Dockerfile | 30 - .../Sequencer2D/sequencer/LICENSE | 201 ---- .../Sequencer2D/sequencer/README.md | 119 --- .../Sequencer2D/sequencer/avg_checkpoints.py | 121 --- .../Sequencer2D/sequencer/benchmark.py | 606 ------------ .../Sequencer2D/sequencer/clean_checkpoint.py | 78 -- .../sequencer/datasets/__init__.py | 5 - .../Sequencer2D/sequencer/datasets/cars.py | 112 --- .../Sequencer2D/sequencer/datasets/flowers.py | 125 --- .../sequencer/distributed_train.sh | 4 - .../Sequencer2D/sequencer/erf/__init__.py | 0 .../Sequencer2D/sequencer/erf/models.py | 35 - .../Sequencer2D/sequencer/erf/save_output.py | 9 - .../Sequencer2D/sequencer/erf/scaler.py | 5 - .../Sequencer2D/sequencer/generate_erf.py | 291 ------ .../Sequencer2D/sequencer/hubconf.py | 6 - .../Sequencer2D/sequencer/img/BiLSTM2D.jpg | Bin 73526 -> 0 bytes .../sequencer/img/RIKKYOAI_main.png | Bin 99021 -> 0 bytes .../Sequencer2D/sequencer/img/Sequencer.jpg | Bin 65080 -> 0 bytes .../Sequencer2D/sequencer/img/Sequencer2D.jpg | Bin 28229 -> 0 bytes .../Sequencer2D/sequencer/img/anytech.svg | 33 - .../Sequencer2D/sequencer/requirements.txt | 66 -- .../Sequencer2D/sequencer/train.py | 870 ------------------ .../Sequencer2D/sequencer/utils/__init__.py | 1 - .../Sequencer2D/sequencer/utils/helpers.py | 76 -- .../sequencer/utils/timm/__init__.py | 2 - .../sequencer/utils/timm/checkpoint_saver.py | 163 ---- .../sequencer/utils/timm/dataset_factory.py | 158 ---- .../sequencer/utils/timm/summary.py | 28 - .../Sequencer2D/sequencer/validate.py | 384 -------- 
.../Sequencer2D/sequencer/validate_c.py | 343 ------- .../Sequencer2D/sequencer/visualize_erf.py | 48 - .../Sequencer2D/sequencer2D_loss.jpg | Bin 36221 -> 0 bytes .../Sequencer2D/sequencer2D_loss.txt | 29 - 37 files changed, 154 insertions(+), 3951 deletions(-) delete mode 100644 PyTorch/build-in/Classification/Sequencer2D/coverage.txt create mode 100644 PyTorch/build-in/Classification/Sequencer2D/readme create mode 100644 PyTorch/build-in/Classification/Sequencer2D/requirements_exact.txt delete mode 100644 PyTorch/build-in/Classification/Sequencer2D/sequencer/Dockerfile delete mode 100644 PyTorch/build-in/Classification/Sequencer2D/sequencer/LICENSE delete mode 100644 PyTorch/build-in/Classification/Sequencer2D/sequencer/README.md delete mode 100755 PyTorch/build-in/Classification/Sequencer2D/sequencer/avg_checkpoints.py delete mode 100755 PyTorch/build-in/Classification/Sequencer2D/sequencer/benchmark.py delete mode 100755 PyTorch/build-in/Classification/Sequencer2D/sequencer/clean_checkpoint.py delete mode 100644 PyTorch/build-in/Classification/Sequencer2D/sequencer/datasets/__init__.py delete mode 100644 PyTorch/build-in/Classification/Sequencer2D/sequencer/datasets/cars.py delete mode 100644 PyTorch/build-in/Classification/Sequencer2D/sequencer/datasets/flowers.py delete mode 100755 PyTorch/build-in/Classification/Sequencer2D/sequencer/distributed_train.sh delete mode 100644 PyTorch/build-in/Classification/Sequencer2D/sequencer/erf/__init__.py delete mode 100644 PyTorch/build-in/Classification/Sequencer2D/sequencer/erf/models.py delete mode 100644 PyTorch/build-in/Classification/Sequencer2D/sequencer/erf/save_output.py delete mode 100644 PyTorch/build-in/Classification/Sequencer2D/sequencer/erf/scaler.py delete mode 100755 PyTorch/build-in/Classification/Sequencer2D/sequencer/generate_erf.py delete mode 100755 PyTorch/build-in/Classification/Sequencer2D/sequencer/hubconf.py delete mode 100644 
PyTorch/build-in/Classification/Sequencer2D/sequencer/img/BiLSTM2D.jpg delete mode 100644 PyTorch/build-in/Classification/Sequencer2D/sequencer/img/RIKKYOAI_main.png delete mode 100644 PyTorch/build-in/Classification/Sequencer2D/sequencer/img/Sequencer.jpg delete mode 100644 PyTorch/build-in/Classification/Sequencer2D/sequencer/img/Sequencer2D.jpg delete mode 100644 PyTorch/build-in/Classification/Sequencer2D/sequencer/img/anytech.svg delete mode 100644 PyTorch/build-in/Classification/Sequencer2D/sequencer/requirements.txt delete mode 100755 PyTorch/build-in/Classification/Sequencer2D/sequencer/train.py delete mode 100644 PyTorch/build-in/Classification/Sequencer2D/sequencer/utils/__init__.py delete mode 100644 PyTorch/build-in/Classification/Sequencer2D/sequencer/utils/helpers.py delete mode 100644 PyTorch/build-in/Classification/Sequencer2D/sequencer/utils/timm/__init__.py delete mode 100644 PyTorch/build-in/Classification/Sequencer2D/sequencer/utils/timm/checkpoint_saver.py delete mode 100644 PyTorch/build-in/Classification/Sequencer2D/sequencer/utils/timm/dataset_factory.py delete mode 100644 PyTorch/build-in/Classification/Sequencer2D/sequencer/utils/timm/summary.py delete mode 100755 PyTorch/build-in/Classification/Sequencer2D/sequencer/validate.py delete mode 100755 PyTorch/build-in/Classification/Sequencer2D/sequencer/validate_c.py delete mode 100755 PyTorch/build-in/Classification/Sequencer2D/sequencer/visualize_erf.py delete mode 100644 PyTorch/build-in/Classification/Sequencer2D/sequencer2D_loss.jpg delete mode 100644 PyTorch/build-in/Classification/Sequencer2D/sequencer2D_loss.txt diff --git a/PyTorch/build-in/Classification/Sequencer2D/coverage.txt b/PyTorch/build-in/Classification/Sequencer2D/coverage.txt deleted file mode 100644 index 1755bf4c9..000000000 --- a/PyTorch/build-in/Classification/Sequencer2D/coverage.txt +++ /dev/null @@ -1,3 +0,0 @@ -all api: ['_amp_foreach_non_finite_check_and_unscale_', '_amp_update_scale_', '_copy_from', 
'_has_compatible_shallow_copy_type', '_local_scalar_dense', '_log_softmax', '_log_softmax_backward_data', '_pin_memory', '_reshape_alias', 'add', 'add_', 'addmm', 'as_strided', 'cat', 'contiguous', 'convolution', 'convolution_backward', 'copy_stride', 'div', 'dropout', 'eq', 'fill_', 'fused_sgd', 'gelu', 'gelu_backward', 'is_pinned', 'linear', 'lstm', 'matmul', 'mean', 'mm', 'mul', 'mul_', 'native_layer_norm', 'native_layer_norm_backward', 'nll_loss_backward', 'nll_loss_forward', 'reciprocal', 'set_', 'sigmoid_', 'sigmoid_backward', 'sum', 'tanh', 'tanh_', 'tanh_backward', 'topk_out', 'view', 'zero_'], total: 48 -fallback op: [], total: 0 -coverage rate: 100.00% diff --git a/PyTorch/build-in/Classification/Sequencer2D/readme b/PyTorch/build-in/Classification/Sequencer2D/readme new file mode 100644 index 000000000..eb7772f82 --- /dev/null +++ b/PyTorch/build-in/Classification/Sequencer2D/readme @@ -0,0 +1,65 @@ +```markdown +## 1. 模型链接 +- 原始仓库链接: +https://github.com/huggingface/pytorch-image-models?tab=readme-ov-file#models + +## 2. 快速开始 + +使用本模型执行训练的主要流程如下: + +1. **基础环境安装**:介绍训练前需要完成的基础环境检查和安装。 +2. **获取数据集**:介绍如何获取训练所需的数据集。 +3. **构建环境**:介绍如何构建模型运行所需要的环境。 +4. **启动训练**:介绍如何运行训练。 + +### 2.1 基础环境安装 + +请参考主仓库的基础环境安装章节,完成训练前的基础环境检查和安装(如驱动、固件等)。 + +### 2.2 准备数据集 + +#### 2.2.1 获取数据集 + +训练使用 **CIFAR-100** 数据集。该数据集为开源数据集,包含 100 个类别的 60000 张彩色图像。 + +#### 2.2.2 处理数据集 + +请确保数据集已下载并解压。根据训练脚本的默认配置,建议将数据集存放在模型目录的上级 `data` 目录中(即 `../data`),或者根据实际路径修改训练命令中的 `--datapath` 参数。 + +### 2.3 构建环境 + +所使用的环境下需包含 PyTorch 框架虚拟环境。 + +1. 执行以下命令,启动虚拟环境(根据实际环境名称修改): + + ```bash + conda activate torch_env_py310 + +``` + +2. 安装 Python 依赖。确保已安装项目所需的依赖包: +```bash +pip install -r requirements_exact.txt + +``` + + + +### 2.4 启动训练 + +1. 在构建好的环境中,进入模型训练脚本所在目录。 + +2. 
运行训练。该模型支持单机单卡训练。 +执行以下命令启动训练(使用 CIFAR-100 数据集,Batch Size 为 128): +```bash +python weloTrainStep.py \ + --name train \ + --arch sequencer2D \ + --print_freq 1 \ + --steps 100 \ + --dataset cifar100 \ + --datapath ../data \ + --batch_size 32 \ + --epochs 100 + +``` diff --git a/PyTorch/build-in/Classification/Sequencer2D/requirements_exact.txt b/PyTorch/build-in/Classification/Sequencer2D/requirements_exact.txt new file mode 100644 index 000000000..7394b3319 --- /dev/null +++ b/PyTorch/build-in/Classification/Sequencer2D/requirements_exact.txt @@ -0,0 +1,89 @@ +addict==2.4.0 +aliyun-python-sdk-core==2.16.0 +aliyun-python-sdk-kms==2.16.5 +anyio==4.11.0 +astunparse==1.6.3 +certifi==2024.12.14 +cffi==2.0.0 +charset-normalizer==3.4.1 +click==8.3.1 +colorama==0.4.6 +contourpy==1.3.2 +crcmod==1.7 +cryptography==46.0.3 +cycler==0.12.1 +einops==0.8.1 +exceptiongroup==1.3.1 +filelock==3.14.0 +fonttools==4.60.1 +fsspec==2024.12.0 +future @ file:///croot/future_1730902796226/work +git-filter-repo==2.47.0 +h11==0.16.0 +hf-xet==1.2.0 +httpcore==1.0.9 +httpx==0.28.1 +huggingface_hub==1.1.5 +idna==3.10 +inplace-abn @ git+https://github.com/mapillary/inplace_abn.git@b50bfe9c7cd7116a3ab091a352b48d6ba5ee701c +Jinja2==3.1.5 +jmespath==0.10.0 +joblib==1.5.2 +kiwisolver==1.4.9 +Markdown==3.10 +markdown-it-py==4.0.0 +MarkupSafe==3.0.2 +matplotlib==3.10.7 +mdurl==0.1.2 +mmdet==3.3.0 +mmengine==0.10.7 +model-index==0.1.11 +mpmath==1.3.0 +networkx==3.4.2 +numpy==1.23.5 +opencv-python==4.12.0.88 +opendatalab==0.0.10 +openmim==0.3.9 +openxlab==0.1.3 +ordered-set==4.1.0 +oss2==2.17.0 +packaging @ file:///croot/packaging_1734472117206/work +pandas==2.3.3 +pillow==11.1.0 +platformdirs==4.5.1 +pycocotools==2.0.11 +pycparser @ file:///tmp/build/80754af9/pycparser_1636541352034/work +pycryptodome==3.23.0 +Pygments==2.19.2 +pyparsing==3.2.5 +python-dateutil==2.9.0.post0 +pytz==2023.4 +PyYAML @ file:///croot/pyyaml_1728657952215/work +requests==2.28.2 +rich==13.4.2 +safetensors==0.7.0 
+scikit-learn==1.7.2 +scipy==1.15.3 +shapely==2.1.2 +shellingham==1.5.4 +six @ file:///tmp/build/80754af9/six_1644875935023/work +sniffio==1.3.1 +sympy==1.13.3 +tabulate==0.9.0 +termcolor==3.2.0 +terminaltables==3.1.10 +threadpoolctl==3.6.0 +timm==1.0.22 +tomli==2.3.0 +torch @ file:///apps/torch-2.4.0a0%2Bgit4451b0e-cp310-cp310-linux_x86_64.whl#sha256=2e472c916044cac5a1a0e0d8b0e12bb943d8522b24ff826c8014dd444dccd378 +torch_sdaa @ file:///apps/torch_sdaa-2.0.0-cp310-cp310-linux_x86_64.whl#sha256=5aa57889b002e1231fbf806642e1353bfa016297bc25178396e89adc2b1f92e7 +torchaudio @ file:///apps/torchaudio-2.0.2%2Bda3eb8d-cp310-cp310-linux_x86_64.whl#sha256=46525c02fb7eaa8dafea860428de3d01e437ba8d6ff2cc228d7c71975ac4054b +torchdata @ file:///apps/torchdata-0.6.1%2Be1feeb2-py3-none-any.whl#sha256=aa2dc1a7732ea68adfad186978049bf68cc1afdbbdd1e17a8024227ab770e433 +torchtext @ file:///apps/torchtext-0.15.2a0%2B4571036-cp310-cp310-linux_x86_64.whl#sha256=7e42c684ba366f97b59ec37488bf95e416cce3892b6589200d2b3ad159ee5788 +torchvision @ file:///apps/torchvision-0.15.1a0%2B42759b1-cp310-cp310-linux_x86_64.whl#sha256=4b904db2d50102415536bc764bbc31c669b90b1b014f90964e9eccaadb2fd9eb +tqdm==4.65.2 +typer-slim==0.20.0 +typing_extensions==4.15.0 +tzdata==2025.2 +urllib3==1.26.20 +yapf==0.43.0 diff --git a/PyTorch/build-in/Classification/Sequencer2D/sequencer/Dockerfile b/PyTorch/build-in/Classification/Sequencer2D/sequencer/Dockerfile deleted file mode 100644 index 0edc4dca6..000000000 --- a/PyTorch/build-in/Classification/Sequencer2D/sequencer/Dockerfile +++ /dev/null @@ -1,30 +0,0 @@ -ARG PYTORCH="1.10.0" -ARG CUDA="11.3" -ARG CUDNN="8" - -FROM pytorch/pytorch:${PYTORCH}-cuda${CUDA}-cudnn${CUDNN}-devel as python-base - -ENV PYTHONUNBUFFERED=1 \ - PYTHONDONTWRITEBYTECODE=1 - -FROM python-base as initial -ENV TORCH_CUDA_ARCH_LIST="6.0 6.1 7.0 8.6+PTX" -ENV TORCH_NVCC_FLAGS="-Xfatbin -compress-all" -ENV CMAKE_PREFIX_PATH="$(dirname $(which conda))/../" - -RUN apt-key adv --fetch-keys 
https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64/3bf863cc.pub \ - && apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1804/x86_64/7fa2af80.pub \ - && apt-get update && apt-get install -y curl git build-essential cmake ninja-build libglib2.0-0 libsm6 libxrender-dev libxext6 libgl1-mesa-glx \ - && apt-get clean \ - && rm -rf /var/lib/apt/lists/* - -RUN conda clean --all -ENV FORCE_CUDA="1" - -WORKDIR /workspace - -FROM initial as development - -COPY requirements.txt /tmp - -RUN pip install -r /tmp/requirements.txt --no-cache-dir \ No newline at end of file diff --git a/PyTorch/build-in/Classification/Sequencer2D/sequencer/LICENSE b/PyTorch/build-in/Classification/Sequencer2D/sequencer/LICENSE deleted file mode 100644 index 16e2abf83..000000000 --- a/PyTorch/build-in/Classification/Sequencer2D/sequencer/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. 
- - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright 2022 Yuki Tatsunami - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/PyTorch/build-in/Classification/Sequencer2D/sequencer/README.md b/PyTorch/build-in/Classification/Sequencer2D/sequencer/README.md deleted file mode 100644 index 583b4fd7d..000000000 --- a/PyTorch/build-in/Classification/Sequencer2D/sequencer/README.md +++ /dev/null @@ -1,119 +0,0 @@ -**[NeurIPS 2022] Sequencer**: Deep LSTM for Image Classification -======== - -[![NeurIPS 2022](https://img.shields.io/badge/NeuIPS-2022-373737.svg?style=plastic&labelColor=5779eb)](https://nips.cc/Conferences/2022/Schedule?showEvent=55158) -[![arXiv](https://img.shields.io/badge/arXiv-2205.01972-b31b1b.svg?style=plastic)](https://arxiv.org/abs/2205.01972) -[![Support Ukraine](https://img.shields.io/badge/Support-Ukraine-FFD500?style=plastic&labelColor=005BBB)](https://opensource.fb.com/support-ukraine) - -Created by -* [Yuki Tatsunami](https://okojoalg.github.io/) - * [![Rikkyo University](https://img.shields.io/badge/Rikkyo-University-FFFFFF?style=plastic&labelColor=582780)](https://www.rikkyo.ac.jp) - * [![AnyTech](https://img.shields.io/badge/AnyTech-Co.%20Ltd.-18C4AA?style=plastic&labelColor=254BB1)](https://anytech.co.jp/) -* [Masato Taki](https://scholar.google.com/citations?hl=en&user=3nMhvfgAAAAJ) - * [![Rikkyo University](https://img.shields.io/badge/Rikkyo-University-FFFFFF?style=plastic&labelColor=582780)](https://www.rikkyo.ac.jp) - -This repository contains implementation for Sequencer. 
- -## Abstract - -In recent computer vision research, the advent of the Vision Transformer (ViT) has rapidly revolutionized various architectural design efforts: ViT achieved state-of-the-art image classification performance using self-attention found in natural language processing, and MLP-Mixer achieved competitive performance using simple multi-layer perceptrons. In contrast, several studies have also suggested that carefully redesigned convolutional neural networks (CNNs) can achieve advanced performance comparable to ViT without resorting to these new ideas. Against this background, there is growing interest in what inductive bias is suitable for computer vision. Here we propose Sequencer, a novel and competitive architecture alternative to ViT that provides a new perspective on these issues. Unlike ViTs, Sequencer models long-range dependencies using LSTMs rather than self-attention layers. We also propose a two-dimensional version of Sequencer module, where an LSTM is decomposed into vertical and horizontal LSTMs to enhance performance. Despite its simplicity, several experiments demonstrate that Sequencer performs impressively well: Sequencer2D-L, with 54M parameters, realizes 84.6\% top-1 accuracy on only ImageNet-1K. Not only that, we show that it has good transferability and the robust resolution adaptability on double resolution-band. - -## Schematic diagrams - -The overall architecture of Sequencer2D is similar to the typical hierarchical ViT and Visual MLP. 
It uses Sequencer2D blocks instead of Transformer blocks: - -![Sequencer] - -Sequencer2D block replaces the Transformer's self-attention layer with an LSTM-based layer like BiLSTM2D layer: - -![Sequencer2D] - -BiLSTM2D includes a vertical LSTM and a horizontal LSTM: - -![BiLSTM2D] - -[Sequencer]: img/Sequencer.jpg -[Sequencer2D]: img/Sequencer2D.jpg -[BiLSTM2D]: img/BiLSTM2D.jpg - -## Model Zoo -We provide our Sequencer models pretrained on ImageNet-1K: -| name | arch | Params | FLOPs | acc@1 | download | -| --- | --- | --- | --- | --- | --- | -| Sequencer2D-S | ```sequencer2d_s``` | 28M | 8.4G | 82.3 | [here](https://github.com/okojoalg/sequencer/releases/download/weights/sequencer2d_s.pth) | -| Sequencer2D-M | ```sequencer2d_m``` | 38M | 11.1G | 82.8 | [here](https://github.com/okojoalg/sequencer/releases/download/weights/sequencer2d_m.pth) | -| Sequencer2D-L | ```sequencer2d_l``` | 54M | 16.6G | 83.4 | [here](https://github.com/okojoalg/sequencer/releases/download/weights/sequencer2d_l.pth) | - -## Usage - -### Requirements -- torch>=1.10.0 -- torchvision -- timm==0.5.4 -- Pillow -- matplotlib -- scipy -- etc., see [requirements.txt](requirements.txt) - -### Data preparation -Download and extract ImageNet images. The directory structure should be as follows. - -``` -│imagenet/ -├──train/ -│ ├── n01440764 -│ │ ├── n01440764_10026.JPEG -│ │ ├── n01440764_10027.JPEG -│ │ ├── ...... -│ ├── ...... -├──val/ -│ ├── n01440764 -│ │ ├── ILSVRC2012_val_00000293.JPEG -│ │ ├── ILSVRC2012_val_00002138.JPEG -│ │ ├── ...... -│ ├── ...... -``` - -### Traning -Command line for training Sequencer models on ImageNet from scratch. 
-``` -./distributed_train.sh 8 /path/to/imagenet --model sequencer2d_s -b 256 -j 8 --opt adamw --epochs 300 --sched cosine --native-amp --img-size 224 --drop-path 0.1 --lr 2e-3 --weight-decay 0.05 --remode pixel --reprob 0.25 --aa rand-m9-mstd0.5-inc1 --smoothing 0.1 --mixup 0.8 --cutmix 1.0 --warmup-lr 1e-6 --warmup-epochs 20 -``` - -Command line for fine-tuning a pre-trained model at higher resolution. -``` -./distributed_train.sh 8 /path/to/imagenet --model sequencer2d_l --pretrained -b 64 -j 8 --opt adamw --epochs 30 --sched cosine --native-amp --input-size 3 392 392 --img-size 392 --crop-pct 1.0 --drop-path 0.4 --lr 5e-5 --weight-decay 1e-8 --remode pixel --reprob 0.25 --aa rand-m9-mstd0.5-inc1 --smoothing 0.1 --mixup 0.8 --cutmix 1.0 --warmup-epochs 0 --cooldown-epochs 0 -``` - -Command line for fine-tuning a pre-trained model on a transfer learning dataset. -``` -./distributed_train.sh 4 /path/to/cifar10 --model sequencer2d_s -b 128 -j 4 --num-classes 10 --dataset torch/cifar10 --pretrained --opt adamw --epochs 200 --sched cosine --native-amp --img-size 224 --clip-grad 1 --drop-path 0.1 --lr 0.0001 --weight-decay 1e-4 --remode pixel --aa rand-m9-mstd0.5-inc1 --smoothing 0.1 --mixup 0.8 --cutmix 1.0 --warmup-lr 1e-6 --warmup-epochs 5 -``` - -### Validation -To evaluate our Sequencer models, run: -``` -python validate.py /path/to/imagenet --model sequencer2d_s -b 16 --input-size 3 224 224 --amp -``` - -## Reference -You may want to cite: -``` -@article{tatsunami2022sequencer, - title={Sequencer: Deep LSTM for Image Classification}, - author={Tatsunami, Yuki and Taki, Masato}, - journal={Advances in Neural Information Processing Systems}, - year={2022} -} -``` - -## Acknowledgment -This implementation is based on [pytorch-image-models](https://github.com/rwightman/pytorch-image-models) by Ross Wightman. We thank for his brilliant work. 
- -| | | -|:--|:-:| -| We thank [Graduate School of Artificial Intelligence and Science, Rikkyo University (Rikkyo AI)](https://ai.rikkyo.ac.jp) which supports us with computational resources, facilities, and others. | ![logo-rikkyo-ai] | -| [AnyTech Co. Ltd.](https://anytech.co.jp) provided valuable comments on the early versions and encouragement. We thank them for their cooperation. In particular, We thank [Atsushi Fukuda](https://github.com/fukumame) for organizing discussion opportunities. | ![logo-anytech] | - -[logo-rikkyo-ai]: img/RIKKYOAI_main.png "Logo of Rikkyo AI" -[logo-anytech]: img/anytech.svg "Logo of AnyTech" diff --git a/PyTorch/build-in/Classification/Sequencer2D/sequencer/avg_checkpoints.py b/PyTorch/build-in/Classification/Sequencer2D/sequencer/avg_checkpoints.py deleted file mode 100755 index eace47dc7..000000000 --- a/PyTorch/build-in/Classification/Sequencer2D/sequencer/avg_checkpoints.py +++ /dev/null @@ -1,121 +0,0 @@ -#!/usr/bin/env python3 -""" Checkpoint Averaging Script - -This script averages all model weights for checkpoints in specified path that match -the specified filter wildcard. All checkpoints must be from the exact same model. - -For any hope of decent results, the checkpoints should be from the same or child -(via resumes) training session. This can be viewed as similar to maintaining running -EMA (exponential moving average) of the model weights or performing SWA (stochastic -weight averaging), but post-training. 
- -Hacked together by / Copyright 2020 Ross Wightman (https://github.com/rwightman) -""" -import torch -import argparse -import os -import glob -import hashlib -from timm.models.helpers import load_state_dict - -parser = argparse.ArgumentParser(description='PyTorch Checkpoint Averager') -parser.add_argument('--input', default='', type=str, metavar='PATH', - help='path to base input folder containing checkpoints') -parser.add_argument('--filter', default='*.pth.tar', type=str, metavar='WILDCARD', - help='checkpoint filter (path wildcard)') -parser.add_argument('--output', default='./averaged.pth', type=str, metavar='PATH', - help='output filename') -parser.add_argument('--no-use-ema', dest='no_use_ema', action='store_true', - help='Force not using ema version of weights (if present)') -parser.add_argument('--no-sort', dest='no_sort', action='store_true', - help='Do not sort and select by checkpoint metric, also makes "n" argument irrelevant') -parser.add_argument('-n', type=int, default=10, metavar='N', - help='Number of checkpoints to average') - - -def checkpoint_metric(checkpoint_path): - if not checkpoint_path or not os.path.isfile(checkpoint_path): - return {} - print("=> Extracting metric from checkpoint '{}'".format(checkpoint_path)) - checkpoint = torch.load(checkpoint_path, map_location='cpu') - metric = None - if 'metric' in checkpoint: - metric = checkpoint['metric'] - elif 'metrics' in checkpoint and 'metric_name' in checkpoint: - metrics = checkpoint['metrics'] - print(metrics) - metric = metrics[checkpoint['metric_name']] - return metric - - -def main(): - args = parser.parse_args() - # by default use the EMA weights (if present) - args.use_ema = not args.no_use_ema - # by default sort by checkpoint metric (if present) and avg top n checkpoints - args.sort = not args.no_sort - - if os.path.exists(args.output): - print("Error: Output filename ({}) already exists.".format(args.output)) - exit(1) - - pattern = args.input - if not 
args.input.endswith(os.path.sep) and not args.filter.startswith(os.path.sep): - pattern += os.path.sep - pattern += args.filter - checkpoints = glob.glob(pattern, recursive=True) - - if args.sort: - checkpoint_metrics = [] - for c in checkpoints: - metric = checkpoint_metric(c) - if metric is not None: - checkpoint_metrics.append((metric, c)) - checkpoint_metrics = list(sorted(checkpoint_metrics)) - checkpoint_metrics = checkpoint_metrics[-args.n:] - print("Selected checkpoints:") - [print(m, c) for m, c in checkpoint_metrics] - avg_checkpoints = [c for m, c in checkpoint_metrics] - else: - avg_checkpoints = checkpoints - print("Selected checkpoints:") - [print(c) for c in checkpoints] - - avg_state_dict = {} - avg_counts = {} - for c in avg_checkpoints: - new_state_dict = load_state_dict(c, args.use_ema) - if not new_state_dict: - print("Error: Checkpoint ({}) doesn't exist".format(args.checkpoint)) - continue - - for k, v in new_state_dict.items(): - if k not in avg_state_dict: - avg_state_dict[k] = v.clone().to(dtype=torch.float64) - avg_counts[k] = 1 - else: - avg_state_dict[k] += v.to(dtype=torch.float64) - avg_counts[k] += 1 - - for k, v in avg_state_dict.items(): - v.div_(avg_counts[k]) - - # float32 overflow seems unlikely based on weights seen to date, but who knows - float32_info = torch.finfo(torch.float32) - final_state_dict = {} - for k, v in avg_state_dict.items(): - v = v.clamp(float32_info.min, float32_info.max) - final_state_dict[k] = v.to(dtype=torch.float32) - - try: - torch.save(final_state_dict, args.output, _use_new_zipfile_serialization=False) - except: - torch.save(final_state_dict, args.output) - - with open(args.output, 'rb') as f: - sha_hash = hashlib.sha256(f.read()).hexdigest() - print("=> Saved state_dict to '{}, SHA256: {}'".format(args.output, sha_hash)) - - -if __name__ == '__main__': - main() \ No newline at end of file diff --git a/PyTorch/build-in/Classification/Sequencer2D/sequencer/benchmark.py 
b/PyTorch/build-in/Classification/Sequencer2D/sequencer/benchmark.py deleted file mode 100755 index 53fe1c88c..000000000 --- a/PyTorch/build-in/Classification/Sequencer2D/sequencer/benchmark.py +++ /dev/null @@ -1,606 +0,0 @@ -#!/usr/bin/env python3 -""" Model Benchmark Script -An inference and train step benchmark script for timm models. -Hacked together by Ross Wightman (https://github.com/rwightman) -""" -import argparse -import os -import csv -import json -import time -import logging -import torch -import torch.nn as nn -import torch.nn.parallel -from collections import OrderedDict -from contextlib import suppress -from functools import partial - -from timm.models import create_model, is_model, list_models -from timm.optim import create_optimizer_v2 -from timm.data import resolve_data_config -from timm.utils import AverageMeter, setup_default_logging - -import models - -has_apex = False -try: - from apex import amp - has_apex = True -except ImportError: - pass - -has_native_amp = False -try: - if getattr(torch.cuda.amp, 'autocast') is not None: - has_native_amp = True -except AttributeError: - pass - -try: - from deepspeed.profiling.flops_profiler import get_model_profile - has_deepspeed_profiling = True -except ImportError as e: - has_deepspeed_profiling = False - -try: - from fvcore.nn import FlopCountAnalysis, flop_count_str, ActivationCountAnalysis - has_fvcore_profiling = True -except ImportError as e: - FlopCountAnalysis = None - has_fvcore_profiling = False - - -torch.backends.cudnn.benchmark = True -_logger = logging.getLogger('validate') - - -parser = argparse.ArgumentParser(description='PyTorch Benchmark') - -# benchmark specific args -parser.add_argument('--model-list', metavar='NAME', default='', - help='txt file based list of model names to benchmark') -parser.add_argument('--bench', default='both', type=str, - help="Benchmark mode. One of 'inference', 'train', 'both'. 
Defaults to 'both'") -parser.add_argument('--detail', action='store_true', default=False, - help='Provide train fwd/bwd/opt breakdown detail if True. Defaults to False') -parser.add_argument('--results-file', default='', type=str, metavar='FILENAME', - help='Output csv file for validation results (summary)') -parser.add_argument('--num-warm-iter', default=10, type=int, - metavar='N', help='Number of warmup iterations (default: 10)') -parser.add_argument('--num-bench-iter', default=40, type=int, - metavar='N', help='Number of benchmark iterations (default: 40)') - -# common inference / train args -parser.add_argument('--model', '-m', metavar='NAME', default='resnet50', - help='model architecture (default: resnet50)') -parser.add_argument('-b', '--batch-size', default=256, type=int, - metavar='N', help='mini-batch size (default: 256)') -parser.add_argument('--img-size', default=None, type=int, - metavar='N', help='Input image dimension, uses model default if empty') -parser.add_argument('--input-size', default=None, nargs=3, type=int, - metavar='N N N', help='Input all image dimensions (d h w, e.g. --input-size 3 224 224), uses model default if empty') -parser.add_argument('--use-train-size', action='store_true', default=False, - help='Run inference at train size, not test-input-size if it exists.') -parser.add_argument('--num-classes', type=int, default=None, - help='Number classes in dataset') -parser.add_argument('--gp', default=None, type=str, metavar='POOL', - help='Global pool type, one of (fast, avg, max, avgmax, avgmaxc). Model default if None.') -parser.add_argument('--channels-last', action='store_true', default=False, - help='Use channels_last memory layout') -parser.add_argument('--amp', action='store_true', default=False, - help='use PyTorch Native AMP for mixed precision training. Overrides --precision arg.') -parser.add_argument('--precision', default='float32', type=str, - help='Numeric precision. 
One of (amp, float32, float16, bfloat16, tf32)') -parser.add_argument('--torchscript', dest='torchscript', action='store_true', - help='convert model torchscript for inference') - - - -# train optimizer parameters -parser.add_argument('--opt', default='sgd', type=str, metavar='OPTIMIZER', - help='Optimizer (default: "sgd"') -parser.add_argument('--opt-eps', default=None, type=float, metavar='EPSILON', - help='Optimizer Epsilon (default: None, use opt default)') -parser.add_argument('--opt-betas', default=None, type=float, nargs='+', metavar='BETA', - help='Optimizer Betas (default: None, use opt default)') -parser.add_argument('--momentum', type=float, default=0.9, metavar='M', - help='Optimizer momentum (default: 0.9)') -parser.add_argument('--weight-decay', type=float, default=0.0001, - help='weight decay (default: 0.0001)') -parser.add_argument('--clip-grad', type=float, default=None, metavar='NORM', - help='Clip gradient norm (default: None, no clipping)') -parser.add_argument('--clip-mode', type=str, default='norm', - help='Gradient clipping mode. 
One of ("norm", "value", "agc")') - - -# model regularization / loss params that impact model or loss fn -parser.add_argument('--smoothing', type=float, default=0.1, - help='Label smoothing (default: 0.1)') -parser.add_argument('--drop', type=float, default=0.0, metavar='PCT', - help='Dropout rate (default: 0.)') -parser.add_argument('--drop-path', type=float, default=None, metavar='PCT', - help='Drop path rate (default: None)') -parser.add_argument('--drop-block', type=float, default=None, metavar='PCT', - help='Drop block rate (default: None)') - - -def timestamp(sync=False): - return time.perf_counter() - - -def cuda_timestamp(sync=False, device=None): - if sync: - torch.cuda.synchronize(device=device) - return time.perf_counter() - - -def count_params(model: nn.Module): - return sum([m.numel() for m in model.parameters()]) - - -def resolve_precision(precision: str): - assert precision in ('amp', 'float16', 'bfloat16', 'float32') - use_amp = False - model_dtype = torch.float32 - data_dtype = torch.float32 - if precision == 'amp': - use_amp = True - elif precision == 'float16': - model_dtype = torch.float16 - data_dtype = torch.float16 - elif precision == 'bfloat16': - model_dtype = torch.bfloat16 - data_dtype = torch.bfloat16 - return use_amp, model_dtype, data_dtype - - -def profile_deepspeed(model, input_size=(3, 224, 224), batch_size=1, detailed=False): - macs, _ = get_model_profile( - model=model, - input_res=(batch_size,) + input_size, # input shape or input to the input_constructor - input_constructor=None, # if specified, a constructor taking input_res is used as input to the model - print_profile=detailed, # prints the model graph with the measured profile attached to each module - detailed=detailed, # print the detailed profile - warm_up=10, # the number of warm-ups before measuring the time of each module - as_string=False, # print raw numbers (e.g. 1000) or as human-readable strings (e.g. 1k) - output_file=None, # path to the output file. 
If None, the profiler prints to stdout. - ignore_modules=None) # the list of modules to ignore in the profiling - return macs, 0 # no activation count in DS - - -def profile_fvcore(model, input_size=(3, 224, 224), batch_size=1, detailed=False, force_cpu=False): - if force_cpu: - model = model.to('cpu') - device, dtype = next(model.parameters()).device, next(model.parameters()).dtype - example_input = torch.ones((batch_size,) + input_size, device=device, dtype=dtype) - fca = FlopCountAnalysis(model, example_input) - aca = ActivationCountAnalysis(model, example_input) - if detailed: - fcs = flop_count_str(fca) - print(fcs) - return fca.total(), aca.total() - - -class BenchmarkRunner: - def __init__( - self, model_name, detail=False, device='cuda', torchscript=False, precision='float32', - num_warm_iter=10, num_bench_iter=50, use_train_size=False, **kwargs): - self.model_name = model_name - self.detail = detail - self.device = device - self.use_amp, self.model_dtype, self.data_dtype = resolve_precision(precision) - self.channels_last = kwargs.pop('channels_last', False) - self.amp_autocast = torch.cuda.amp.autocast if self.use_amp else suppress - - self.model = create_model( - model_name, - num_classes=kwargs.pop('num_classes', None), - in_chans=3, - global_pool=kwargs.pop('gp', 'fast'), - scriptable=torchscript) - self.model.to( - device=self.device, - dtype=self.model_dtype, - memory_format=torch.channels_last if self.channels_last else None) - self.num_classes = self.model.num_classes - self.param_count = count_params(self.model) - _logger.info('Model %s created, param count: %d' % (model_name, self.param_count)) - self.scripted = False - if torchscript: - self.model = torch.jit.script(self.model) - self.scripted = True - - data_config = resolve_data_config(kwargs, model=self.model, use_test_size=not use_train_size) - self.input_size = data_config['input_size'] - self.batch_size = kwargs.pop('batch_size', 256) - - self.example_inputs = None - self.num_warm_iter = 
num_warm_iter - self.num_bench_iter = num_bench_iter - self.log_freq = num_bench_iter // 5 - if 'cuda' in self.device: - self.time_fn = partial(cuda_timestamp, device=self.device) - else: - self.time_fn = timestamp - - def _init_input(self): - self.example_inputs = torch.randn( - (self.batch_size,) + self.input_size, device=self.device, dtype=self.data_dtype) - if self.channels_last: - self.example_inputs = self.example_inputs.contiguous(memory_format=torch.channels_last) - - -class InferenceBenchmarkRunner(BenchmarkRunner): - - def __init__(self, model_name, device='cuda', torchscript=False, **kwargs): - super().__init__(model_name=model_name, device=device, torchscript=torchscript, **kwargs) - self.model.eval() - - def run(self): - def _step(): - t_step_start = self.time_fn() - with self.amp_autocast(): - output = self.model(self.example_inputs) - t_step_end = self.time_fn(True) - return t_step_end - t_step_start - - _logger.info( - f'Running inference benchmark on {self.model_name} for {self.num_bench_iter} steps w/ ' - f'input size {self.input_size} and batch size {self.batch_size}.') - - with torch.no_grad(): - self._init_input() - - for _ in range(self.num_warm_iter): - _step() - - torch.cuda.reset_peak_memory_stats() - total_step = 0. - num_samples = 0 - t_run_start = self.time_fn() - for i in range(self.num_bench_iter): - delta_fwd = _step() - total_step += delta_fwd - num_samples += self.batch_size - num_steps = i + 1 - if num_steps % self.log_freq == 0: - _logger.info( - f"Infer [{num_steps}/{self.num_bench_iter}]." - f" {num_samples / total_step:0.2f} samples/sec." 
- f" {1000 * total_step / num_steps:0.3f} ms/step.") - t_run_end = self.time_fn(True) - t_run_elapsed = t_run_end - t_run_start - - results = dict( - samples_per_sec=round(num_samples / t_run_elapsed, 2), - step_time=round(1000 * total_step / self.num_bench_iter, 3), - batch_size=self.batch_size, - img_size=self.input_size[-1], - param_count=round(self.param_count / 1e6, 2), - peak_memory=torch.cuda.max_memory_allocated() // 2 ** 20, - ) - - retries = 0 if self.scripted else 2 # skip profiling if model is scripted - while retries: - retries -= 1 - try: - if has_deepspeed_profiling: - macs, _ = profile_deepspeed(self.model, self.input_size) - results['gmacs'] = round(macs / 1e9, 2) - elif has_fvcore_profiling: - macs, activations = profile_fvcore(self.model, self.input_size, force_cpu=not retries) - results['gmacs'] = round(macs / 1e9, 2) - results['macts'] = round(activations / 1e6, 2) - except RuntimeError as e: - pass - - _logger.info( - f"Inference benchmark of {self.model_name} done. " - f"{results['samples_per_sec']:.2f} samples/sec, {results['step_time']:.2f} ms/step") - - return results - - -class TrainBenchmarkRunner(BenchmarkRunner): - - def __init__(self, model_name, device='cuda', torchscript=False, **kwargs): - super().__init__(model_name=model_name, device=device, torchscript=torchscript, **kwargs) - self.model.train() - - if kwargs.pop('smoothing', 0) > 0: - self.loss = nn.CrossEntropyLoss().to(self.device) - else: - self.loss = nn.CrossEntropyLoss().to(self.device) - self.target_shape = tuple() - - self.optimizer = create_optimizer_v2( - self.model, - opt=kwargs.pop('opt', 'sgd'), - lr=kwargs.pop('lr', 1e-4)) - - def _gen_target(self, batch_size): - return torch.empty( - (batch_size,) + self.target_shape, device=self.device, dtype=torch.long).random_(self.num_classes) - - def run(self): - def _step(detail=False): - self.optimizer.zero_grad() # can this be ignored? 
- t_start = self.time_fn() - t_fwd_end = t_start - t_bwd_end = t_start - with self.amp_autocast(): - output = self.model(self.example_inputs) - if isinstance(output, tuple): - output = output[0] - if detail: - t_fwd_end = self.time_fn(True) - target = self._gen_target(output.shape[0]) - self.loss(output, target).backward() - if detail: - t_bwd_end = self.time_fn(True) - self.optimizer.step() - t_end = self.time_fn(True) - if detail: - delta_fwd = t_fwd_end - t_start - delta_bwd = t_bwd_end - t_fwd_end - delta_opt = t_end - t_bwd_end - return delta_fwd, delta_bwd, delta_opt - else: - delta_step = t_end - t_start - return delta_step - - _logger.info( - f'Running train benchmark on {self.model_name} for {self.num_bench_iter} steps w/ ' - f'input size {self.input_size} and batch size {self.batch_size}.') - - self._init_input() - - for _ in range(self.num_warm_iter): - _step() - - torch.cuda.reset_peak_memory_stats() - t_run_start = self.time_fn() - if self.detail: - total_fwd = 0. - total_bwd = 0. - total_opt = 0. - num_samples = 0 - for i in range(self.num_bench_iter): - delta_fwd, delta_bwd, delta_opt = _step(True) - num_samples += self.batch_size - total_fwd += delta_fwd - total_bwd += delta_bwd - total_opt += delta_opt - num_steps = (i + 1) - if num_steps % self.log_freq == 0: - total_step = total_fwd + total_bwd + total_opt - _logger.info( - f"Train [{num_steps}/{self.num_bench_iter}]." - f" {num_samples / total_step:0.2f} samples/sec." - f" {1000 * total_fwd / num_steps:0.3f} ms/step fwd," - f" {1000 * total_bwd / num_steps:0.3f} ms/step bwd," - f" {1000 * total_opt / num_steps:0.3f} ms/step opt." 
- ) - total_step = total_fwd + total_bwd + total_opt - t_run_elapsed = self.time_fn() - t_run_start - results = dict( - samples_per_sec=round(num_samples / t_run_elapsed, 2), - step_time=round(1000 * total_step / self.num_bench_iter, 3), - fwd_time=round(1000 * total_fwd / self.num_bench_iter, 3), - bwd_time=round(1000 * total_bwd / self.num_bench_iter, 3), - opt_time=round(1000 * total_opt / self.num_bench_iter, 3), - batch_size=self.batch_size, - img_size=self.input_size[-1], - param_count=round(self.param_count / 1e6, 2), - peak_memory=torch.cuda.max_memory_allocated() // 2 ** 20, - ) - else: - total_step = 0. - num_samples = 0 - for i in range(self.num_bench_iter): - delta_step = _step(False) - num_samples += self.batch_size - total_step += delta_step - num_steps = (i + 1) - if num_steps % self.log_freq == 0: - _logger.info( - f"Train [{num_steps}/{self.num_bench_iter}]." - f" {num_samples / total_step:0.2f} samples/sec." - f" {1000 * total_step / num_steps:0.3f} ms/step.") - t_run_elapsed = self.time_fn() - t_run_start - results = dict( - samples_per_sec=round(num_samples / t_run_elapsed, 2), - step_time=round(1000 * total_step / self.num_bench_iter, 3), - batch_size=self.batch_size, - img_size=self.input_size[-1], - param_count=round(self.param_count / 1e6, 2), - peak_memory=torch.cuda.max_memory_allocated() // 2 ** 20, - ) - - _logger.info( - f"Train benchmark of {self.model_name} done. " - f"{results['samples_per_sec']:.2f} samples/sec, {results['step_time']:.2f} ms/sample") - - return results - - -class ProfileRunner(BenchmarkRunner): - - def __init__(self, model_name, device='cuda', profiler='', **kwargs): - super().__init__(model_name=model_name, device=device, **kwargs) - if not profiler: - if has_deepspeed_profiling: - profiler = 'deepspeed' - elif has_fvcore_profiling: - profiler = 'fvcore' - assert profiler, "One of deepspeed or fvcore needs to be installed for profiling to work." 
- self.profiler = profiler - self.model.eval() - - def run(self): - _logger.info( - f'Running profiler on {self.model_name} w/ ' - f'input size {self.input_size} and batch size {self.batch_size}.') - torch.cuda.reset_peak_memory_stats() - - macs = 0 - activations = 0 - if self.profiler == 'deepspeed': - macs, _ = profile_deepspeed(self.model, self.input_size, batch_size=self.batch_size, detailed=True) - elif self.profiler == 'fvcore': - macs, activations = profile_fvcore(self.model, self.input_size, batch_size=self.batch_size, detailed=True) - - results = dict( - gmacs=round(macs / 1e9, 2), - macts=round(activations / 1e6, 2), - batch_size=self.batch_size, - img_size=self.input_size[-1], - param_count=round(self.param_count / 1e6, 2), - peak_memory=torch.cuda.max_memory_allocated() // 2 ** 20 - ) - - _logger.info( - f"Profile of {self.model_name} done. " - f"{results['gmacs']:.2f} GMACs, {results['param_count']:.2f} M params.") - - return results - - -def decay_batch_exp(batch_size, factor=0.5, divisor=16): - out_batch_size = batch_size * factor - if out_batch_size > divisor: - out_batch_size = (out_batch_size + 1) // divisor * divisor - else: - out_batch_size = batch_size - 1 - return max(0, int(out_batch_size)) - - -def _try_run(model_name, bench_fn, initial_batch_size, bench_kwargs): - batch_size = initial_batch_size - results = dict() - while batch_size >= 1: - torch.cuda.empty_cache() - try: - bench = bench_fn(model_name=model_name, batch_size=batch_size, **bench_kwargs) - results = bench.run() - return results - except RuntimeError as e: - e_str = str(e) - print(e_str) - if 'channels_last' in e_str: - print(f'Error: {model_name} not supported in channels_last, skipping.') - break - print(f'Error: "{e_str}" while running benchmark. 
Reducing batch size to {batch_size} for retry.') - batch_size = decay_batch_exp(batch_size) - return results - - -def benchmark(args): - if args.amp: - _logger.warning("Overriding precision to 'amp' since --amp flag set.") - args.precision = 'amp' - _logger.info(f'Benchmarking in {args.precision} precision. ' - f'{"NHWC" if args.channels_last else "NCHW"} layout. ' - f'torchscript {"enabled" if args.torchscript else "disabled"}') - - bench_kwargs = vars(args).copy() - bench_kwargs.pop('amp') - model = bench_kwargs.pop('model') - batch_size = bench_kwargs.pop('batch_size') - - bench_fns = (InferenceBenchmarkRunner,) - prefixes = ('infer',) - if args.bench == 'both': - bench_fns = ( - InferenceBenchmarkRunner, - TrainBenchmarkRunner - ) - prefixes = ('infer', 'train') - elif args.bench == 'train': - bench_fns = TrainBenchmarkRunner, - prefixes = 'train', - elif args.bench.startswith('profile'): - # specific profiler used if included in bench mode string, otherwise default to deepspeed, fallback to fvcore - if 'deepspeed' in args.bench: - assert has_deepspeed_profiling, "deepspeed must be installed to use deepspeed flop counter" - bench_kwargs['profiler'] = 'deepspeed' - elif 'fvcore' in args.bench: - assert has_fvcore_profiling, "fvcore must be installed to use fvcore flop counter" - bench_kwargs['profiler'] = 'fvcore' - bench_fns = ProfileRunner, - batch_size = 1 - - model_results = OrderedDict(model=model) - for prefix, bench_fn in zip(prefixes, bench_fns): - run_results = _try_run(model, bench_fn, initial_batch_size=batch_size, bench_kwargs=bench_kwargs) - if prefix: - run_results = {'_'.join([prefix, k]): v for k, v in run_results.items()} - model_results.update(run_results) - param_count = model_results.pop('infer_param_count', model_results.pop('train_param_count', 0)) - model_results.setdefault('param_count', param_count) - model_results.pop('train_param_count', 0) - return model_results if model_results['param_count'] else dict() - - -def main(): - 
setup_default_logging() - args = parser.parse_args() - model_cfgs = [] - model_names = [] - - if args.model_list: - args.model = '' - with open(args.model_list) as f: - model_names = [line.rstrip() for line in f] - model_cfgs = [(n, None) for n in model_names] - elif args.model == 'all': - # validate all models in a list of names with pretrained checkpoints - args.pretrained = True - model_names = list_models(pretrained=True, exclude_filters=['*in21k']) - model_cfgs = [(n, None) for n in model_names] - elif not is_model(args.model): - # model name doesn't exist, try as wildcard filter - model_names = list_models(args.model) - model_cfgs = [(n, None) for n in model_names] - - if len(model_cfgs): - results_file = args.results_file or './benchmark.csv' - _logger.info('Running bulk validation on these pretrained models: {}'.format(', '.join(model_names))) - results = [] - try: - for m, _ in model_cfgs: - if not m: - continue - args.model = m - r = benchmark(args) - if r: - results.append(r) - time.sleep(10) - except KeyboardInterrupt as e: - pass - sort_key = 'infer_samples_per_sec' - if 'train' in args.bench: - sort_key = 'train_samples_per_sec' - elif 'profile' in args.bench: - sort_key = 'infer_gmacs' - results = sorted(results, key=lambda x: x[sort_key], reverse=True) - if len(results): - write_results(results_file, results) - else: - results = benchmark(args) - json_str = json.dumps(results, indent=4) - print(json_str) - - -def write_results(results_file, results): - with open(results_file, mode='w') as cf: - dw = csv.DictWriter(cf, fieldnames=results[0].keys()) - dw.writeheader() - for r in results: - dw.writerow(r) - cf.flush() - - -if __name__ == '__main__': - main() \ No newline at end of file diff --git a/PyTorch/build-in/Classification/Sequencer2D/sequencer/clean_checkpoint.py b/PyTorch/build-in/Classification/Sequencer2D/sequencer/clean_checkpoint.py deleted file mode 100755 index f8132eb6e..000000000 --- 
a/PyTorch/build-in/Classification/Sequencer2D/sequencer/clean_checkpoint.py +++ /dev/null @@ -1,78 +0,0 @@ -#!/usr/bin/env python3 -""" Checkpoint Cleaning Script -Takes training checkpoints with GPU tensors, optimizer state, extra dict keys, etc. -and outputs a CPU tensor checkpoint with only the `state_dict` along with SHA256 -calculation for model zoo compatibility. -Hacked together by / Copyright 2020 Ross Wightman (https://github.com/rwightman) -""" -import torch -import argparse -import os -import hashlib -import shutil -from collections import OrderedDict -from timm.models.helpers import load_state_dict - -parser = argparse.ArgumentParser(description='PyTorch Checkpoint Cleaner') -parser.add_argument('--checkpoint', default='', type=str, metavar='PATH', - help='path to latest checkpoint (default: none)') -parser.add_argument('--output', default='', type=str, metavar='PATH', - help='output path') -parser.add_argument('--no-use-ema', dest='no_use_ema', action='store_true', - help='use ema version of weights if present') -parser.add_argument('--clean-aux-bn', dest='clean_aux_bn', action='store_true', - help='remove auxiliary batch norm layers (from SplitBN training) from checkpoint') - -_TEMP_NAME = './_checkpoint.pth' - - -def main(): - args = parser.parse_args() - - if os.path.exists(args.output): - print("Error: Output filename ({}) already exists.".format(args.output)) - exit(1) - - clean_checkpoint(args.checkpoint, args.output, not args.no_use_ema, args.clean_aux_bn) - - -def clean_checkpoint(checkpoint, output='', use_ema=True, clean_aux_bn=False): - # Load an existing checkpoint to CPU, strip everything but the state_dict and re-save - if checkpoint and os.path.isfile(checkpoint): - print("=> Loading checkpoint '{}'".format(checkpoint)) - state_dict = load_state_dict(checkpoint, use_ema=use_ema) - new_state_dict = {} - for k, v in state_dict.items(): - if clean_aux_bn and 'aux_bn' in k: - # If all aux_bn keys are removed, the SplitBN layers will end up 
as normal and - # load with the unmodified model using BatchNorm2d. - continue - name = k[7:] if k.startswith('module.') else k - new_state_dict[name] = v - print("=> Loaded state_dict from '{}'".format(checkpoint)) - - try: - torch.save(new_state_dict, _TEMP_NAME, _use_new_zipfile_serialization=False) - except: - torch.save(new_state_dict, _TEMP_NAME) - - with open(_TEMP_NAME, 'rb') as f: - sha_hash = hashlib.sha256(f.read()).hexdigest() - - if output: - checkpoint_root, checkpoint_base = os.path.split(output) - checkpoint_base = os.path.splitext(checkpoint_base)[0] - else: - checkpoint_root = '' - checkpoint_base = os.path.splitext(checkpoint)[0] - final_filename = '-'.join([checkpoint_base, sha_hash[:8]]) + '.pth' - shutil.move(_TEMP_NAME, os.path.join(checkpoint_root, final_filename)) - print("=> Saved state_dict to '{}, SHA256: {}'".format(final_filename, sha_hash)) - return final_filename - else: - print("Error: Checkpoint ({}) doesn't exist".format(checkpoint)) - return '' - - -if __name__ == '__main__': - main() \ No newline at end of file diff --git a/PyTorch/build-in/Classification/Sequencer2D/sequencer/datasets/__init__.py b/PyTorch/build-in/Classification/Sequencer2D/sequencer/datasets/__init__.py deleted file mode 100644 index 085412ed2..000000000 --- a/PyTorch/build-in/Classification/Sequencer2D/sequencer/datasets/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -# Copyright (c) 2022. Yuki Tatsunami -# Licensed under the Apache License, Version 2.0 (the "License"); - -from .cars import StanfordCars -from .flowers import Flowers102 \ No newline at end of file diff --git a/PyTorch/build-in/Classification/Sequencer2D/sequencer/datasets/cars.py b/PyTorch/build-in/Classification/Sequencer2D/sequencer/datasets/cars.py deleted file mode 100644 index da93940d1..000000000 --- a/PyTorch/build-in/Classification/Sequencer2D/sequencer/datasets/cars.py +++ /dev/null @@ -1,112 +0,0 @@ -# Copyright (c) 2022. 
Yuki Tatsunami -# Licensed under the Apache License, Version 2.0 (the "License"); - -import os - -import numpy as np -import scipy -import scipy.io as scio -from PIL import Image -from torchvision.datasets import VisionDataset -from torchvision.datasets.utils import ( - download_and_extract_archive, - download_url, - check_integrity -) - - -class StanfordCars(VisionDataset): - base_folder = 'stanford_cars' - - urls = { - "train": "http://ai.stanford.edu/~jkrause/car196/cars_train.tgz", - "test": "http://ai.stanford.edu/~jkrause/car196/cars_test.tgz", - "devkit": "https://ai.stanford.edu/~jkrause/cars/car_devkit.tgz", - "test_anno": "http://ai.stanford.edu/~jkrause/car196/cars_test_annos_withlabels.mat", - } - md5 = { - "train": "065e5b463ae28d29e77c1b4b166cfe61", - "test": "4ce7ebf6a94d07f1952d94dd34c4d501", - "devkit": "c3b158d763b6e2245038c8ad08e45376", - "test_anno": "b0a2b23655a3edd16d84508592a98d10", - } - - def __init__( - self, - root: str, - split: str = 'train', - transform=None, - target_transform=None, - download: bool = False, - ): - super(StanfordCars, self).__init__(root, transform=transform, - target_transform=target_transform) - - self.data_dir = os.path.join(self.root, self.base_folder) - mat_anno = os.path.join(self.data_dir, 'devkit', f'cars_{split}_annos.mat') \ - if not split == "test" else os.path.join(self.data_dir, - 'cars_test_annos_withlabels.mat') - car_names = os.path.join(self.data_dir, 'devkit', 'cars_meta.mat') - - assert (split in ('train', 'test')) - self.split = split - - if download: - self.download() - - self.full_data_set = scipy.io.loadmat(mat_anno) - self.car_annotations = self.full_data_set['annotations'] - self.car_annotations = self.car_annotations[0] - - self.car_names = scipy.io.loadmat(car_names)['class_names'] - self.car_names = np.array(self.car_names[0]) - self.class_num = self.car_names.shape[0] - - self.transform = transform - self.target_transform = target_transform - - def __getitem__(self, index: int): - img_name 
= os.path.join( - self.data_dir, f'cars_{self.split}', - self.car_annotations[index][-1][0]) - - img = Image.open(img_name).convert('RGB') - car_class = self.car_annotations[index][-2][0][0] - - if self.transform is not None: - img = self.transform(img) - - target = int(car_class) - 1 - - if self.target_transform is not None: - target = self.target_transform(target) - - return img, target - - def __len__(self) -> int: - return len(self.car_annotations) - - def _check_integrity(self) -> bool: - for k in self.urls.keys(): - fpath = os.path.join( - self.data_dir, os.path.basename(self.urls[k])) - if not check_integrity(fpath, self.md5[k]): - return False - return True - - def download(self) -> None: - if self._check_integrity(): - print('Files already downloaded and verified') - return - for k in self.urls.keys(): - if os.path.splitext(self.urls[k])[-1] == '.mat': - download_url(self.urls[k], self.data_dir, - md5=self.md5[k]) - else: - download_and_extract_archive( - self.urls[k], self.data_dir, - extract_root=self.data_dir, - md5=self.md5[k]) - - def extra_repr(self) -> str: - return "Split: {split}".format(**self.__dict__) diff --git a/PyTorch/build-in/Classification/Sequencer2D/sequencer/datasets/flowers.py b/PyTorch/build-in/Classification/Sequencer2D/sequencer/datasets/flowers.py deleted file mode 100644 index bd684172a..000000000 --- a/PyTorch/build-in/Classification/Sequencer2D/sequencer/datasets/flowers.py +++ /dev/null @@ -1,125 +0,0 @@ -# Copyright (c) 2022. 
Yuki Tatsunami -# Licensed under the Apache License, Version 2.0 (the "License"); - -import os -import shutil - -import scipy.io as scio -from PIL import Image, ImageFile -from torchvision.datasets import VisionDataset -from torchvision.datasets.utils import ( - download_and_extract_archive, - download_url, - check_integrity -) - -ImageFile.LOAD_TRUNCATED_IMAGES = True - -class Flowers102(VisionDataset): - source_folder = '102flowers_org' - base_folder = '102flowers' - source = os.path.join(source_folder, 'jpg') - url = "https://www.robots.ox.ac.uk/~vgg/data/flowers/102/102flowers.tgz" - image_labels_url = "https://www.robots.ox.ac.uk/~vgg/data/flowers/102/imagelabels.mat" - set_id_url = "https://www.robots.ox.ac.uk/~vgg/data/flowers/102/setid.mat" - filename = "102flowers.tgz" - image_labels_filename = "imagelabels.mat" - set_id_filename = "setid.mat" - md5 = "52808999861908f626f3c1f4e79d11fa" - image_labels_md5 = "e0620be6f572b9609742df49c70aed4d" - set_id_md5 = "a5357ecc9cb78c4bef273ce3793fc85c" - - def __init__( - self, - root: str, - split: str = 'train', - transform=None, - target_transform=None, - download: bool = False, - ): - super(Flowers102, self).__init__(root, transform=transform, - target_transform=target_transform) - - if download: - self.download_and_arrange() - - assert (split in ('train', 'test')) - self.split = split - if split == 'train': - downloaded_list = os.path.join(self.root, self.base_folder, "train") - else: - downloaded_list = os.path.join(self.root, self.base_folder, "test") - - self.data = [] - self.targets = [] - - for i in range(102): - for file_name in os.listdir(os.path.join(downloaded_list, str(i + 1))): - if not file_name.endswith('.jpg'): - continue - self.data.append(os.path.join(downloaded_list, str(i + 1), file_name)) - self.targets.append(i) - - def __getitem__(self, index: int): - - path, target = self.data[index], self.targets[index] - img = Image.open(path) - - if self.transform is not None: - img = self.transform(img) - 
- if self.target_transform is not None: - target = self.target_transform(target) - - return img, target - - def __len__(self) -> int: - return len(self.data) - - def _check_integrity(self) -> bool: - root = self.root - fpath = os.path.join(root, self.base_folder, self.filename) - if not check_integrity(fpath, self.md5): - return False - return True - - def download_and_arrange(self) -> None: - if self._check_integrity(): - print('Files already downloaded and verified') - return - download_and_extract_archive( - self.url, self.root, - extract_root=os.path.join(self.root, self.source_folder), - filename=self.filename, md5=self.md5) - download_url(self.image_labels_url, - os.path.join(self.root, self.source_folder), - filename=self.image_labels_filename, - md5=self.image_labels_md5) - download_url(self.set_id_url, - os.path.join(self.root, self.source_folder), - filename=self.set_id_filename, md5=self.set_id_md5) - - image_labels = scio.loadmat(os.path.join(self.root, self.source_folder, - self.image_labels_filename)) - set_id = scio.loadmat( - os.path.join(self.root, self.source_folder, self.set_id_filename)) - - self.classify(set_id['trnid'][0], 'train', image_labels['labels'][0]) - self.classify(set_id['valid'][0], 'train', image_labels['labels'][0]) - self.classify(set_id['tstid'][0], 'test', image_labels['labels'][0]) - shutil.rmtree(os.path.join(self.root, self.source_folder)) - - def extra_repr(self) -> str: - return "Split: {split}".format(**self.__dict__) - - def classify(self, set_, split, labels): - for n, id_ in enumerate(set_): - cls = labels[id_ - 1] - filename = f'image_{id_:05d}.jpg' - dst = os.path.join(self.root, self.base_folder, split) - path = os.path.join(dst, str(cls)) - path = path.strip() - path = path.rstrip("/") - os.makedirs(path, exist_ok=True) - os.rename(os.path.join(self.root, self.source, filename), - os.path.join(dst, str(cls), filename)) diff --git a/PyTorch/build-in/Classification/Sequencer2D/sequencer/distributed_train.sh 
b/PyTorch/build-in/Classification/Sequencer2D/sequencer/distributed_train.sh deleted file mode 100755 index 067bf21ae..000000000 --- a/PyTorch/build-in/Classification/Sequencer2D/sequencer/distributed_train.sh +++ /dev/null @@ -1,4 +0,0 @@ -#!/bin/bash -NUM_PROC=$1 -shift -python3 -m torch.distributed.launch --nproc_per_node=$NUM_PROC train.py "$@" \ No newline at end of file diff --git a/PyTorch/build-in/Classification/Sequencer2D/sequencer/erf/__init__.py b/PyTorch/build-in/Classification/Sequencer2D/sequencer/erf/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/PyTorch/build-in/Classification/Sequencer2D/sequencer/erf/models.py b/PyTorch/build-in/Classification/Sequencer2D/sequencer/erf/models.py deleted file mode 100644 index 634ba360b..000000000 --- a/PyTorch/build-in/Classification/Sequencer2D/sequencer/erf/models.py +++ /dev/null @@ -1,35 +0,0 @@ -import math - -import torch -from torch import nn - -from erf.save_output import SaveOutput -from utils.helpers import rgetattr - - -class ERFNet(nn.Module): - def __init__(self, model, attrs, channels_last=False): - super().__init__() - self.model = model - self.save_output = SaveOutput() - self.channels_last = channels_last - hook_handles = [] - for module in attrs: - layer = rgetattr(self.model, module) - handle = layer.register_forward_hook(self.save_output) - hook_handles.append(handle) - - def forward(self, x): - features = [] - _ = self.model(x) - for feature in self.save_output.outputs: - if len(feature.shape) == 3: - feature = feature[:, feature.shape[1] // 2 - int(math.sqrt(feature.shape[1])) // 2, :] - elif self.channels_last: - feature = feature[:, feature.shape[1] // 2 - 1, feature.shape[2] // 2 - 1, :] - else: - feature = feature[:, :, feature.shape[2] // 2 - 1, feature.shape[3] // 2 - 1] - features.append(torch.sum(feature)) - self.save_output.clear() - - return features diff --git a/PyTorch/build-in/Classification/Sequencer2D/sequencer/erf/save_output.py 
b/PyTorch/build-in/Classification/Sequencer2D/sequencer/erf/save_output.py deleted file mode 100644 index 796370a03..000000000 --- a/PyTorch/build-in/Classification/Sequencer2D/sequencer/erf/save_output.py +++ /dev/null @@ -1,9 +0,0 @@ -class SaveOutput: - def __init__(self): - self.outputs = [] - - def __call__(self, module, module_in, module_out): - self.outputs.append(module_out) - - def clear(self): - self.outputs = [] diff --git a/PyTorch/build-in/Classification/Sequencer2D/sequencer/erf/scaler.py b/PyTorch/build-in/Classification/Sequencer2D/sequencer/erf/scaler.py deleted file mode 100644 index d26b27e1f..000000000 --- a/PyTorch/build-in/Classification/Sequencer2D/sequencer/erf/scaler.py +++ /dev/null @@ -1,5 +0,0 @@ -class MinMaxScaler: - def __call__(self, array): - scale = 1.0 / (array.max() - array.min()) - array = array * scale - array.min() * scale - return array diff --git a/PyTorch/build-in/Classification/Sequencer2D/sequencer/generate_erf.py b/PyTorch/build-in/Classification/Sequencer2D/sequencer/generate_erf.py deleted file mode 100755 index 212ddcf77..000000000 --- a/PyTorch/build-in/Classification/Sequencer2D/sequencer/generate_erf.py +++ /dev/null @@ -1,291 +0,0 @@ -#!/usr/bin/env python3 - -# Copyright (c) 2022. 
Yuki Tatsunami -# Licensed under the Apache License, Version 2.0 (the "License"); - -import argparse -import os -import glob -import logging -import time - -import numpy as np -import torch -from contextlib import suppress - -import matplotlib.pyplot as plt -import torch.nn.functional as F -import torch.nn.parallel -from timm.models import create_model, load_checkpoint, is_model, list_models -from timm.data import create_dataset, create_loader, resolve_data_config -from timm.utils import natural_key, setup_default_logging, set_jit_legacy, random_seed - -import models -from erf.models import ERFNet -from erf.scaler import MinMaxScaler -from utils.helpers import train_rnn - -has_apex = False -try: - from apex import amp - - has_apex = True -except ImportError: - pass - -has_native_amp = False -try: - if getattr(torch.cuda.amp, 'autocast') is not None: - has_native_amp = True -except AttributeError: - pass - -torch.backends.cudnn.benchmark = True -_logger = logging.getLogger('validate') - -parser = argparse.ArgumentParser(description='PyTorch ImageNet ERF Generator') -parser.add_argument('data', metavar='DIR', - help='path to dataset') -parser.add_argument('--dataset', '-d', metavar='NAME', default='', - help='dataset type (default: ImageFolder/ImageTar if empty)') -parser.add_argument('--split', metavar='NAME', default='validation', - help='dataset split (default: validation)') -parser.add_argument('--dataset-download', action='store_true', default=False, - help='Allow download of dataset for torch/ and tfds/ datasets that support it.') -parser.add_argument('--model', '-m', metavar='NAME', default='dpn92', - help='model architecture (default: dpn92)') -parser.add_argument('-j', '--workers', default=4, type=int, metavar='N', - help='number of data loading workers (default: 2)') -parser.add_argument('-b', '--batch-size', default=1, type=int, - metavar='N', help='mini-batch size (default: 1)') -parser.add_argument('--img-size', default=None, type=int, - metavar='N', 
help='Input image dimension, uses model default if empty') -parser.add_argument('--input-size', default=None, nargs=3, type=int, - metavar='N N N', help='Input all image dimensions (d h w, e.g. --input-size 3 224 224), uses model default if empty') -parser.add_argument('--crop-pct', default=None, type=float, - metavar='N', help='Input image center crop pct') -parser.add_argument('--mean', type=float, nargs='+', default=None, metavar='MEAN', - help='Override mean pixel value of dataset') -parser.add_argument('--std', type=float, nargs='+', default=None, metavar='STD', - help='Override std deviation of of dataset') -parser.add_argument('--interpolation', default='', type=str, metavar='NAME', - help='Image resize interpolation type (overrides model)') -parser.add_argument('--num-classes', type=int, default=None, - help='Number classes in dataset') -parser.add_argument('--class-map', default='', type=str, metavar='FILENAME', - help='path to class to idx mapping file (default: "")') -parser.add_argument('--gp', default=None, type=str, metavar='POOL', - help='Global pool type, one of (fast, avg, max, avgmax, avgmaxc). Model default if None.') -parser.add_argument('--checkpoint', default='', type=str, metavar='PATH', - help='path to latest checkpoint (default: none)') -parser.add_argument('--pretrained', dest='pretrained', action='store_true', - help='use pre-trained model') -parser.add_argument('--num-gpu', type=int, default=1, - help='Number of GPUS to use') -parser.add_argument('--no-prefetcher', action='store_true', default=False, - help='disable fast prefetcher') -parser.add_argument('--pin-mem', action='store_true', default=False, - help='Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.') -parser.add_argument('--channels-last', action='store_true', default=False, - help='Use channels_last') -parser.add_argument('--amp', action='store_true', default=False, - help='Use AMP mixed precision. 
Defaults to Apex, fallback to native Torch AMP.') -parser.add_argument('--apex-amp', action='store_true', default=False, - help='Use NVIDIA Apex AMP mixed precision') -parser.add_argument('--native-amp', action='store_true', default=False, - help='Use Native Torch AMP mixed precision') -parser.add_argument('--tf-preprocessing', action='store_true', default=False, - help='Use Tensorflow preprocessing pipeline (require CPU TF installed') -parser.add_argument('--use-ema', dest='use_ema', action='store_true', - help='use ema version of weights if present') -parser.add_argument('--torchscript', dest='torchscript', action='store_true', - help='convert model torchscript for inference') -parser.add_argument('--legacy-jit', dest='legacy_jit', action='store_true', - help='use legacy jit mode for pytorch 1.5/1.5.1/1.6 to get back fusion performance') -parser.add_argument('--attrs', default=None, nargs='+', type=str, - help='select layers to output features') -parser.add_argument('--result-npy-dir', default='./erf_results/224/npy', type=str, - help='path to save npys of ERF') - -parser.add_argument('--seed', type=int, default=42, metavar='S', - help='random seed (default: 42)') -parser.add_argument('--num-batches', default=32, type=int, - metavar='N', help='number of batches (default: 32)') - - -def generate(args): - # might as well try to validate something - args.pretrained = args.pretrained or not args.checkpoint - args.prefetcher = not args.no_prefetcher - amp_autocast = suppress # do nothing - if args.amp: - if has_native_amp: - args.native_amp = True - elif has_apex: - args.apex_amp = True - else: - _logger.warning("Neither APEX or Native Torch AMP is available.") - assert not args.apex_amp or not args.native_amp, "Only one AMP mode should be set." 
- if args.native_amp: - amp_autocast = torch.cuda.amp.autocast - _logger.info('Validating in mixed precision with native PyTorch AMP.') - elif args.apex_amp: - _logger.info('Validating in mixed precision with NVIDIA APEX AMP.') - else: - _logger.info('Validating in float32. AMP not enabled.') - - if args.legacy_jit: - set_jit_legacy() - - # create model - model = create_model( - args.model, - pretrained=args.pretrained, - num_classes=args.num_classes, - in_chans=3, - global_pool=args.gp, - scriptable=args.torchscript) - - if args.num_classes is None: - assert hasattr(model, 'num_classes'), 'Model must have `num_classes` attr if not set on cmd line/config.' - args.num_classes = model.num_classes - - if args.checkpoint: - load_checkpoint(model, args.checkpoint, args.use_ema) - - param_count = sum([m.numel() for m in model.parameters()]) - _logger.info('Model %s created, param count: %d' % (args.model, param_count)) - - data_config = resolve_data_config(vars(args), model=model, use_test_size=True, verbose=True) - - if args.torchscript: - torch.jit.optimized_execution(True) - model = torch.jit.script(model) - - model = ERFNet(model, args.attrs, args.channels_last) - model = model.cuda() - if args.apex_amp: - model = amp.initialize(model, opt_level='O1') - - if args.num_gpu > 1: - model = torch.nn.DataParallel(model, device_ids=list(range(args.num_gpu))) - - dataset = create_dataset( - root=args.data, name=args.dataset, split=args.split, - download=args.dataset_download, load_bytes=args.tf_preprocessing, class_map=args.class_map, - ) - - loader = create_loader( - dataset, - input_size=data_config['input_size'], - batch_size=args.batch_size, - is_training=True, - use_prefetcher=args.prefetcher, - interpolation=data_config['interpolation'], - mean=data_config['mean'], - std=data_config['std'], - num_workers=args.workers, - crop_pct=data_config['crop_pct'], - pin_memory=args.pin_mem, - tf_preprocessing=args.tf_preprocessing, - ) - - model.eval() - train_rnn(model) - # 
warmup, reduce variability of first batch time, especially for comparing torchscript vs non - input = torch.randn((args.batch_size,) + tuple(data_config['input_size'])).cuda() - model(input) - - random_seed(args.seed, 0) - segment_ps = [] - for idx, (input, target) in enumerate(loader): - - if args.no_prefetcher: - input = input.cuda() - - input.requires_grad_() - - # compute output - with amp_autocast(): - outputs = model(input) - - ps = [] - for output in outputs: - output.backward(retain_graph=True) - p = F.relu(input.grad) - ps.append(p) - input.grad.detach_() - input.grad.zero_() - segment_ps.append(ps) - - if args.num_batches == idx - 1: - break - - for idx, p in enumerate(list(zip(*segment_ps))): - p = torch.cat(p, dim=0) - s = torch.log10(torch.sum(p, dim=[0, 1]) + 1) - s = s.detach().cpu().numpy() - - os.makedirs(args.result_npy_dir, exist_ok=True) - img_size = args.img_size if args.img_size else 224 - np.save( - os.path.join(args.result_npy_dir, - f'{args.model}_{img_size}_{args.attrs[idx]}.npy'), s) - - -def main(): - setup_default_logging() - args = parser.parse_args() - model_cfgs = [] - model_names = [] - if os.path.isdir(args.checkpoint): - # validate all checkpoints in a path with same model - checkpoints = glob.glob(args.checkpoint + '/*.pth.tar') - checkpoints += glob.glob(args.checkpoint + '/*.pth') - model_names = list_models(args.model) - model_cfgs = [(args.model, c) for c in sorted(checkpoints, key=natural_key)] - else: - if args.model == 'all': - # validate all models in a list of names with pretrained checkpoints - args.pretrained = True - model_names = list_models(pretrained=True, exclude_filters=['*_in21k', '*_in22k', '*_dino']) - model_cfgs = [(n, '') for n in model_names] - elif not is_model(args.model): - # model name doesn't exist, try as wildcard filter - model_names = list_models(args.model) - model_cfgs = [(n, '') for n in model_names] - - if not model_cfgs and os.path.isfile(args.model): - with open(args.model) as f: - model_names 
= [line.rstrip() for line in f] - model_cfgs = [(n, None) for n in model_names if n] - - if len(model_cfgs): - _logger.info('Running bulk validation on these pretrained models: {}'.format(', '.join(model_names))) - try: - start_batch_size = args.batch_size - for m, c in model_cfgs: - batch_size = start_batch_size - args.model = m - args.checkpoint = c - r = {} - while not r and batch_size >= args.num_gpu: - torch.cuda.empty_cache() - try: - args.batch_size = batch_size - print('Validating with batch size: %d' % args.batch_size) - generate(args) - except RuntimeError as e: - if batch_size <= args.num_gpu: - print("Validation failed with no ability to reduce batch size. Exiting.") - raise e - batch_size = max(batch_size // 2, args.num_gpu) - print("Validation failed, reducing batch size by 50%") - except KeyboardInterrupt as e: - pass - else: - generate(args) - - -if __name__ == '__main__': - main() diff --git a/PyTorch/build-in/Classification/Sequencer2D/sequencer/hubconf.py b/PyTorch/build-in/Classification/Sequencer2D/sequencer/hubconf.py deleted file mode 100755 index d0eedab1c..000000000 --- a/PyTorch/build-in/Classification/Sequencer2D/sequencer/hubconf.py +++ /dev/null @@ -1,6 +0,0 @@ -dependencies = ['torch'] -from timm.models import registry - -from models import * - -globals().update(registry._model_entrypoints) diff --git a/PyTorch/build-in/Classification/Sequencer2D/sequencer/img/BiLSTM2D.jpg b/PyTorch/build-in/Classification/Sequencer2D/sequencer/img/BiLSTM2D.jpg deleted file mode 100644 index 942aa712efb897dc3cbe8163f84c29ecb50b576a..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 73526 zcmd42cU05q*Do3?f&$W;l2N1!8I@j=5eJYUARR)?edk!2tboj_op<}`##{~k^lc0kK4jnpp_|TCfhYt&+?+bnh z9hNvEdFI09qf(AHgwBRbUwrW7`7srX*Y9PV2HEE>-HdoBEFvo>ub`-^rmmr>rKfLT zXk=`1#H_!XBSsDcMng5-!1>!0fDHX$f)R;SajUIM~O+vxW_4}**Up+`2|n^ zEPO#IBbJk1k}KZS)=}#l-ZnP1(>pr5y8r6w9U2}P9UK4rg~4Ra{yjIpu(-6$+5Gl> 
zi@Oc{*!jn<1E53y?y`Rw_J6ZW;DiGQ4<9~sSm+{7_G$`;HOrjtRH{kjOR%$6cXuzN_dzVN zTBcg-8-U0%tvQ0ZI2t{D5KwPdKKq!`@Q7zzy%N(93zNWD2YGc~TvYUFOTy?fylEOE zW_HXy_PR)!G2z2kraXyV*`vl-9s=CPN7n-SQ=OxNA_ksVmDt$SWNf~F+XY!I=c~8o z<<&%g$F6>v!Kbu|(Uh%c_Cb&FlDC0c5=zdXk3G?Kk9TPw)ZSgh{Jvw57hWFNCQXf) zALyLAz@8_lkqMBDwclR1a*#NRp1E_!*f2n|!@7^&B%~A3-FbUuH09t8nL+cl!6}BP z4$8_ggQy!tljZArX=}FC4L8kCRvO0nv^}f5^vi$LiT{IMzeK(7Ey7^9{4gfsdcO zy5F4OY{q^2-ji^*&D8xd@{-Hll_%z^PGNsm^Ip4Uk@Q*}vMHTeB~?%f6V#FxRWY|9 z_L$Y+_XT{-oqZ4xs1v|X+7tNwU}z;|E4g9s(9k~UUAZ5>{w8cuGK(+t^FAo~;TU!i zISgZ~Eoc2#5%9`w75)Q_eb7v#`2Tg8m-`@(VC*Mz2v6k3fAT7qdT8bV-3^#!LuRet z+ngP4xdiSKPn?#Z$+o3PPA#}&#Yr9FLMzHrAt6!ut;0uOGOmyI?<{pY4R(eTu*K9*vC!`3|{ zAl>Wj`51BEDHA`9{~?EEKBMAynu`_m~;1$uZ9U+}3*RT_Be7E_B zGZw@B=kwKt`;KmvC}#9(x1AfB7bwoUFJ8XvhG5=?w+8x&=vtt{?d22a~%?F?DI;)u$!CNmpHqChMl4`eS`;%CQEpAo z74sII$UdEKsTL*hCuH|2d*OB+IBGa8a@)?&qHG9dJq1XUj`haGY^!Lh>qHF(?TyAh zUrIO*G%}H=(cP@9hf)3`Dx**-pnzT~JGQ>zZf|!9LE~7{@gf+9;q?}Xc{MRE{N)k{ z9oeOL%RSPEff`mL9ND=X7ValQdmf!}%^dEs=$h9UezKUA_u~b5jC`3)lFnHmX`h=T zxT7XDb5-Tr9Z;P;VNtnrLjC2LocrG!e8jJPdm;Jw#OohxO4rUz(NHO#jzo!txD1~` zg?9{vJexjH__v?T7873_I-d}4HGC|#rtqel3+mUnDL?lptGfot{!S6^q&^(^CWXyw zHR~@8D0mz8rKFh~5Ys=cyXt2apmzW<&u5G&eJSa{In>8DKRpvT#XRd#mKUiI&G7fQ zwTWp@owHXu@EBzf030u3T7vf=xn z5LANn7*8`~R=$f&T9PwqK{E0Y=6}hJ-SeL!ff=m)W_jfQks*a&O zJ1d!H;AL5b0#6j)Zk>gE*_g%BH;zp$WrWCll{<;1cPk(KWOi|WhY?sl{G31fjVgWj z4xzNJ=3yl+y~KuzKi}xDh;GpdN_DUG{g{Uuux_4vvvQMUbPveAcfyvwcCtAIL`$@s%4w3#t0r)0An*y|*g#uidONc4W_j(rxn{ApB(*3R+xt>*D9QEfX-30kUSIH_MT zdm-_DvGZJUVey6$J-@JaJ~ythtjO(QZj&@2T0N@KDJHakk{DP`@m)jRDq4)T)wAF1 z%I@0y#r1ZEk0&$W^4aqh!hwBVz00Q#{`EfD<lah0PAPNE>2tKt=~fIW-2x(uw{lP}L+)9M-@*!-0~O*iQ!f7J|Dz26oa zUn}OBvO`lQt6NLhHFwFJ(fAR6wMVa|wPS6vVH6+P9_@b~s+AVfm4}FYxVGk}dak9$ z&NL)uY7Kwc{S)5fKO_45TMPqV}1ZQS;;lbZ~B{U5TVuTmY$zrK@W+j^}&?jN^0^gGW4#jZ; zf@6G@IR$C@c3$>%Kx`!%<`kcGEO?xzN6osH)_%__rE0N2u&WHKjNp}z(&1^st~VF2 zXPh@zl$rJHcX_d>>uwS?t6(ztlHw#%G#{<1(PQy-fc(;x8^L|`FvG|qDCFCfC!TLR 
z)jyQA{!OU4gqn!#NVt^HXEL4<0-Jf>CPf0Y{a<1tIOp3$w`#uZb{f=kciK|*rX|o2 zE(Z9u-ViCwbB#WVCkKuzT6c;HIo96ygFm7d6wa+D1va%vGCq4CCZca}2Fo{7k4ugz zDl?4U8q5lbJZ-TYy@#TzYDqcd+XrcdgzvAYXH|BY)7Up?Qmzt2!`h+I; zVR9BHjk(dWJI#ogW$8X)%g&EGvcuLcvBNqmXS0MbXNr(O63gKcLx+^8hm}JYE^r@? zC8*g(#xk2J$6YywN#ip@^npW+<$FHFe2V@$?&arTL@8B;KKkMnJpHvz#nbbaFn{Nh z&#mNBOtMKw5;LT8M+m}up;UJTLnRxdE)|kSyt1{sjU!pv)Bcky_);ntt~6=oI~Gyf zRBLxG1}e3eNBR^M3{G*w9;1Ap&V0QOdT#|i$GeO@eTA?z$h(|2(~!Ckx)b)eAvRJy zWdwVtlfCT)TwwL>xh>G2AgiB5mIIHj(~4{jcakP-O0LD4mB>8~JHD4?mGx#mp>IWR zb6Hc@A!#gAt@tJ>)L+9=)AOu9Vk4*YJHJwXb8>Un(n1r}aPeh!YfV(%YKHisgg!f_ zogp);BgBw-9)awd-HpO{bK25*@v43*`mMm>?mT82Qr|~f_RQ3JQ6BV?C;v; zdfK($HRyepceQTO_3LS5HD-X^`p{gRs&rw*ppCI-UlH>KNwiv~J+@mwmrmuNgxYT`QUBsrf|TY6?BE#%G*#fY>qQyaBH50~(r52O#pJ^}jY z)*HQl6fm16?`(bgb~>0F1*9-*KCgeX7&dd{O z9_J)9GpB{TI$xlJltONM*@<5t{Jvsm>sffb`RSeWh0BJ15iyy%>EDiMUJBN-IlSTNfoc6hr>z7zPWgF?2sjJ%|7>XaVWagZL)9`K9_mB)feY!5JyCm`#4lD_(h~%9a#4^Dg>(#hf>``<)tGt;F zbKzuV{0;c&jCbszF#2)NzD$3<%nEU~-v=empdEe@yxOl4n?9#s^eDaet$O)4_UmFT zH;unZqz1KLtcCHIJ!8!v%)%R%cdm}Ky@FodU% zVpa|)qD(qRrj^a?Sl~3jYd{7GJ++LK^jPA?nDkFQXNk&n_-Ynh9XaiN&3n1G{$j~J z{R>fAzmdtkNcY5Ga$`kAs)xrF@p~7`8kIsdEv`Szj(AyPX!5#P)lHHhJ;6oUnZBg% z);&iXElvbmWe2qIWcP}m0s5Q(t|LeM_XIglKv$vdws?xK#NM+O8^V&w{bT|QsgMjD zsQ_Sn*-Y{say)BVRy3%OrL$g8y%G_0kJ8#don}@9_>ED|wa$0ULSN&$+bOY*RUwK` zufpxRqy5duz4 zhO%S}M;3>Ax%8^FpPQuGQ9gDKqSKi%)S=swd)?ROc|2OxqgzKFA9yjujT2dQilcYN zDc8H=dIW~-(_1{W@CMOn*`=fVb!@DMw;?gNb>S%@`eo2X9)?OThyO8j>%R}oh#azw;()% z*E0X*Y`%qkES&u#L_2c3?EGZy@7nq0LV{j)^ylwH^_E7bu+gr5-um6KzPmCD1Mb2W zk@0;`G-1X@UwVEtzrpdekq5D#UYGBKrX}}59LB)zr33q*U+VY^{M*RSUyyqT9Cz_Q z{vTqn+MmJSk2|bm=fVG93E>Z8-&;k2Q~45mg;1@aeNZix^eEw!19Kn|rjT$Jy)qm6 z8qEDjWv~1Kgv^+3NHI`&@fOJ*tzbTs)4wW8L?PxY&-p5|b@TkHf&BGp#P|*N04_S? 
zrGK0sS;kDq*!OzsLnpYsJ)?Ose=YPoLYq2}(!&s0vkP9OvKLscZS=qv^WCdJ?E5JdPC+tpcvUYfHQ|((4^%uYL8kH3_kLSDPG~Dr zTRS=HeS$HyaD9V5WiYsE2n1grk4npuLR+qW7mXRV7(83T`6v_I!5BdOOA^<6VQidTPDA)&O4(|Q=yC8@lu#x+qpa1_L zH#BUf!1PfJk1h{X^e6W2^(EF@m%?`D^k}!91hnP%l>VS-{`M-@f5288F1e&C{dcQU zGg3{kf1gk3H7+s(hYJ}&!adkz`*oOFW?@t2r~jE42#o}M*aJ)fu&Xzz(p9PsV0VpI z67<;(>DWL~Nc+@zz=vrk=QNBlegQPIYBF_Nu8>fQ+~+q8C}rsGj-9X^p|t}jl8+Z7 z7%78g8l~=-J~g1+urEHBJ0~I*UJ|X~oEoI%s{SFju&KC!GZYhTydL9Dag)q5a_zDz zUSj&2L@(RGBS;oSk8+(I^NdVev{7$Aabs#tCY1y&h7fa<{Q|(w;MzrnF+u`8PXZ3_ zRtOk<_U)&%6YPexvQnfhI=KB|pXKl9SUO)le}3NKRoJk4sfc)Km}A1z90a%@;iSs_vqeTn{J*fKG& zRGM?RK~6Hb^z<;azbt6T;nMV%uX@!UE*jNdj%AHEnM%$N=MrfeR()6ds@$?Fgl}@3 z7UuuaK+B3^A2bR!_wdRt+ko6FJKq?f-D}_$cpvvp{P!If#3@DOdsyPVJIeBG-&*!G z{qARsAv>hlR~d+789-Rz1A*Zai-YAkGp*4*_XFC;4o+VgmCvWWF3t{g39LlqsrCuk z8<1rBgfZ6(N7z-)^d*hXe&-Mf3tgph|!NLr0w%T0gOcPj0$(ck!=$WzZ;oVT5-&=q!6}7c-8C zA{ELgYK;+X5MtR>Bb42S9jXlM0g2xFg@Y#bW9-j-b9J4&HwxBA^Yn>v!EWYSu=`Pd zXZP^X1N77B*Leb0OCyl<_>D(}x55RUz7@VSeT-@uJwlWD{37tVK!;^*70UL5wT=Y& zX7})L;$QNAbETv9`gJs$Y`5bC;Db7$9+G&7>0};3wS#p@m!wez3Aj1Dr8-d zEshu0&}^3&2BMYjarFvIr@eI4-AAn!(1+6HY8$iWy#tQdB;XDSJ(yZJFwKc{)@}g zTxij3NgLf3o3zj_6=XszeM02@)W(thlYm?{!Q<-4W~!(fbitJ&NMXK=jYSlhylccH z2I;!IC@a{7)N>OCve#5?depwUYvpD~YQ^Es32Jn=JV=N$*r7=J`Qu`8ug(1N6DuZD zLxU0dXDpoHm$F~Z3fshiJO07IxH9ZmoSHm*l>jO>+S}g1I_$2r zKLOO;fYIj|S&sO-Mog?f+u4m>Uo%s4Ac&E-HqO&hQd(I%-hn8|ED%slCs6JQsvHO0 zosCHHjzRuy;z0|MY%7TK$-V-0G=3 z?_QB|Xf}CGB{^SBo@m4JGa9LsCvGL@J}a)Uho3C+&74x%E6|}1q%#Jx_(~YbVg4x; z@(>?{F>M$OYrDOnv^1@to1loXA8qkrPK)tQa~2+$S+j>G5aTVHESSV%p*@a0FoVUJ znUjOYn$i`M%4?aX!L1H-?&h??X_M-x#fqSeW1gQ1D_56|t@KF3&MgGZbL7EvFZ-pg zbv3i6@^Zx2M*Oy(z3y+CzGR1yQCv%BjQpR06o?QgT96g6D*bN_K zJ7cURL3FXHjKT0o_yAP{?h8DWGpoIkW|Ex691v%qE3W~jEaW3ByTTvHQ~zBLe5{`Y zly}>VnQ04v>RnrQ>&KFs%jXp>`-xw(@JCtGZ|u4BUa(uv%Mrg1WOzX>w>Z@x+ukhI zApZE5+u_AGwo)vufX}Wwd-^!1;;+hQ<+H+8?^jj$iaFw`xf?=pYN-s%g;gfUnYG=H zJb_l9*?t$MfKlBe?1TKpGZhpDb=9Sj*Dj0c~FR{!<547(HJ>cCCBeSR9HzoRNvyn*($aelo&iUAT 
zdgN*VNe@lk5yEXUNSXXg1z!>>4U?qkH`G{~Z_5^`cloL^Y2P2QT_6vfDD5 zV}g)s=$zI?Q)K(805CdDCv)4X#LVV7+HuyJJ@%w3Z4zd2%W7zC4gOHbD!s9h?1Wjq zP_p#?TNn6*fAogNUhaBoh&2)IZ6Lr3ySD0)AJ3$4wh_0uQ`uUNiS)*Rxg=uoOk z<%hQh8W4e!Uccu1UMv2i?UigAbOBw-C6_OhuInFXrD@g6)QZ=2nVZxp~2joefAg9)~ zN$Qv#P;H?drg9cX>b#!J9uGTWi$bX5~H8>gKDU>-Rv3|@>eNdvca7>?)#Cvz$R8H$eA z`+n9HaznBrwxOFhcMz{k4jgj|9h18#Hh-}JWEZj z1zNLaj?)Qx!c}JQ$3HV{zwCOop?dlTV*BHZvpVp&1R*x3nUj^o6WuinOt=Unaj@L` zEx-d-O$tA@_xdYK&EyI+u8RlZWW6Q4Pmp;%ejK zNCaHxEnVE$lQrTrPgqlGHZSV*)nSt!M2T8=CKz%IaV@8r_m#nQ%lva3H)=T$|H}|% zTkAf-aZEqH$lRps^eTD&>(^h4{&bJID)T8Pw8vnoN4u@@RLIN9p0fP4y6hjde`{v} z8o1Tk_;Y_?OgQctG_&5Sp3?)X3+!0dLU)#^-PxXVzknYjPhezNu;hBjp7Zuke3nZu zym}Lv>{WuhAM|SLUgs5FL^^m&(c>FWr9CD za~sEh%e#zo9$Oa#q&6Jm?)jtBR1fqDw_wl=b*Fh-JBfW~^F`PCO)YL4s-|+g(k>1X zxDV3s;;5_`GBN_@_lmJ5DSV;F{K0*Y%swaxpNX#>@6LLp4BktN&Jqhk%0W-?905Pl z3RD~T!MnaqiZedNIuv8e5Puvv+UVQiT z@MK(Ik9Ti8EKH&od7+t4TJR^A`H~H8e^k^R=Z?8KLP}H{uO@CTS~l#CeoJiowa+7^ zEa$n7B;zi&$m$KBF{L;Bi}KHV$!);{VlQMosiLP6`e71L^?gTBRnLIvU2CAAGiTzt zztEbmJx!(2!uEf3Yk6NPmNC#c@{k0yA=F^UWN@(I`nq>z2k5vjeLwa}m5 z0FP#~q%p@@ORusHasL1uIDTBrAaAK`lVuyG!1HD!6I+BT%Ca^oV=1f>`@RpJHw@3l z$3J~ufUBs_f&r=ue2I{nLd)}Z%C&a|oBE_gU$Ck67qY$%y#9U~dzs@VK*G}Kic?Qm zF!2@H*HcR->yOBuD~bkWe_c#vtEuzw{M$FJ)EuF{xAmnSc%k58H#@!6VY{e%mu?N) zlo#GBFB$f|#@+{s@l4sU3^N~&oFk|0L0dqUtS|b~>=b_1;UH4bM3`8$vNnVPk^^AN z?j7pu#7l%y z(N5E)DW-}FhYx&-H@EK@tF$kXfmiDml9QsA$sZj_PGYUMmj)e!i$uPMUTliqt9?Dn zz<>a8QeU~l4YmXGbncFN))p*vM{f)-%2Nu_eZsLt_!JIwP%d7Lx)m=DUhdfpO1MD1 zfKJ^@@S<2bb~+I(5hG-6yHPiisFwM%j$YA<{sy!Pm(~fr#W6}I!u3@DDYcLB^!zu1 z;jr69+j74uV*lwXt~H(7`BzuLm* z3M2$!fgt`cw+3>rIN`(TS>+V*A=*LA@lQo`$_tDvU`G%PF1b^HZ|!zOj4?Z*bV`=f zokA#tNTLVm<-=Bqi>2C}!|qbu6d>)|u5L}GwOJ1~?(Q3-+b;*b>xp&|Jsj60Ui1C!~mXT*fyLlAg(Fs5c0bE; zjPUkPeeI&WJi17Rps^e(Z&cN^7-2Q4LQ);yZ2N1|ZdXlT7w7;(4W!h-f{$6$=m%wU z{~+;q4~3`?lIUSg8%ZG~h5}NXLq>>71kC7Y zUu8${gSyFPkz(Wuo>76_YukYnK$=g^`=$+^+BU;KKf|E9U@Rp3eg(Ckg;+Rtots%4 
zIGpy3;H2aH@OpIP`qCHQiv{Nz8SrgHb8S%Gh0;!Uaw^4W_SE8t6UEKx9&0moHYme4iO{W&Zmn^>*yD!t-qi;orgy!)Hdvu`TpUp8sgc)s+}3VMN| zAddSDsl3{I43!Fo9pV{>l=J(LVi-$i+`3NN?Y)D2_JA`pB#eFu6Wcnfw=xQ7mN72- zW1ows%W)Z_CA?ChhPte7K_sp+4686ROl|TKwi(QL^?eRJW2qG$&h00yX4hs>V;!=; zu5Z)I%!M;NNZl=J# zSft@c7U4hVlDkWj$Xy|16DRM={20tfVNFv~ z{G}JfQ$v(%*ECuN6of|;wMV!f^(!t!`QE}Bp^sme)a8zihF-VV@zlFlCy~%Tb&%80 z9+q66-}VOoh<0X?e`-XqOAn@w*3|;G&(IB)Yb3{-yM>2Hcl6X=jLNIpUvwnLSA-9~9PoPi#ldImXDlNcHC{Dk zLR7FbvvXVNEv|5+mY|xuoC(nZ29u^W$%`aE$m(%HM+ZhLF%9xzx57?f?Ag_L76jKO z20gX(il;ot3ucBuIyZg$Sonj0Y5Q18SQU`a*ZF22gtDQakyqcm!F4$4z8H^@t2zC} zg+>%^&)%sEvV==7)!ZVQwEg3@hC{e-C7OgHLnjwH$0xj_|? z6PrE>i_MP+p-grdF=TAymk{VTM_+&1#5ma`>^K5@bz}pO4nkv=jv8Rd}7SrrWw|!NrUW&*N1R zU#t$sx^s}Lk6s8uK|v~C6r;OW0@VbpJFq7)=DX+6VIDvxw$-XRiL3UwC2Cqj5i&+@v;z7_uOB~c6 z9vCUW+W%Q>^9f;2%+b^*!AdR=8rH z`^`rrLnhd%j3Q)%DAp_AF0UIBuaaxpA*8T44uwGRmHCRw-mMB~5C;f`Z;n;nb4l1) z?6Uj^Z%a0ne-4~sKh&seOWvLq())`A1ke06J4}-ZN?jpiv|s-^Xu-f3j<{KSZ5*0- z%)1a%&6>6S<3i7>#?o#f!a1s7ok>)0)i2MT_P{`1-4B!Az}TxyRlSzn+p-In0D{}% z5q(7Wjp}T1AYkD41aC)d`yasNIWOM%w}8T7tblG+n4`1dYo4Keu@3y_WTEI_RVC+;sLz72TK8T&&DyDa*D zE`R(pRRmbss$}tDV5~Kr5&j8dJyx0=*#CO4nXd$%7d@PAQ&N85sqy7kr!GoeI zKmYc245Ne!SAO7E-f1>=FUtAsVft`ZxoQss*-KKfd(Qp?H2XnO*DIW_)$Jo!$> zT_E)v#Y#qdHo)HF<#_JTTk2XL>k6GHRu^;9I&FNc2;OUq)t%5Lv7i3i@bo6^;e{W* zM;ei}7zlNs1DQS)hZHLR#*>|j?Sd)WasNcBhh>Ht?$xsDV4E8pC#=H6Oq_>;BLJIY z?9CyW7;q<^Z@*362RRsjOm`48C)4YK3UWhaRF6L7CMYF?gTyIE>`EIE#ms-r9SU_} z6$MQ}@IEND4GfjWIKJlim#58?=$ILYafaWG%NjN~Vce;J^{f$JJ7M7U%q=t3RUGY+`MeozPxTi0lf%c8uj?MR*P~ zU}@g{Ebi}3BfMBxr$9|5Al9{!6O+Rca%0o7n4;ocChByGZ#-5BHGXgUAtt=?hYnNf zK9T+st=%~U+|;H7L&#J&CwTF5t*I}KyVgduXhRtpRiTY?rXhRMS8}d-G42IA&|+(8 zG+AyrRWI)dJltgteksq!IWKT3H#cY~V~ccs*H-|S+*_koy$(Y8#hzCTd(>i0_E69b;jLY9AEs0@ zjC5`{Bo~ZM%4mFxkM-3d^+l)cjQE|S)(%|^_F*)phkx?;%<>oBg;ktC$0}Tyi;L)6 zFltzz^ci^=W8bSo@ywVN|KiRn$TG8JIixD$)A`^)taMnwUVe)o^RM$!r{jItY9+G+ zUuHXylF<}vhtWG58{b>Q9N7h_f+G*^d*+2?_%7|N=bSZsd?P88LWvr8qK_049Y0FE zm3g%w*Y 
zl^9JpkAaTYIi&cWGl8Vx`LeRlWxCQ{0Fj*(BlhhYCZN}9GVY!?LQEmce{*>lM93it zyOQL@>|N!YBwbZe@f(&37+p>6)J*k57Lk7wT*;2tGw!{jkCPQk2kl)|EUNsiPL1q? zB3cD>J4Z!bjn#RPeZ30D}Nt2p|BQY@b#Iq;)7puV;}*Fl$UEkU>Z`bz(KoO6NB9c5+O>8=nBP3Pj^ z$Zu9BvvcF(&abywRkm#Y+dT>KpMcNVHq0=w$01Nz|n%9bywAxN!aXbX96Tqg4Jn~*)%V~D)Y<0O=!0z<{p-${X09^wD)#B*`gCbwO z6F!!n9T6!S5ayi%v}^35nOTQ<7luNg zzPgc!hIIt)c$`hp+?4Lu<_JPZ7Hz|8s*0K`U@LW}120c%Pzd@7QH_nn#${L?GLz>l z)GhuHc7zEG#O@mMpqy^{J}BK+Z-lS9m_IG%+(A1h7?`41GxPAtG&yvzJ3wLL6#%7Q zF9kv&oUknYN}Cq#3Ur$9keS@szzB~(hIJb0rf@{BcgvblrRf*-_@2$WDY9lq#T%K)hMC!SLi!gYn^304Yu6xZT{joup&mC zL#I!P1GdaoL+*p|7E4xGDhNu^Z@(QJUpNEHjh3 zsq@28Ymx7scui};m+_Y(!^1-za(*3ZL5xl=?}kKu`0=%*wc)%U{E{8 zqWtsd>25{!jCJT)0Nh!s-N=qEC(UG-(l^o0cB4Ggs`nBj=h8ad3|GSR#Pfw;ZjvKi zi;au0BH=cbdNq{w^_HZ5C%99@-Ee|PG1=5Z zYl6=qkMqw3Ca3^=teP~FwT86=yEjmZgTXuvOUWi_v@Oc^>MX2#cB^(2ync{L2 zUjgoykgZAIiB6MzBJ=V5$dR@Obr2CCVAu55de;WG)YQ48!uj#9nT7Rr6L#^Fc~_X( zkr9|J=d`)u?b`9Hr0VsZ7@I&yH~0ka{2sm~{`t!EoA4aw!dlww>0m452?1xQR_cc1 zEDMcQd3obz8{ShSr&ivF0|)eqin=-OYsXm-@jzcyz^fd-LrdX^^{zepwABN!_J}2V z_%@xH)wMpI(;IbCLCD@-Y;$2$Xhlz@g_6rCvQ)7L~+R<*a~3g#+%u4dT9M@ znz$+J?~8G5;%eWbcRyfM0atNG!i92&FMJJ19#;>SF)gZ-p!C&m`v=q&mf9e|p3=Bb z_1y|==RSy30$X8}$4Apn7*Ho)?SnRd#8+-_O}^S&jBexcqYJK#YdaVJmz!fy@OaxT zurS7Uj|hceTmd3;x7C?Gbq0uLJGV~>HxAN%0%qGAUTnV~Z<7hdc*+v}W(FhxY=07# zslon+`nHU{bFOvy%TrLJRD|tu#iyxMEwx##D8h03@YuAMbJ4=qKM29olq|SH65Pnl zTtv#TSo>yxBRo$lbyVhv{l_wOUQSa+lkbl5A+{+u0+`#?^We#Il>cPc3PP0})j>e` z-CKC)6WM;8bI59t3On?1T4qa-ahDiVI2J@^!uxYjsUoMesPYR6FCmaE6wrlcXJ#s& ze$SHGD|8*YOR%Uv?UYRtU<5vL%2CCzIL&h6x#9F-I1z8yD5@G_!twZ^dx7=a%7C~6 zYc@5>!C%v=f_t)TR^u{BVK_X35Zx1$C#MJ5xHrB^-ql7o&lF@}WU2w$M7!J&;LN=O z__N(az33gS^*EN#C6+@+n-ZGdt}J1@VjJQ%@eW~0dW=ygpcCc;_2x(Q2E>l1zu^GBq`z-ufo)SzofrPLb)M)z2DV5U3-rL)1)6j7h0liN|oXTwV8A$J>Fp z%Yy{_5_o>7ZdI`sJZK}|Ij?NEnR8lYCTvZUDHtvU*M_aGu<#FekR4eMX$OHYzn7Tb zKAFKdu=X99fezT?ov>6$s2Pm?twYE+`=j%dbPhJt~h-79<5 zX4n_#Rg6Er3lUxVoPGPa0P__v^7Rz(W1jIjr(gk1i*HYx2GN??rQ7dg9SFgO>hu^! 
z^7EPC47F{q)}~x-o12;%mQ~kA_G&KL=P?DenQ3p-SBMSWdcB+(pk8vyS61r%O;xub z4-qHgRPZd8cxL)k!T_T=STSSi_f}dfX%Ts1>uKHMIh|;Q+TpdCd>GW6dfu#9 zn<%KQI-RTP$9^smirEMKX;swl!YaJm+4XKjBwqDIQ@t$0T-~Wu*NlwxXmrcEG~qVY zCELhu;67!~4XpjitT})&W7m^|Iat>EBhEOpt;5GbF#Ot*)?d%AS>0gTxwpsAL^uVw z7E6-g(AO1(CG+*iT8^`_WCNQ99_Y2ndZAUEN2{nx>LaVGE@YO<*2{K5c&`d}emN(^ z)v)Z&4$pR#t1U__E0%6lYHD&70W_-s*Ffw9BV-_lVaRupI#CaUMUih z;prT$B3n|#IjzkN+#^R#Y=D7*>+DbhtJT(RkKA?}%s^fp;<R&+@D1(#0BXF3(o%T~Qw;ikC<8 z)aJ)(7?I!LF07L2)jAz*x>Ub>^6d;o4>t$>n|ZkkDxNWWb_AB;J^L z1l73xzd6i!=r8^;`g!FejhXjh%<2LN@4TQ%)yKD^WRb_jVYA>YHLs#%>nHBrhJ8QL zfXZW$Tx_Z#^uq5+7xDycszG_{PLq?MCWY}zHk|pTktOmaL6s&$dmq7w9hZGjEb)aP z9oYVRle#dwy}Gv>!E@v+WMBjCt9t?jwm8dwqZ=ZNF1XBbe?NTyqs?nOOsJ=PRZeA z$CoQ`$2<+^;s6_skKP{>hQedQZAkLdg-aCtr<u$}&Yjcf)mJ0@HIm~{2(!Rk+^Zpu&`$9nS--GjyBruumUya}6ifVZ;dkGY z?3#Ai0U*tqZC^4gei9wj-2v4Y#9ZMx(`mv(+6Xqw&6PgL`}O0)O07D zw*`N$$1=pbp^BfNih#+~0>Yrwv}tJF>+PcFjYKJ}c`aFczP2$uZ00)AzvDP*NJ| z=7?e%!M&uje|TOnXstGs6Z$4C616E9pQ($5f0!fX!z*%`sq`jzSzyK2wGA|_4}s9hBg5O);$aE&N^|Ky&wn>oncaF64M=LW*Qg3 ztT~Et{M5$OV(SeC3MiU@M4s1d*2xv9v;b9k-lD`Fpik*>Ldw&*;r#?`Xt3|;!RUgm zx6LLl%VSx!NuKcHn%tpyHAhHr3G#Y8e3&Gpz5DfbF=1BOWbY0Ctf#{+CJ<53?vRQUGt=JH0Sr44AOGYu z)5nkpTm02B{GRcS7qOmke&vgdLk)E{Qnp|xrM(1K;S!Vu$v9}JYr%mwsJ9PFaaR@^ zQ5}ErX|q%-c!fMfwl_leR1CWK`zu0+i=K?O6gMQsIiZNoPo^fN-z-CRb!{aq>NwOh z3OC9*Epbo78JbRyikTYDu0hVxs;6a&y=%9oCmiUC;%U6d7XxhEtmVMN1Ua5N zoAe(4cpoGm^1bFj7LG5+Daa^zhr9gbPBru=I!C+%s+{TYm@kut9LHRm@$ns}V?*<2 z-8{~J-?N|p3;mXC7q3+rtc@M>`k5mO@mB`QO!eK<-sT#s+f&^Knom>GnO|e(O29GIxg_eH4blG9Y;R$9vO=gTqQDB+zN(z zuzvm+D%x8_bdm|QwOfMMn6dD0f3)T}xJ`==Uq18@e!pM#x4rBO?cZh>a z7HeeTAJ8O6qHDy^1kcgxeNZ4o8oz40GI5&GmUyS`udsjyY%K#_|M4<68D6iZ7amP= zN~irFbiH{rTkHGxe};}))tZNv)2f;|ZKY;8)uQ5%iaCZHMM)GjRgiMpnn`_XJT>JQ zB7#suD1w>~ijs(VNKkVnEs;v({PyQrzx6z8J?s1Y>yN$c?0xV1zTVgC{l04Igl=MG z%)gdOuH0CNj3$vDWdjU)YL3*88jh|B1Pig#ShS2yNS_@} zn*8aAUho`_8Jj4$Ao$q&I+3w-vX5Om5G%DCa|IL05)^xpQ1Wst!wOgqlsIWi$E_A|y9*jRh2S!XI=Dbz?;lInpr>~Hm& 
z=C%Qaoa=&H_tc^+eZQ*VO7GzMBlFWG63QYkZFCouOAlPZdHTvWg;t^6x0*&sPNfTA zo$;^DX+YdXVV-15naK-A?5sNHR^!?Yyk}?5~Ica zg48hT;+#tjN^;btcR1TqZdyl3jE?sXq8O)mWk476tA#{uaooHApoYRlcji&r{3@Bw z-d?uIPBIUMvfX$%&ksJ%=(z5Tw(E{2s^R+PUky8 zA(5*UHc>Ke6wXRz)>N_YEkn~3>!@GCwSkZid4N`Cv|D|Mxjr7?L)BVoo;bKprC62B zZ_EPwad5E2qsT`O6T znF}dwf46$6_S=3!Mk!R$F6-;*9Gp7wlF&N z1Fj(f$Fv&u6Fp;tCkg;xib#hL-<+=b2NrJVOmSk1^+~;P9WkzMjpI;_%sp;6HhP}H zH@yaG(XQ9FypmMs2RG&kFzda__~YkUp32`cO`5otERp5r4*|Nc3Bd*aO;%9PKjFiF z)lw`X86%<-R5$ix{Q1W!2SL)@HyMbF+#VO_^lYGO765gal>McSJY(h{4GGPstgg-O znQbLUE1Gpykk(}LP_YDb~Z_;XJX2~UTml4JCBT4Q@$6~N5Z+ZNh?dOJ-WsB|)5 ze6@IxNa{Z|0*l`Auh@c{P5nU$Ik6YCghar~La`D7sB)kvB695Jx?qLl>mKw< znB@Be%x~v<@s5r>dE?vv{(va-`E)QuTL<4c2%1YB5Gb&vlF6D*jP!1OV4siXq$hHo z*?O|~Y@x4@@CQr1BVvs3=9ETg96Y_-5GvyTw7h}TRXNXnMrC5)Z&|w?#m{|=cMl4p ztk-|;8@-howiyjVOe0-*j~C3KZ((6A9yMRo@ZSUJm1EPdT_6wRybzam*Iylde(!ML zj5l>y(09T{=yf>1C$<_Nx;iik{7L~rYCTTd~2Ka zsV`JZ8mUGkf$vUm`Aw_i8nQ^giM951>`H%TDc2uLXW)Ks8hx!g*+3C@cl%v5xT2h4 z*5R1R_0DCBe@B>!@9Xxi4PU@Z$kr1to17{|;m_J(e}}mZ4uZyPtY_1R`c-gPekv>m zhmVD^Ap1T&8154&15ykk-RP4St}Ql(67)0kV3+s_CnP(G0W+8oHt3$}97Bb=Q4 z%SyAazie|<$5YL+b$+@)$IFzux<=A<$|$tZRD{4BlsJz@7VdF6xo3S#xQcG?*t*F^ z&bW*F)bCFKsMdmxu5aCY)gm?uPFW8MR`~DHy}j zv0^kJ#dcq<=sKMOX4+aaEAXfAs?qn?B!t41O`O{--MqMblI%03tMxsO(|$0+7V?_3SI72V!!f+E>oHsLPH+51qVQG zSy}p!i{sI6htl+KN>tdFs&Uak<1V-HP|3xb_L<_KblSG8yo8~52OYr~<2rb9pPl_CB;<3}rfC(&e%nS()X72iVFm{LwtMMAvZaR+FI%Esh91E&9hz4~8c%ws0_GwUrWMhH$IC_OcJ1&S(Kph_4 zX<2dif0%E^(oHl`;bz*he0yS(J1Z!8~Qp07r{2wk5g)0VLPWF3!EQrhVbpwxkb}6pUdx?o{Pp9Fe}B*k5@+e< zWi31~)l{KnysCnL@Up6{_3>w|9Se{ZgM;0|S}uEo>fmLCzSv6`yZ796sU(d?R}j~S z_Z#;c!~A8KfAsdVHr8JJ{@8hs&BNA~c3SwQV3M^KIjlKX@= zbLI!Nec@q~wbhSGs1ZT#?z=v#(ybT&en)u?onxpv;cnYd3(E7b8I8{;y}ul``I@nO znOQi=FoY~>!Y{d4H!en-@6Lt)Vttn^K~v-+R*oaOZ2f&^hyI!EXpnrJ7nc_^7hjk0 ztl4#reD}72gK;me_A(^yGG$&|%;Zx`x zx#XSnT*1W{I#^nuci#xX+Fm~F#))C{X&753^mLCeUhwHBXqtkEb|coxeLQT$YX$nJ zR*amhU(v;P?`iu<>1jsB#oVbrc|DeO{LU@p;$=3yygAQ0-B`mKzIb3@kGCFrSWn_j z>&)6zU(qS4Y2P3YJ%v6PS3 
zB3@}}f}FsX9hzLigdbO6BG}DLh3;A@z8v=~qfn6{S-M;WO_IOPS{mp~iczH%Ee&{x z21z2t(1xZo;ChI&bN+U&ALyLCGy{4IBQC)y&k9*9DlRsuHk~|Ks1GZZu zjD|C`;(}%t28LFdzrI&ygl`gwm$hDD&WtObw2#J16 zEIVm(?6fRm?q`97&a$?9#1EM(h{hncLUMJZf-L$Om}~tDYuL@~?Q&VdfF(aQQ9!scG$0b+OI^nE%%9RUBHY{~{!aS*y= zJY7zFfo_aN;abmnq)9~X=kK|U2)f4(MSbco4ZF>w#d!BErl!27-6-H4wb)Jzr*$0{&TrP zq7aaaC-RTy_pA8IUpkqwq2DPr0dZ8Z!~lBw9&tO7-g=nR=-6DvPyJpUwT23f|9?i9 zoqof=)WlPM5$GuZr0TL2rglHlq>t6~ZFf-u0bY?xSLRtSjD8+_9mHR1=G!HLH#)_> zU_AX_tSRzax&}sh*fmlz9p5qT^l(C(Yn--WMtQB zr?^o@W(Q-k4%^G(<3K6Jmt9ZN*zLm#E<+-(I-fj9-z?#IT=-3hOfo zuo_yIZX3ju9%qN1YOiobm-Ya`M$d&C1Jt4CV5qWS@f8y@>lQn#Ju^R}KJU;+O4WrB z6#32EW4FGW`cVZIru4Gbz+;@ALV%ac+g7YdL%tHUW5m*~j!3p-zIq z-6zAf<2svs2D+`m{Y7V37B2+X;eLP+=|Qa6MlK6qfS%ikxdrr+bQ3kg_7Y8uV$YE~ zUPVNgZOA6Zm69hK{jrK&aGPR6YD;F(l{=eW&oc_H3MHFsc9;zjq=vHfzs}q#S@9;F zm>iIG9yC|Klnpq^VI;T~@JI$`hi4&$556_SbB$NC$}2r!Fyi%{dVvx^z&rH{+j0JW zImM}a$69&;_6Jg4#(MUWJuW2nL^+ZogQ`&qXS7YuO>=ASu^o=ErlVJMdqOskvhq&k zWS%>YfFwkM&>C8`joa}sXC%e+Xx{6*wXV>cHS2{54g>;q@SFKM=h#gCw9#_~qIcu# z%xKL|6x29zlv&_-^^u#Bwck7e{cY(1WWE_5fhEQLdXeZSplw>;(_^Ad`-KxZ+9uq z+S6ZOuQHYbcnia*WEa~ieVfdX6px+91nG*x$CS_j z)f_w^XAQhxSD^8#7qEm<*Y6RMx?omi*f-6-Q7thZ{M#SLy)s1mZasQBU6Hp{SGnY? 
z;9EisTBvP*Bjl2%=2`h162N%(t*0;c7Ot66)C$^NOofI=_$IpZFCXXoatEaPY_xIv81sX-N7 zG*`hYwTB)$#&p;1iHga0CdYF+rnVopl6EsWPea>Up8|@`kkGB*my}2;?cS0%6ZpS0 zz>|vzhkj>7fA*#7IgLMAd_umsnNA!6p()h3rIhxka>?->8wl(db&|GI~HPJe{N}=xF zjeKzqb-^R-VQoU`X_4ygU|44SWPz8een?Y?`kmulEfT@0FSUE6&cPw5f=1A_DL}#9 zhC&HvsOy$63#Vb@VQqzqOp}ZW|5k7k#u&6)Ln`;DH}!9Z3*R~I{o|619T8HX2;Cl}rn!ke zO!F>%oL#ADUm8u+41ArpUqvUX{rki8SBEti$ZgFrT(w2j90n`Ze)h1iz(;Zyc{{c{ zgs%%t?i3a%^4*Uc13GRFvpo4;y4^ghrJuOK`pqWoVKLwchOm_*yQ7ogF1d)1K;7iR zkG5^B0z`YdJmlIs-iEhZ0SRx|gwz{ZE3m{yg2DSqr>t~(sP#hl+dQ&=|5fO9RI?ro z*}}MT*-W9@d^$bgK#>WDx#Pl<-!==Cs&>%(p)rCCrW0o;yNpKS#8#lM99C;^ZEL8<2ghFAI^HRGz-7|1p#Ji6*lbR zgr0ooA$s3aRD;OP%!Vq~03_(JF*dmj5OJN3jSyeA1w|qGj6#C3zr`_5v@@huH%dY$ z7p0Bb!^=PW{|`Ywe)HXpC__4d8^F6#BsXLp=4dpVr_Yb5qk@ptBs`Fjs)=AMl5SM_ zdd9FplZ7>e8@c5s#X&(D!5$rjkET==;jVw}mn}8E+*lVjzFzI?pzUir=U{`!xj))F zoOwRr#ZtIG9tKFKhzsG-!irY)lVkv4#|e8+g)26RUSQ}Zd=-( zkCJ`84>LZ}n<|@F(u{{MA1~=-@58cl1EMbExxIPj^c1@k8y*-fVd)7F;gJeGhCvP7 z+?CAO$9w~-V3HjohtvIVh(k>s{LcR`)y2u}%bJhrV@ptQb#pHKwisC#zW)0;!JEu% za7t6w)Lu*v1@~oh+lg#5EO^elUD8JRwUz#?XYR1_Q~KWa=Dl`#(!W29t-FL7>)K6* zwTpJI+tb6W!H*JJ0WXDA|<=s{R_XXT9k$GO9Gy(++cJir5gS^evxX4!0(+cjE7&gh-Pnz}Hy1d=B-eHl{5nl`WrJI*Hxy$Ac1 z>h)WjKP*6vYF)66>W`r4`&coi;(<7s7QSH>x092}g_5`_b4=Y`U$#PTvx&kuc|q(! 
zJFOF$YL#GRE%kdF&c5YcJrHUhsqfuEBg<8#ipLr6PBQW$CG>66)pKrSW)PE!$`aS} zd!N~GY=f$z!k>hfCg&9(mv7#DRr+V?Nqtk8)v3w(yfy0)rFzJ*=qKnEwBjs>+4h?DVli|C^n7*{tVkZ$&{+7O8`*y;lcK58xY<6?!k_E*$EdNr zI~1X!9$PTX1HqKF&H6)@&q2MS9u05VZST*>%2GPpSD7MzY~hr5;VS^TYXv?&#($ZY z72kYxQ;1|_BX)VSm9{lOlrp0qFPvd~S5r5|sS|8)%GW#wawqvy0*||N(QuhMxS=HlurxtI}1EhLp z>E!_!{WH`WG1yecSTHgi)eeG7#59;Yl?pky!csBySq0146Q4u;vpXZKeWs-1NgA4N zh98*3IWfCS3AS0v;+1w-kWDon2|9VE1Ks~^5-tl`1&lSk*gxfkS*J2t5uC(hT)2X$ z1@DGnEVjX^*J>;&$~Uzp%+NT3^PDx7WCR8lLbBju-8fS!YPad{qF<*CP?qMkhtS4^*R6@jDGfFtI4bAys}ZH@MJ z6v*vrO-|S<`3q%Cf=gv>%`t;p#&-c%eNcQvhb%R0??vw7&xySPbuQyn;c_8jxi{uq z2ZcpW?*X6VW)JCXCpBL5xlJ^{y znPTszkB5H>YmrMK$fyO_*apzD+?Zuk8X4Hur;BuK*;l@4evXpEDj`0Ec-f*664b8tJhQ1Jr(U`& zF16A4)Nr4yLXzg5&U7QbZi0N}r0d;IYE4Zb$WWabe$M??^smi@_QB%Fp=MB&jf9#( zT|@4zkTm{x{(G>xfi0gAA#IJQf@`5GE>qUN33DKiw%`2TMTZ>fXZI+eL zUt4ajcb*q0qthACWQu#)23QvC)9%U$N;NUWJm41Vw6isS$bD1 zi=fK5$*zT-Gf!{ZxuR^KQ)+W-^4r(JK3)1$-DlAad3iRpuQQ>;0Enk}#ku!>%0vc; zAX>tn-c?*iB*2$m0#OQAHss};`FlU16V`3shG{B5+3k(~K)aIa;(yc}PqAQ>Mg|c0 z#-DDqQo#&`vAnEGYQZ)`+osHSmtpg;*XAZqv?&?N$cF`I1!jZ7hdY(fp4%{d+(&zV z+d?pLk|gw(8mw^F@hop5Y^l>s&-`g&e${pS*gXCb$U8NCf zNto{P`&!cck*H|u?$Gf^+0P?UZyZzHGY}zl-@fJQtZBC3RiE!RjK!GDJqL1G`q~gX zONsCS9BaD|pyzz}9uxu4A9&t&uU+9W_f2$5%SofTW+O|ICW=TNv=~Vy5T#Go5&XFQxG?DAaXnVTH~5$UzZxyipzVK z4?yZ~8O|XM94HwNGp`qW89l3+3DGxc;gjVvh zF}jcKyWBoC6LSq=;aM_6s_hFpV>7$><~mM8VDw4iLLH0V``BtvVFn#Y*Ipcoi_Q*D z3ybTFw1T`IjN!=E?hQowSJQ02q9c%Fi&Y}?j~q6t1{dSahP7yT7X7)ZegobA-hZP+ zO)$r#*wslevX-Yi29O>{HAa?BP;M5Gn4oa1Dl;jNfF2!!kCh5*yR^S6g3kbI-(OsS zUbl6>ktI-#O%^-}6P$^C3C=J!D0>H7&nTZks&eIKu7nnfeTe$4joTnGv4A!>G92;m zRf`elYrf(IF`pp6Aq7SYKl`zqhk0Gg!U`X9?_F{?KV>gao4xB%!-@zwUN8pyv{td# zUbOq7J=?eH$>-oDjh?)ty`1zY_&0NMUi_ zmiFkpoY=OXc$_orNZ<2vy6NsQ7L)tX<>PP@V6iLO1TkdzIdh~KrzDzJAP zwX(E4jA(VwnO*3t9s2cY4+A;5W}_z~HVAlK^?3A$$7Gu3qqb&^AAh--;C=n4-}Gfb2ev5Nw44dcTJxIpG2hcR zp+I0F1y4p31*16gZjc-Zo|5+Xum z`3~&OMsu2)j*B%-r0=qHACf-_uJg~ZC-?ze7v+BKAJKcNeG 
zklR@jyX#4T1Quv2fnpXBfn?`M?-sLsa&RQR$&~gNaxWEsaQ^oeGS;9VV=c?AYzMyK0EXh>oQ_fzu^O^#T|Yrmz=2B zSVIc^d~2+W2%Z~YbJ?CQ>9=oah(}_U~J83&8R813Ig3 zWmo~=9e#eU(A%1Jbh%Z0HoLEsTR*8|g#3dzDr|F`0DIUwdMkVbA0ru1C<17XGme-mszWl;oEqVMqC>tatl=|GnbjfKMd2+sFOANpQSW z`S+#)%B?Q0#TVp2)>+cbOrA3D&}z4INPG9G=bn_S&;~8u%%vae!dNhKcX+y9=o~sx z#@d7cAj_T-Xc^IP*taCJB{+isgqg5iIcv5;yonsfl6@y$x7YYw@1HZ&^)T~0Q{%1tk<F?B?oD-=IuX>LZl6SSrF3w%@Ro6^!J8Pd&U8g{q%g_V+t6gZX zUM)bG!XggclunVO=0*v74)H3K+ua_in8k_6^hrj_>q~7FAOnz>G=8u0U1d?~!X|ks zy;BtMZWM|=pmeaHN!0;@tC*`?_Z4Hg?j_-v>z1DQZNx=oR$yu^N;g(YJA9I5@}dUW zx}M6Q?T;loa^!DLda_n!F|zaq;lm`a&hcw{klT1wC7AF;uMq9QxyU)V_`B&M=dp_$-DUXHY^bMAaG{T$C(Ym5 z&ekq3+f415hTsmy6KBbeOEZ#teGkuI$)!*c-;XA$taDxBB|cZ2Xw8U60xX-o|8j zA%Kjk&uY20usN3VHF&`5vbsai`I2u!Z)4=;he#_**rP^uI@fA_0#oumy zEzy$Y2$f2o{h5>Uj@?nqaF2jbVANTXy{Hb+iQ4^hhHVxN+{}vZqr}96E784&vUe)3 zcfR!|yGJ}sh`#DYyiXr~&QX8DHW}OcOBZrG&c{ytJId0U5T$NeaTj8s87XGNrcXsJ zw>NVVykx)OVNdaYrW>n3@=()nwNN2Mhi<1#>a3;S*Fo3(>?o-oYbcDuCGgCrJy23p zjt1b@6yczb8ySE)CVP1W(JWmED_$-VV-!^N8eOhEc+!kRLq)ac3qz`ZJ`ecH&!Rea{*gp ze_>VqpH@_X3w_;BRT#p(V!w+`xgL<~5R@3HXM7+iwbz!6v@%k#^JS2|5 z>t$~0!cx+Eq=xpiwJ(j?lq-w5GuO=}@G4$U_2`Uv{AuZ-LO1cs{(R1@wCr$My<9sN z8>8{HL?EeD=v6n4HG!L6ngdJ3*8Tef!x4b2!jCh7ll|061Sg1p9l)WAfEc+>PeA02 z=OF5jbC2bCazlb%9{5Xa1En*k{%8aWe6c@$<_L!Kx8A$*z&_E4>BYmT{FTmG0~3N3 z51NUS8azM*4Il6$w8=+(EiEy1{;oDwA+9!d-4u1UGE?#e0Ta$r=eButvUU6SQV}v5 zF(})7gFDIvMz`uYlx{$~p6Vl@L;JdTw-QpL zD}{Z1$}X#X6Uwu@lskzvdAtifYwgKaJ6nNQEi1`hn!(i{i~s>!BJmAdQRP)6go5z( z2-%tzqKCh1AJ27h(UsI+T8R>KLm(C?!ODf<;YJM~KTuwOw5-*=oY6P6miDOSOR024 zpS!Y;gm-R+qj5E9aEU)Td<;^j*l2Hr7Kn!;W)Lj#d~RZ28 z4g_mT0SHW0>>peNR6u9nXgJ1I!WUv6`xzh#gN}=<7QZ8WsXG*I3^n|^ZDexi(=@dX zEJ~qU~U5l-oUPAZ6sJy zaCY%HmnqV{^wxb|Pk&8GcmyG}#BK&#jfPqxzaV=}zdQd=Pui+BIj?+05xq%{7q+3V4XN&rOVa=BacW%u@@NHPqU4dSg*5zB(c)c;SH*yEGi9HT z_tM4<-yOU3VOkU*;Aud0|9p(brkW&QPXtkvihY7fI-wgKR6q*#KW5h7p z03wqBKd##|8DQ;o-RbnN<0^7e)oTHM|M%2gNgx)p+otGwHP2`+vPt``nBuPcw2*!K zX3SRr1z*Db&2XJh{8ELv$&5ipp?kj<&c6Swc zw=?-yps&0%SFjPh+fL)LO%zY2yDLu) 
zQ7;&sYQ*hcL2XR>nv6t=ViFf-xLGbWP zH6?h}1rEs75dDOb*s-}z_B#_!pzL2`;oq{ zVzl_?<0kTGukP8{R8KOS%B8M!yoqG&NuqU`vAc3?DYL{6MFubD%vpZ~o!*K%v8Y_f zs(Hh-K5fko>WdXclN*PTS`@=_i%8~`dkFUsYDu|#T9r*1aWS{Pdl0a?OB>wcH0Ea` z7FE9*WW+5P8L56Vw6Qk*bq&)PjLt(x*_XPg36>O2+mJ6idL_Rsc2}`I{zz=C!wY4t zxJtpA)!Oxl_SsOsw%QYzP;N83!HVU=MaRfqyGLN+2`x3cfi z7UOTCjreM*rKwF>3y}uP%66`}8**bsJ8D*Q!>bOx@$jf~lJzYIk=RcQ-=vYwLNBSk z!>WWhmaLqC#4SM5XF)Plh-zDht3XP!m;rs=Vf0BOAgqFNkFD;&{jqFhMZL;B$Q1~0 zn*oiT+*0tt*|JY&MQL-Y1uC?^W6$%AC$L{$=_Zqp??F<&?3t&0zG-~yV&6THAF^6{ z*)3aV<+7O#q;m7_w~)$U%mE&$au+h>_-uGP1E1b&my+JtJEr}rnzkNSx~noY>Yz9{ zx-}TBVH8Ph%~m=wFXlAIj@Z4oQhRpWX;C`Ar%1Yu&OggW^o3#^JSA&SmZ$S{HzMRk zXiWC!93pWc(nP7AmEISdIE&@C7E=8pnq^P-p8chf3ZtuAt09+>_9=MkjEC~%J0VBEl#)WN}qk1uW3ShWUX0J%hvBnIWYJ09u6a0Z#yT!Z6>`eq!P0V zmaZ2X-Lu8E{LzZrJL2J5!AAvUndn6dPE% z@!{Jfsjfw(@y6qGzp+8g1$3$%`!LDqUa=ns?or2x6>;(Vx-}IMRej=VmVI6xQR{){ zq@}^WpItoaUAWp^NSTuAsL&^@ifKql+UK>QEDNI^F%_a+wsbV)uR+_F-OS){lqc&z z5t3~o1oD)&tK5S*r(~;k|5~Vqy8Ja1E|B2vvULXp*I3y8bP>Q`yF3&7xHc}f(IQnN zNasp)pNU#~?m(``%>cVDlPeufY;eN0eZW>`<748Z6t=te>Ezc6K=1R4X&&^?yQ$Uz!SPCD#g2O#eqA0EC+u#?~UE?eJ&& z!?NCil%zw4swdGEw8qK`+rtc$WL{cKJo_1e=*o}fs^eX^q&^{|a4%x+xSo!@4^Z=EdeArwfm9xLtX22HcrgRxDF8mxP+_O)ceqeE@KX5@^Y#K3U_wrqqg!RwMXfnohK} zf^$8Erjt~wJu)(y-6UzxFptPmp=IAM^=OiLq%w3fz1<2P?smPPR5ipdGYbwwx>)63 z5t_Uy{@u$?%v{{HNJ_#n=~J?2jX%0P^PAoM*qHp59Xj1{ z@{2a*o)p$Z0Rv`5`!|0bt7^hB{r`GQE1DQp*S~Sex!|0~s0h$CO|G(eNH_zri8qo0 zsZ^M&-ldY@1T{CxCy(7ZB#o#ciIj;(Rh_gr{i4)SLfwIQM#KHd;Jr)Au+Wf-Do@t_ z^Rj%Ll4Ai$LTX0vPgEzCJYun8Ubj-{>d^)aH)lf6pgF^L>9w_5*I4G+>~me57_A8- ziSp@rsAfmDNP>>Loz=LuXWi>r!(AwpG8yP8H8rIX`cV;bv&TH^kF2Y(%mPslx;@Gu zq)OR)bzMpI#-1NpqQw0B{q0fbnW;hxHL~9nR)3IeDxGqZBy=e?;4Un#_^irYo^7aq z-tRmZg*t0ea}10cLe|cm>jhtlQAQ&ixKfo{u`FVY0$QpII5ozcPRGXl1k}zbg_C4s z_21gZ!ASpM%~0#sPVSpZ{zKY6f%+tTSM7kJ%gURwrgccQXUXg2_U9EfbFz>V&Kin# z9tX04cB>&6!H7o@P6*418fMN8#vqcbTt4fqZZ;1lRTp1C_vsnblii&ttCN{<(^0V0 z?bzo?0J{JI7#zo)drb64oaHMs(l4Pyg0M?Kun=pQxs=ujM2sy=B^jMQ#xss>#`-;O zjpxikr1oxX4Z{@PD3JkHe_9 
z34{PY8DQq)Dmn}1KjC;#kK=NX_AXJIKX?ZSqx<&8}unyB;fEvlVl zo13|IyR|7X=M&wzv24dF(+Lf!K98HM3*=(s19EmFFe?<)ky_RDw=)89%_1~X0w}~n zFW|0xoUriW@JfFo;C{ty6K$~0>dk85;!Xu6^rD*nY0K^$brDBL2#5#?5$rt952{*o z^8Hq{G2Wn9rC2H+_SE4`)_;bEep4H|d*b4mUtZou+=xj*?zBQXR)P?lH$^ z)|We9hXeB?+0Z|Auuisn6d*m{>}CIy8GW^px<}F&NDgx~*0!!6=ReHZ zW@>5;KDj}HJxmlI3l{HeU;r7ob9r|dt^|1Q724B|7 zhuKKh8=yUvj}n|{o!m3Jdj0J6kd52hoGE(V)U=PwiIOWYVe{P}l24>&jSMsA0o>K! zDZw38JL?e=GiheuIrk9RF9=4ueCY_}yrU`ZL?X?iPwU&()AK9vGGE(wl~hADi?CH? z^xqXubx67*;epAgUl=NAECl^_34k^)9b{cXB7<(z4n0 z(KjW1Nqt7v5vrDxI2~g2S;K8Lqe}Ua!*n&s0!---R^_Fa#nz_M=_FI5R*OrcThV?s znd_Mu{*U)R^-TdTlG;}$DpTQL(d~|(C-x3VUNGvx@OFR8WWXfl9{xtYWIi+1-^!J% zQElYnb1J>u)sy*Slt>F;y0f+iR(zx}lT5wiQ&co&s^t+R0L`P|!Ca>!=u7R%Ov z_wLQZp$2cC36U%vh^Ds+)?@}3{SW`MW?Pw&k%sCsRdq;l&=hYW6;F+^<~~GtT%N*Q z4QE)(Zugknm^^qM>oFcklKeaXn*uGMprWWe%J}byz*2CMQUJ>EpWj1gwo)YyGUw*+ zo`17i1c4J0J)|OsqNNZ+hNI~vx2rSTXf@7)W4}7>94!cF9xrugcL=r_N z=sEL>?@sJ&YXQ8blVDwO#iW19x<0yMzuj@@tasBpa#Q1L^VE+Q@5)MEHFE`|9GH6s7meYUzgB6$$d>FDF6-37qEWEG-4PTMGJ%WODD?PjM2v#=z?X6~BiJx(}R zu@e0fA&%hdvR04!CU$BPw+;&!%UxexxU|Gfi~o0?}k>pJF+OOFfNo-H*T zw;d}7E-d;Q*7bRgX+eD>21mw*sd@BHQlmk8fiZ!+=0VMz`F61S`SaTT=PBAcNg#_B zq96W7`va&}uK=P2<=zTdg8OW5hesIaTd+kBU1*q~s_B1rMueA=gDCf^=eM3*81$jt zP)jm;m$U7FLejDcLSdh4%Zkm+bxL>@DrOerGSzpkKnk)|RAEFoNjJdLPE2)(vNAK7 zZP^m>+4tVx=vZ@BQ@SUBZiyi2M>eVW?tuMc1KO>**>3O1--Ay*{-eYD^?cnL_WrtB zMreLW7CGR{k*pm-=poi1_+gwwsYJc&jDjUF!Ray$$Pip-6gJ_`4TX(~Iip{*X_%&? z*hC-M_Ky>F()X`T{s9Z_Kr)kUzDqWDb&ZLcX*Ym$SBv++Lj&@@Fddd&u5utMgrJ~r z??%+_5G3QsIGO$bkG=PfYBGJ-Mwy~0sHlK6B`Uqf0)n)R1&|;hy_26JB}9ny0D;Vi zfKo^5NQ(#|QbI&RFBz!=482JZVuEx62_*y);`ch=x6j&V?Y+N0&N}O?^{uo2iiUUP zZO?N*_j6s>eP5dl$Vrs92*=bwj-Dn5h<^=D*NwC$A=O&_BI{(Jhr>9ACO4+p@)v)C zz9}4?$3F}Hd3hf)Q>Zbff%_Q9o=iF@U96v5aehnQlY-0LWS( z9omHH#PfB(39Q^3Br?k}Q;ons)fq9zF}+e80CsxZ^C#>eaYYNyGUr)sd*YdQcxQO8 zRwfj_|KL~53DV|!K%NT-Q<REwKcQWtVt*<+8LxbCn1prG$h^qi`uWMVHM-sv@qG;37awg3hHS|;6~uJv}K zHAR=Ka$EH*OvSi(VMrFO3S=ER-1D5Jj_iZ&<+C3B#TE*4IL|S5v1{nif! 
z4jcN2kOFBF)6=DnswXW}cYTASR2qV3^6rrsVO1r$qZ?gQ?GIUQg>$)O>!0&wf|Am% zK^+1EBTK4?K93)I@P6dn#FU_dZ{^U~za7n>$pKKW3%uG54tki@sdL2xl|HdFTBTlv zd}3e~g+N5nQ}iF?uJ&}+XHV|A07dqJCu}q-Jrtlp?6IJ-{=r#X%-!(|CApTzYmcTs zwRlH*o&R3)=Iwl0+1wOajQ898o6*HK?}&mD$`k1}6;e9V1SM;;k_UNgBJpIf5UGhn9Ae)hB>bbwQQrD_uFFUTX3F75D-!26oen!;cCxjgY&53d z3x49H=T&G>83Q+@BW2d+7-8&ky~ZNyZr!`|Lc+FtbASI+%>q4`V?s>{PCC%idg1xP zfSy^I;&Sh%alIK%yw|r&vzto`@0bNlN)=NgXeZCQt94PrUX^f?tXz%b$E?Rl!p0wl z3HMJ+rcY=geCcUW#3g^l!iUdXO%t6ihYUCpq;5k22&oZzY7agzXSo^U!}8c`Zd)8} zr!7do&yv)2NWNraP-l~(-G9Uj5u~OZG?U$1q4hF=FU=A9Cg8|2AxF{NW!Icp#5n(! zGxS-LsD~OuK=(;sZH4;bTE{SmU$ybXu9(V2uza}1`PAiCszSB)lDF*AQ&dw4T7|#A z+ea$1W7cD6E!xK|31&921(DD=xHbW5o;$uYXT+;tX4IyJJ8?vrX|NB>_`GtCglS>N zpSeOD$M(rUnSM2Y{r04DIX|HsoSp;_GHjVGBfd>gcT~881V!W17{g*Bxm!Uyu08~w#` z9W=%|+ytwkJCZ`gDMN$fw8I$3pe3I@Nwyz8n5m11@Q!hA(O{=n_-f3(Uqdw1JVqAC z8MoW&Qs-sQTR4nv_1k`64wU4Gc+m)I59-G?yB5=mT;T`ye~%`WRn9Fcd#5fXcG|__ z^*&@O1cWd)w@CCwMnta`K}AQQcHP+E=&7uG05XCX`aOspHK8M=ALFq^Ndtyc(K(7 z?p>su$>QW$N9B_i>c)QO=lTMK!WJEptg?CaHWu)3q)i3w3a84{BmLdVc94FbrU84g zCi(5*hSO|>TxRQG9{;%C4LyxG>aQV7vggg0LoA?bhXFI4dj6$l!3vATcvnv8lx;$1 zEVQjDYeSQiGvR7c=uS|m5VNSOZYcRW;kY^7cOxk_&>`t*;MYZSV@(f4=(TLu-!dOc zB&2QZXUgNWZB(stt?liyJj@K;ZtlA-hu2|63`8ClaXdR*FmWVzNC*E!+t@BdXnFFS zwKJR|&d>DLi=M;6{Vpd;5$5O-KlvLyt}$0{dz^m#e1Tt;ZC;s@-plTiyp~cNqI8a+ z4{lg?iaZmiYK=QKR;=8YDJL#DmI>~oXQlgXF3v>$&Pk&Yf8}`?ShguK>oN$$WFDjl z<=Cb2?iU}6;Ep+|@!DC>Uq+F?xAoMH)1I;aVkc^@{FGsn;jd5;&>Qg~&+Nu5u$f|;`BDGkrx^%;A?W`CjBd-K z5_k}*rX41-Lq1|kG)&2F8Ys+v5+X`~pUbV!L|z)*)iCckuwOV#7R41XCbLRE4~eCJ zqO`zV&2(+>7UcNT7BKT}6VdT0>Qr>!IF!J|WR+3GqXq zs)%=Xs($~kO~!xPwxwsk)_nDb-@^_@Ikxjp0PS<0B_j6bjhQi(O>;@=E_JQk`+o0H zFE_K{?Q3mUpRpkl9HP~=qa!@~sx_B8T|a#yo%zGqFVs=};l@;0Y|~^jdTPpmVytzU{THEA;7yU&p zmQQnP@+P5E$=E1&-TKKXt7^uVn(|M+CRt8Vk-%hJTqu(yWUY^lj@@R1I49EX96l@tdWY>|n973x;kaV1DxP?SWirncr9TO#lM0@xT5BnUsd?hyZ8#@mEYoBM$&* zj6v<+1UM=a7)}%9Lpf&X6Xwe`k=^*T!=EMq((nz4xq*?!jF>XXe4$VnUo-9Bt}XVz zFYUj6YUYAG(Y!8#79yAh?s)Z!+wsS-cgHsr@A4H>k82Xr9e;YO-Fc}=N`fG)igg*T 
z&vZR>TvY1Rs$|5!quyN-YYP~p1{;0lNN{hWKwm7fW_*s#ziI4Z#Qa_gc?>;C zO$YMgPp<7JBE7SHQuPyRQ_U6|a&LI*nN@Lm`ISAqqBU|9}jW;H|uYXwSh!s#?r2%$5oAQLkEZ7QESS;HOHZj4vlMB zMT24j4+Mv>wM1Sno}v9sz58O3&iS&)HL>_-u%>6R(pF8ljWzT#aQQyAl!vj*u_LV&T!%SyCc zf^Dn~bEC4UuxQ>lhX!sZT;Wu7-{{xE-(}qmUa3J@gyfiq8A&ng<9Dr(TUh(#srro) zHoA2xqPk{>_Clt#xaO9w81+X71>!i=!yJR9{)7d;zy-T+0vHds%-Pmu7byNl8*fR5 zTjz%CNt?!ggj^^hwT$B%nR|&lYr$|lr`R%1Q{hHbq4E3ib%k`vP&MVnSBr7?C#M`X zwuGbpD$YeM^oTykzpU}^hC5pz@ZL>TGuk5sy65}aj?{{k+hnwqnN7p?i{e50E z@Q=|}O2D}5{?YUd+Kf5YDfwc~LdGu9#Et2pUFc;ZSN`&`0P{lA6!tL7mLJpcP2h)D z>s6PEr%u}NZIgIU$nw|u7l!7o4wU&40JU#f0mQ(O|Lf4-zX|+z?D#({;tGypej-02 zALXe|R-)bMh8|Wj9OC^~0U;R*&)}3(DX4@H$4N?*MTFsEv0q1M!R~Bd4^r9no%*Gg zMBx{a%;{*oYqt5VC6m>oBS{fEMe@JD>!# z6`^HvrDWNqqv8Gd?$L%ge+{MVuw9bFZpi0tcaQkrJXCLT*2N+X7O%LzH=IikmJ z4yLDGt+&aHD7URO-fX{t9!+IAg!}cZxL-Q*UpI1D#CK_^aV2gYrd5(@qRgdd^}sMU z);=`linaq5jPj9psWWH5k~Dg`EM!Mps#~uUate`gFr6dnWrY@&{hY zmGbe`DyNRwDuz_B2BLl-W}9q~Go6_qDFV{;8wZgJES@IimP9CQ&^%==VGDSq&++I6 zlj~g5755)iV5=@+IX^UwZ?2Qp9(=SII2=Fs^ytm@*H!YVS9$~m>MTzI=8b^QZgX8a z&s4I{awI8(AvVK(HHc~NU9*Ykyd;CdOve9r@thC4&C-z8NenEf07?l&LqqD^_6 z?}8{i;Aob>Q-=s(hPMGl3{Qmw<69I{MuFBf029ju(4aq}VP^mjH8so^AUtnf0_1>z z{%*&B0|B@)e;}Y___9glB?g3d3bF|L?@RrE^i<{Oti9j7Up-3bZabUpsC$pjl1-CO zjTS%0tO5P&`2Tzzng8o`6v{SDjFjgT0BWmO6UCK@RmoMUMI*!Ujt&A9YCqml`|;St z-*5eV-XY#q)|`cf@;@=B;{E@b7s+iyK)cw&io@4*|8V}9HA8)_#P`J?$uZaBQm=t) zIcXmufVl2?bBEr!Mc^~TV$Mj*5fVm%!ChH$(odYybW>8_@b?*d3OV*QrL=zX_~1 zuoT=OblYzN=P^4aK8_rF$l7PXdM`VS{4@D$GEeTCzz5GS;6vk(Tw4x~ue4u1#<|2l z#=p0-n*QAeU<(rO;>bnid80%dI{8h&pXZif&mSAEr81I+&ie!96cuK`jns5G zcs#~3Q&6ekbt303=6zik`4+P@j<}Cu8E(t><&r9*s#5@sS{85t;TFMW(XZkC{k-a= z)ewg)TD-?-@IuGdR-Zw6&gNSG^vA=7sJXGceZ2Bv#^mF4UC#)wSCGw`QL&RR-=I&-gmz?wL;{tSAg2KheM zfv50&u2UVW*u~#|!5X=AC5nWA$PE z?#{=LkLvzaVWAx|=04WM9mys00{^K zK=@-;NxM{zG0EnbE&}AKI5qxFyOm!uxQ4*H%%rxr4kdn72*fvH`lWd^l#( zKD1pk;?65>gKtkp&OUr*x*Zm9I+;ImaJ9nE;lL=T%x^EMtse(e1nB68ZEUF%zBc9r z&?_2Bo=61$kpK9%0Tpe@Mxe`K%uBbYN%_S`T1wuwK3*qZ-`ebs(V8*{Cb%J_2gW}j 
zv#qL~UP@K&^gEz?e0(I1y_7!Utnf*pG4#6SklJ;}B%6|Qvp%bl5c3a4g(;E`o;^S7 zq6v3(HX@LXW;C)B6rA(^)BFZx!XF}^2{n}Itn(0QYs_7`H6_e}8=1#XLeo zr+TZX`FHKsuBL~w&czJ^dXT*7ID+zkZ2&9#B}Q!psZ;~IKJfPyR>4h&L}-cl49w5a zc4Kmd?Z7~m@~^=)DuGtwVf|kUBziF3oEoO1gPwOQv4_@gC>3G_Fi{g}DFou<5N<&2 z7c1a>w7b(7Kgj&e(rhhWY5K7QOHtI>C?`sC6EUqN43!|9-+T1lDlNdbhoMzGzV&o# zA)iXhQXB5!FCtfw!bj-chkKr zTzdzj3k&q3&TchaRFu>Z(m=0Ci+|vuuC{|Jj%>^p<9RX@KnTD?cM!(k$UBg;)&&$ z%m@D)|5{%8;oDjfjy>!o*(=3|FK&b{1)L!;A8*$V08I;f+GmD1&U$R(VKW^`F}LB* z015EeBQEj<8h!}umyc6lkid07W`1mvNLZ6T`iWfK5{J^|W_k8YhuhCKHWm}YHGMQf z3-&48YZ+<3{OuE&=`*CqMj8LTvSqeKE0Q%nsaf_<^tocoH7($efS3<3=Q-EuIqjh1 zcbF3MUood3zDK6`C#k6G-7vwScmw|;RJ*&J5M)lb<`uoO^ZTZ-@lX z@Ev@zjD``u<2gSIc6+%~;Mg%I-;@ayiX{wL0u3T&EmtaBvsQ{RUb~UnDm!?wB zJqGa_(BTBNxDwpoScA|B1y+S`>(?Fly5k_o7e!@=6ISP>53wY4v-jXX#QwTv9Ckh#dB>ZI4@b!-20n(k|YT|_Iwn?llR!~sh>YuIg*WQ?A7?M_kGWnG5?xGa-D^OYL;b1 zlWZI7@|+sduN6vXIa>Oa6q$I8*kz^%;gkojJ_{Qui*nHkbLn%iY_CWm8atG(+^$lT z?HYZ4zF2y=;O3xzuzOd}r$dJFsUFdm^{(*K0ml zC~tc2Uo@KqdvQmIk6y-eF~`d(FfZANfvqu%ZkwQV#22%usq@tv(4dlK)tJcw)lfgZ z^w9I~RJTJ`Y?BHcGK(bJH8odD@<{62ybD}AptEsx30jE6iTINCgYTRQhoHnz!gJEs zdUQPWNF%&Up_EBQn&DwZ+LqQ$i{VLDuYr-BK&N*4i_m;hwNjMYU6k4{1hcz1j$bS^G$ufO`)+X@(X8KBZ@iRmcF>oXRvUXa38XQ)vp{@ZctSzD3cuf~hGT%H0rzj<&4e9+eA!Cn5z(i5T!MSQDq!)KE{Mb+y zAizFunVxfPamhNl;B&y+1u0`h2r1Ly`V7Q#JsBb(%;pVNNZi}1m z21O%->MOmru&IjJPHYXV6LcQsI8SD11Hdj#Cpqm|9+aioT*lL5QJ-lN(Z*k10}$|; zkV91&SESTxE(`#^5bu^{uebSIZo4w;UubFEwg`65O0A9>6A3?a44QhOyYl9yWcly8 z)>yJd^Ollwt+JYO-!-@D>8Im+%q+hekB#bnFkpJUU zDDv#8bahlS0ZcQEH&Wu@=?-~f!5s=2r~vy|#&NAW9oySiI&C_GZA;6ccJR@siD~Me zuZkunx_DHUndEH_Y&*Wpy07pAq~J1&J4RQXN`a=LcP8#Ds6Yc;8iwQGlP)X0xXH9i zVDV7q0Nj)g!;{tjU>t2$HpDCh^2V#gcI5HaKlO}LGrowt@VLmat>To+{7WKwFNuew ziAb%oDX~2=ZXA#Kj=-)Z*IOqS{&vI*pZggMeWQv=i=WP9)88w=oHiA6%ptGqept*x$<2&cEUHZt+nDaDNN5H zFDlC5#unn1N3A>|+(ykIeS$CpSD39!Uk#?`Iz71){-M}h2a z`xEQ=5BS1A|C!3nSfd!_y@{~J_(u3M?2B|HS4_exjHl_o#%lJlOJveS`{$7~3f#18 zTla~*?jRC-2FgL-N1m6x9N_j30<-R3?7$xk=AO>Pg?dgwXst$qnBf$zFNO~6_rAOZ 
z9SUa=tFe(Vj#0ul0re{fU62I;5PbeAjc*5H&S8HdFPj{aAS?ikU|NgtLLAI+`F~-h@FYdV4R> z;)E4x!-Sbo&~*BcRSnm>C<2LOS8$tM^)c56ni`({9N|y|g+2|itHc58AI9|Y7-ox^ zm{p`2oS5SMSw8)sPR@ZoI`?XfrwP20)0|*hP6ts00TIFY3mbagW-2`o@pl;FUjSC_ zBgY!Jnm8N>V^!T&$2BIdcNkXkjmOUdgV9RsEQ5ODgGt{4G&8?syv(_7YIAw-n*cF3 z4yA0YJK2;BfH&Tq0RVG{Hw_2MSy!0){O8D-JHP@=27dYUH-Tk0+`7+P4qv3`n?S$N z|2L1?l-bXo;an&IGWzYN_ONg-=S%U!nVlt=tZ+EJy9D1V9ILWI;r>^5c-8$1ouW+g|77X zQ&eHTb9JiuMe%N&#QZ;EJRD;?Z`m9kMnvgCJh^{y=SGHLk>7VmhLgL^fV$a*{9KMS!i@8HXgY7wGLTAWD? zm__xtMwP}RmTHClJJM*$WQul4;efmLVxCpeu4Fjc@FNBv0j#NL1RR!2*?1XdnK=S@VR`DWqC4uG}-7K&cF~9WLLU&(=O4{jX%)N zXnTD47BO|Fvq)vI>T;-2+q@OvopaCeS%-<3pLw?I(2Qz|f~-Sph6QeLmefH`HDVdT#PYepazs zqj5c|fpj33Tvcp>a|vlJRLx&9zHJj!I&b3i;&w%(Rc^k*fUdI%+*n0LMbQ^itJH}U z=D2>Lc9K)uB$+z#{0qE)F?Eu8UK&HONc5d@Kafa(R$AN*h-8a4Z8qw`I4u@BNz?`( z+j3Wu6_iEe`Q07Kf$;rEJLs3Bs@pu&nyoqfV)b_cCwph7G1y03S!^S2{O~XAu zukNqZYn7L7*C~n>L0k2*AYn^NX5ERMx6cNQWQ5RMUsu@=NOtSA&Vc|t=n3)snzU?= zXB9{A&$$zvJR0^We;9afD>T5%Zwq+}LV>$w zuW(>U?hU`f3*<3Ej3Q5;>6o&P@g=^@rFP`W+)GPqQ4Rg15hA>K!f(L<@urJTJcX#X zfQQ2Ukr6)S%jXDRXalBm`IqeJVi_T;mZ#!$$C00NG`^O!_6GIoQB6t~yu~8|h0_b5 znK7kbMhVaY<0OKrN(}pa@2z%NOB(6<4qOp;WVuZdFhIQRLGDaT$Tdo0rgOW1oj{w| z3}=Y$QLI=@KNK{XBJxn!)3A?oXE~@rZ8gR@3y3ijh3z!bVX48KFW4MVE`U2sAdH`i zJK+yJ28%=OR42Zc0+s^rqKqQOr=c4$>vBF`m33`3gv0=Xa(8TjkPyy$_MV9)v6daO zg0{SoqCZ%&Q8k*Zv(So1<#bA0@kGlzx~0Qv-cdLe_YV^pX!D13$&E$5*7_IfZATT$ z5S6&?n!!-XP|4B#fLV%X&|B|E`wF^U^PiaQx?d{kz5hr$d#hO1%nY~Lrv^9EEo*Lbl%F4*y5_gMKTivNZgiCuR?kd=0ZyZ@ zn8hAsrkl}qOgvQ_?dl1BxpC?@`UM^6WDA1Hows_%yEQLkVQN;>JQ+d|>^3?(f)2#I z`IAD857jpmzU#V}V>egvHQBFQ5?XEhm7V!!KYF>Y$R>IIXr5W$*{3$6X1z0(P!+o} zRnfAvA|)$Il=Osz{z9gwYG$?AlC>^gxf`cD5$HAyR425~_m`ke>keN?^mu?cELIhj z9^Ofo=E*a~Vy;dZUGkf|n9<%#hf#>_pi@iZJblkH!bJ9K6km2+vAQK&A8x!*nQ_O! 
zrlcyRcBREf{+)HmN_*wLoP}v#j<8;KhJ$~wT2=S?S_gNA+{;d`--NOaJG}zXL(juU z7oTN~!{lnl?3=u2_ZUhn5WT)jWLf~_V>iwdFNQZ^pBEn%8(hVIKIWN6Pe`M)sPR)Y zkMh1PDHyM`P{ib)L1sWtcBA^AzX>et+B?X0d)VP_*I#RH{u&9#Oyq8hJW>8hg6#mX zqH!E9J*=4pvz}=Ci9fuJHsHXCuDsJsYzO8io>uygoB;~u`~v&=>U%-F@N{K{+3C~ks+&GJDQS`;RSl-TSB?Z zoNIlkmw${R>b2FG#DD^O$*HnqA8q&E&g+EhM9Xgw#NVbA%w!klarFUWs(s`z1M_4p zjbmTm257^c56w?8Qh~}Ev=xwM#36zS7+((7If)M>95SoeCrHknFXQ3SNzP<$ApdVt zE+S3$$zT04GGm!HJsmh9jS-EW`W>|3(cX3HU1YZ9#LDs{9zZ*4Xcr zfk1X>4szYbPlvOFxmNshM~jLguq-nwr*Nsg&<*mb?Y{Rp1JY`hvDIwVzFi|$Dx`KND`v1P_(gd+byF*+M;vd;^=y)f6zU!_T#b9>D{~}4G`DG z{I4DNOX{T+8O1ia0fX1jtqXml`#bK$#lAO1=Pjf^Kw0^sIrc}iC|mBZD7E#pK_O;Z zd(x&*>W6C}kD_&9#uM`ny<kOzlbVabJmo9EPzhayx~!>W16Vm{=;I=lf5yVrnU(O8XIjq5b09IlhW+m9+Xf;} zXd390PG71fjb5(Fc9Ji9q}oe;t$-%8z5Gh-v1GK zN;Cg_#05v;@iGMvQhAV&xg?*tAg)+vvlwXB=h)qRn8_FP;CvYCmN1f-^w_gxeFcf_ z5KbCIvxG!`^rfDqOjD0X!>#x%>S%b)tu7CM>f=`)K zXWg1qc~qg?5Ui#frK1T&Zxl2}DA?u{bZMLo3M2Uk+@p91pI9u{g|2?*sa(o45h}r4 zr%_wBbzauzk~jJJ_<@*+?QpCZUxXt%eVG zv`puu2knIwA%o}y>4 z8Qqo-%G4cH?caP&&ryIogg%r~uJSeUJ6qp+E=<~I{g8=;a-s-4aBN3-PRv=B5wO2M zE|?f!O(#mMwJ89#krs2WBLT7}5glB)HZ`ZiI#;)~+;rEfd`?kb*s^q&Glye zC|%lA`KoGuFH|l2MzWk__drm$km{LKqOn-j%Z6cffOxNG0@Q%g>ybxz0EzJY32N8fP;tTCB6u$wxU=(2HUQy){;agC! 
zYuPttlcGw@EfLGbJ7g1PrYFC$tKnL?Ir5=?s}$|KpwK%?#>JpSTXr2|L&P|v%iUnSs1fT-;0XFz2v7NALv>Qi>i1DNXOY|AZ5G#(U zkp@a1P|`f#*A~$eVWTw*-8)0eba1=1>NNP0ZxuoVHEWUfI>UT&u~^ zX!QHIO|8HOgn~%({o~B1qhIr>wIRra&YstID8a$QWgstvk-ZDQcZly^c5^o9^LJ<2 ze@|YD$Z@>h0BjrzKq{7a7jF~_1Lw=3f7RP#chx5XwUBvqZ=u0tbycj9hLw%O z5NlB@^7Yg7jBDHrH@r$iqkcp8mDHCmR~!;YG;$m(R;LT}9+Z?6$=cr>EU69HE>luz zb*=q6^WlT5{Hw40bBEbR$5A(S=6u_vCv#;mC)Wx^nEa$lAeR++!Mpm!N*I-`73)6S zCXI3hXrXbQA#=)V-2ZZd1*%23bor$J+ZpHOb6`Y~(=@#9z+BYDZ!~=p8ssN7uhIuO zQ4YOo^KNX48q^$Mx#j=Iog?w3o08^VvUI!c{RYK~3>Ac~WPVmIO?N0M^2Le!mgLye ziN8UPw;-8o9hl=h$1iIZNZQ_WZ>Amp0&{nPU$d|>iZ*N*+b~c{G0`wF^jOsgt@k5b zKOsYf{|IGV-6W-*HZ6xomg0TFuM!=^6&$YnmF(}Aq)$;`dFIX@j0$k*boYQFsZcr& zm^pfWc5hZ1J~qPFbDvJ~20xc+&Dc05@4uylgkq*=zI3;~<9SW&!hZbp*$+dKh~GgT zNB^tU!qly{ft4LcW1QXY$Te&k@+to%B&u2C?Ebo_BVaskQmr0Fu^qywQ#8{wlAIDp z>}>LCkKY`1Ncx(Yn=$K`=SZrmjr7~R*8cD->ui?Roa!08`<&T$@*|qq`jnb=*kYPG zDTUfB6XasLp2XBKYn$427&Y*(TAfp`W}0B>t*$`&CTRpl_1mztAScmYptKlJf7KkS zxJQpfiKE8y?tl2atdm|(c@nZVYu>SnlFu zrOPw-9?j{@xU73#%MI6WW0PBoSD7MO4}BVElk)Rdp&R+TNs%R=jgHa}$SPYBoZMD{;;NxqfoTzTVKh}$!+yXkXu)vcQVeO{H^6nHlarWBZyRW*`%=pTJ z>n1~HoN9;8IM29tdC{#-yZQxMwz`;SpL90QH5h)r5A^E)vLvMo?Ts3Sw>h=W%ecE~ z2yHEvNxAo=yJe}`+N;dgT9)H=%imktkW^KSgDn)`y+$2;@tkK6niH_m$>Rg$W)czz zRUn=km}3ed)nfL~0@X`EU~uJ%a$$^b0yEStKpplAf0RGYpZX?{L##inIlNp<0rtGE z{B0~H*cpxfJXPBmqv4MTsZB9Vfq3r5z&>-qp1lzmk&KL-Tpm#2w79qywTj{wa4(`o zsz-Lgr`#z#JCIEbFQv4p z_^Nhq2f?gyX=Z`rzp%0i zbXS5T$`5iDZOp*}y{e~PgiuciH-LxYNrp&zT2c$FY+3KbUR}0h{egE{uotod=HvwP z>yM7`Lhvp$C66Z}VPd^JbVjN?%#)AJk2FsfrtadZd5NI@UwxG{;T zfLi!vxoml$BiISlyJu^_N0=>wN5g*+l zzbw4S6nWNiW*#Q`Nj|%tFF)EMfqWVwY#8nTHFeA}@0UEAiHoD!GHdW%HDR^h9jOmwZ0W%=`p$qF%|O;8It*I+b|jGJv*^P~?wr-<+rAm1f@ zG&1LxCwx^*!1vm|uq*(}n|i>q;$39R8gRoCwUaQMyPGYZO8|G%4)&?q$4*b_ZDK{( zZaO02i%MAy29F~Ey9(9(Pmf#kaxNFpn2m>78jVc>>P>Ec{9#3DI^WO(ep1A6SF^LC zsD`08^xsWyy8t5mAOHQT{eKrAi40&8=PqQlQ$Xv{CB4jp8CnvACuZt8X&UE`wwwbe zI`J$BF{5n?NnH$rR*iTgC+%D%_6$ACzNLVvq8p^3? 
z42JOwVH9n)TO**rH;N=FFx>s0zGA1!4t?n`ksZphP~PH-gX`Yl*=eJRKcmSkVRm3G z>nJXqz%jS38>u3eL?sb44{ky}%&nSes880o%4ayMSKdz(sB3(2&$V~UQ|)Y^eY;9; zU!FD6QKFVx;lCV-LU#ccA5M+$mT?lB&QEe9p|cyp=LsO8%%c5T35@L_>+&w%i3kZ!h@Y zhyEY>1Y+k8bI@V#cblf3r(f%c>WQd?uD14dQ_e%z^;NQd;4_O??J>DnDwuy4QmY&i z2jd+Da#_cOrRG{ixKP5(-|~iZ#FGjY`#xvVH;SX`*e!)QQIuYVnC5i!W9E9H zB9j_l>B))qXDL+1sPiOPXGQ{AEa$|M?xX=&ka)`_x>U?X!a@J>@qid<<5F^3CGTou zPFGIMh1iZ1vHr3@DCIr5SKSInC*aV}pX(ONq=Ko$ntHu7O5nn(PXbnc@vx#eQu2>r zU|NQrj!vK#CV#4ELo*AoYpAT|HDWL6H-rzIFgj@75 z)(Nth94rnZj5dh5IOinwSoU=m$X;vcIda(GOSyCA(Z$A8<7_QYnU}>*>B@wJ6n|vDHts-s zn%J953(V-4(+D05;MLb8ZAw%f+E?z4x$@S5dW6lipRQQ=w5z|?cis%meAJqBakxat z>gk1F`K&cJ?7syArQ*Y~JO368VllLw7R)z+TNpurckgidCa@TjR%Xxl!=TCg0{rog zZvqNP-cf)lhy3?}|EE8mU2+1brYk3ISIq*`Ndb)D@eftqoIW{s$w#{5KHryBD^tkV z=-PST;H=Sc2O(zjE?G`81tl68WOKlsDwP?g8n7L zn&m+BD?HuA3hMs}QKQI0Z&0_0a>0aibL^+tH@_k|WHzWo949=t>Z=r8lT!r|khy_cl}TF%%fglo zOWVDi8f3O=pjl2*f!<)2*<^)YV%6Dv*%Yq^$7d47qmP)NprT zf5N}S(yVIxX&il3zNV)XfB^fE=j(6cU1t~ZW**m;YT^CaFfWrGxgiO5$^Q9B6O#|A z;!&2+PcL1b0`8RK^FxA2xN3IS(Db0*)@`E+EAw%D^hq_~O0IFjxkwtZ{iI(RgyF_Fg&qlV9Ls31jHyjD#li)l_zV zbcqjcwf(@T!nTBmJ7TPtZE zle~~RMql^)CNSM5Sp5p9!vD+Fnu84ccx123`~`_Dy3#iYXr2VMZ~yif>}oYT%9laz zjg1oTfj=WGcFRg~cn1&r<0dDnLEYAATy4XrfNf$0&;T>6vgv`_aCG1q5e6}>(9M)D zPHY#^;;A=GELm~f`PaARIh1nhV@|XfmN}GH%5lE#F9DJ)RKuL3lzFFp@tS^6=~2() z;&}Ss1f((DDK;jyP=dS?`C&+W(1m+;CG91kAxw8m}tRMxA1zzAWDzcpu zihx|7_ep-c&DW|k8u=nzKo(Hsg4;QbTQOi`_K^6{Z{#m=Z78rSKYEmZVQLf4gMH;6 z1xg!NHa-CIsZ0HFqr26hoTc{|f34Akb_P)9dJn1#3)`(AQg!=2`2~#Y*`;x97Ap^i(eK@_p70v1WwAUb z2CyBl=c4^0zwWe}-ZIAmA%c`>*ACbp zmV?vJta#jtBB0T!kkc3RftVm%=HGJR?HtD*9HYY-vPyI2{HE9Q51Nas>!Y07sZN-7!d)fSqey%2ud$mRs;kDM35E*=_1lJ3M4j&((*}F zT2z`eBfS$L6a|q^q=p0$X_8Rl3n`rG-QT`@tzGUI=Z2 zXiqGY|1v!!w5wgVPuF92s+vCSUb{*+j>#lzpX z&ocxJT_Jv}anbMFXVEPOa`nf#IYZPbg$DhDTWV|(dZoE*j zC{_D*yITaNJG1hLb|SY*O(K2$2ytOtpH5qJH`JX%xaQAyDkl#x_f;^Lhr>dFc>Ugj z68+vo69@bwx3wU_@j#1UMvQ+;o@&31UMr^#ue7$)8me$de5)8q6kG~DyHPV<61^6U zzk^lpDi!e^ 
zTfr<%%yLwBcJUw=S5`M2;#V}nDr5FdSUz_@>?`DrpQJG|)|O&rjx)wGHRINS z7NK3!ZqZo&cp^-7A7d$3wojA!Y>JLkZ8Ou)6 z?4Ze|AIZ@fsK}j6Q%j%iwaU;N%(4y+I603f>FTjR1 z=%Z}mG65R~HCStZo_L$1ZEkft=Ef3SIP7Lg!t_YBJ@qHvwB$NsqxHV*iBL{DET$I~p>y6kL_vA+ z{6SaZqEya#mDG`r0|8I2G!CX-Y9@;~WJ>NTsZKW!G0l{Oo!MS6)Z(W6>9medaK5Xj zwW+nw$vg{9<$(l~>;0ZWd7MC|YWxqk%h=(#F9LW8Uo9iK1WRrnaZM?hfcuA%kqpxU zC)nO3EpKpU%G`=k+x>tz!)U|3RzkQlWm|6CG>AUgG#lpO(7?*M*_=>r^f{%vCU>K0 z`R!`H><_{=re#BCGsk5lPfUQ(=qBUSJ@q8@`?oxJnXOWXv)2b_?Wx>)DkHA40lDxg z*EopYaI3quJD1m=Qd`A>Kb$nd(j=H-xN}QQe!Ii0=nW3n{bi~qyUaQ8Y;?GMvKdSU z{qpsNcY3#N0YFwi`2L0X^^)p>()`ZICRa?w(Vf0d291?eUEQAXty`G z++pN#^ho8?-tXnJr}g7ihmkDaTBj@msqp-q}|A#n<6 zbEDbT&hsER$V;oypL0YnCJi&*1e|y&c9xUy)h@FyDYr8PZuV{ zP9NTPrgn5~viC?8dD+lNNdO>;_YSl0NjG2x|zkqq@z zEpQ%({1z}czy;o*I4RJ;W6-1*6`aTKh`2BtKe?&q``Qr_zgS_6StbFu_BMH#bB6Bh z9LE0^ay9Y9n==T}$rHM_&d!I{56r5xIkCWubS}J|cyf=<8z9KyFMJIAZ>#aYt;YX@ zt;R3`_DOUo-Lmy}9DgO8jFsZZLFVt!@OJniq|YL}<-Q5%^22{I;VKkMhe`QJcs|95 zcizk5?0-M7N{ZC5cN#nA&rC5Jm}1tIshnd3S)|ivwk1jwH<#(Lm^e@Lj_j8WE}N{G3}^n5bi|9X(#Z=ewJ_G{K$suAIx!@5|> z*bxiC$UL7fw&y>YwFO>EU|uDHqK)SG7We9r>bLaw?@|l$X`a@*2XZXR?|GzJ|585B z&7hpwaC~U=YyttrHor6$XAHp(7sh+uZoT|rpv^P!_=}gu{PvI7|WfjQo$r(<|xjQqrT*0(yBww2?(yqA0>q7eOyR-DE zn1xJXI;o^xgQ~=uY-y)FtJVH!VRYROAc|j4_I>hM**)pC)M=;EHzk+9WjTzMM&$XP zSn1@!&#;BCJwbFNPZF>eyo*GT%d`~@Hr$`7h6e z=c!vslewtUnMDvW3W)h#jS_R2Cu^AaXRn7m`)ET3@VnWwKU4QTd+WP_%LqUA`Ppt~ z{fo6;56^t>IB@vUBwqL_M}BIbXotG)grD)2y*m$FmUXzS-cxLAU~wc_BITIo_UZc- z*D`C*FTJJ*h&|z;n%B=O=y!Qd9)8$t`oz~XQct8n;c1IC*?KznO*-*Pk#JeB^Qs|& z^@>~BxP$l622vZHItHfxW)5>zkHeFZfio7{>gDic%dGYS$+GRe;faO5l`^D1-y2e^+9mG}tef%R=4=?Y=PuotCpiufQ2hKNiSANwyvg^*iCZ;6~f`RqQ^rCY}|>$WCD?)lmGim%OtLyfLR>P@D9$+XHO!z{(^_|4$EXeB}| zHX98-6+fD4pPMKJ(F!KCYE6vth!5InD-s&S$e`dU>&pZKC%A!|xS{lF`BeVuj0ie$V8w)9+zzKY!|Y$k3s4LFNI z9^*$i>n-TNkkUlr%0n5bk0evX6vK{tj}GG^jv6gJ;z_krcCWj_e&mu8Evx^otBlmR zV2J$e3qaI<@CM);C2I2yb!1)x5Z}IwUO-ckfC18nFZY$b_Mg=d`Io{XP#p{G{(sCY zcmp0Zi!lm{by8b9hLQ)d^8=*Sw=ae!@LUXzmms?+@$u_lJ%4-ncu4GI4iK&|xHM_u 
z@FbCYC37!v8nqv2kJ2xs_Q!yAyt5Y++2TD#Eyra_V!tB5pXO05{LLXu(2lzdK4S|2ZMva1R6x4g763|r8>wsbeQZ{RlymzX<)E_b611+FeN##MNN{AOpzBLi-EHeHvk ziRx}2{JZl+Mx1Q{1vxRX3)4(WbEbIztq_6`8c);4mz%>(%4~fbEUyf3PUK(xYCo!? zp<)>lquH;M$jI$N+v)|@;Zkv48I%T{k_DjEAmM?jLV{Pezc5m_-8%H7;7==x~ zmocdQybth7_o0M<9i#;&bI(=hSIW=a-K!TfwQhPq!Iw-}BJA$oJk@pvJ|-PO2v8Hv zwM>}&5wJbrYNWa%?MJ5wc{o>WU+(0L+#FrSI!E#xdnIdlooKX64wyC zBPlr2s_iri(g1o(83`7Pu80OJeC`U^Jdc&Z&V8eVoy>q&viZbVH=($DfFhQJS+aN8 zSxB~`_R(~818!L-#8%|ng%US{v3lY1P}*Vo&%K{+OKGNFFN^VjKVv{0U&EwwloJ`f z9c4Zm#qv5$fyx!Lr|PJw*};E+qb86yhFtZ-9zZe#fZmvny#prkDr1Tl#Q|g53FLFy zn2BJ?_=B?wSUe2y^$A`_Wt>|YrcU{PkUqhBikm`^L3V>dN~<;~5|_M#5LxD zj&Ttquk#bVR)w9uoC8MWh}$~#2Lq@MudQ+A=vx-SDcVPfUB22?C;jK}W6Hb$qw2p) zJWv*jKv}>SWq&IR_9g6GOI@+g5^Q4)Je-O8>k8Bc7tF2ce}qJT4I$b_UmAt+&JarS zxS4peKi3W`rsCiT=7MMqg#L}2<9+1X27Ke`JC=N23vEi0CYNL$rZ8tFl zYk9%gL_n<@Cj*Q5M&y^@Jyk-i>}Y%nfY88xa+OXI;nuZy6HeQB^X*l)t*hn4P(48mZ?ZX@vdO6!1v#Lfo($6pUYcvo@ei`$b0#? zDv)z9P8ymof1V!FiVo9GzC0;&@>8Ibv%QB1TKnnys}@Ha#q};TD|E2OBllGJ2o>J5 zi9BL&0?J5*_Que23OuJ@5@TyzG%ByJM#UMwI3xerhhFcB+h-<(yO(27rIBx>9%-&> zjk=7Uv%h(|=}RrjE`&ZOzc7M~K|tqjA!mNV*|d4KEbqvKGf47}@dpX*JmU^fPuL+$ zA4r->8vzRaxXHIJy!x28``9aUH#V9hT@(5=+kSQF*{g!Wt#>)cm{EI$+jo?q2v8PX zRKRF5eN-hY#r6G~x`@W?`C`-ZoLy|G&fU)A#uH&3X;D>Ih_449abFuqPPHA85u28g z3D7!&r;F&72VHDG*-@fnp!eyz>d%5gV?hxIiwf1+V5Q|SP37~}zig0J*vT^F))4=g z4Z^~T+DoGMh$3SV-@5C&7>lC4^V?jC%oeZDC{NjI(QLzCOY8~P#UH-w1jP-@Z%AWX zm$;DTb&^oyvEVscPSVtIwC$6t3I@sr^P&WOBDc`IpM`~$60}=|XR1e>N;`uah7c=c z_){u_>;ja?>v=Lrb~`pef(lqzdTzSFCg1t?Xe47P0Vo)S=1@yS3ojly`J5o&&lAFY zc9CO%zTw03!eg8pOpDU0E0?;qtB6n{40e0HPfSnIkhRxOhASOVlzNOZOogaPX!bL_ z#J6>s>tx7D`VeE`{ncby!DlTNl{;ikRl1Cd{w~GzMmcDRLL-rvB*RWMloG7V7?u$g)Pv*RtWsmauC>O9+%X^P zkc%H$Z}qk$|4 z65z8Zx9@T#zW`WA&IL;6ZC(^lsBohLdaoc@tG-WR)ORRV&S$qbArwpS1Wre1^?4hfB8)Op!- z(hq$Y&{cErQ=5FkHO^(KJq7 za_-5Eor?R}*V9tC_xj`HI=j8-=HqkcFwXSG5VPi(=q;Jw4=Q^Ml&m+X<{msA7`rEN z?sC&qKva2-3-h*Blj%N{iSOwyp9rs@pw+aG_2<8+zcdnvJaMx!7j`WFrD>&#_~XLe 
z&6z3!5y4p!CMM>cBH~9^%O%FMMoBbUqu?gr8&I5sBQcBOoWlTIBaCv>_!SgVbHy+ zi)7$eW;m9nmy@n~Cg*exys*~PrOzrwyMmQmr8dO6_3J18)%lILQRlQTZi!(a_R}eD=tR~elaESAgfok{%4;-Rj?ZC9o%l`u|L0RNq zdB)WVI}ISQHGMdc%#0-MX{d-=~y;mrJL&&V9YMG<5`*Fxi4;t@!ki+rg9rW ztePCG7Y@l=atMfKIGdjXL}k#8zOnvO2`rh+368x#*YpkcOgI1+-=N!g?qj9`uS~aw z?FTBBfcNm!C#1FcDSxgc&0&+zz$k&xKH0soMgS1@o-XfYKznO476-5}zRQ$nXS9oZ z7&;na30)`PymOef)~hI&pZQ zJb&8kWBHQ6Q-gCyS{C0-%x*+EqDHZ|xst`aEZ3h2sQL3obLMhjqahW0tKN_@Gggu2 zG@wh}Tvf+V6qOOH=){zt$2Z=#Px$$}6a6eRjv>9MQ4`x3cV1s-gsUEA__eoo?*usw zB$;9({jjr89m9s4DpSZH(Nm(-hz4R)_z~F;66;V9y^V=_QqNCYdZwpV25+&9Y?P^G zHsXAN+Y|H`sLVN^376#uB+(Vtfu&@-8Wxqu1AX z2#veKptLo>ri@#td}zxlZG5nwS?m4tDfGpih<(FQmhJogavhJ7G+OY6Hme;^X_dJ9 ztX^NUzTnc(r%xEx^7o;A>bKPI+_?)th7T*Ys3XKjUhHTVV(gu*=#-U(l}-G2Q&Y+v zlq)4;;JIfj03)RKQ*Xw-QG0{snIQLm7wDh%&L1NOormLiM>FS}CXPdXOcD+M2qW5A zY=3n&_MUVLc3=IuDWudaMz~_UAk3=oBm#7@GY)s{bvc-SnzvH!nAh>M!um;iF>ys+ za#3_ms?cI$?t6UfEwAMUDTPCA8z|Z>6aSM$X-zHO!7voJs8I=f?Zhh3T@9RP@n)`yoP8BR>>$uh^ivOa)_&xWdIY1h>-?+Rd? z6;F0YIpkKkPl+#lV=CYlSMhNZ@=REBCqXnf$8VCbP5UvjBeWS=;#iUn>w+*kh;y*s+51dN~~jDH>7U%Z8GrDJ1jr6u-Ij zcY_JN?PfVb`PNSFERF@WUrtMu%(MM6pCsQgU>#kbG?Fy_q6{-c5SNFJgCW{0UW%9CAGa46=Ts3e`B;pR`q&h8#dh4?FPYAdr?naSqtt8D!?zZ#QecF8T3 zr0V=(c`+qKgp!%m+e~+E6W=^neYpx&6NG5-8Eh0R++k7KJkv5$UW0c!1>~wI${iMp9MJS6J(0W zty^lep}{4OvM_`D^F-x4;oYCZ0s;XMjG%wMv0j7T79F2pqLq0nI%&M9JkV!WK9_=g z83kw(*g8nF00``31(vr);3bbzrd>3F;O<7d048x!AOrk^2A=Qm!l zd;Ugx0i*M47Vl4NIUX3DivW}r9{5>NSdW|^L~`ZYA_3jChTkM3Xm3Tv8R;{BzMgUpHLYXrXnQ6%Bzhl1(5$F=&-fCZ^9M*jGHt%FV0~r0 z_nSLhQ3Ns;;|&3pCCd(TgQ;c~N54lapmZMB^{{)|eBH(I z5&1^XWnWjhLUY@kl+)!kLrKb}D&rRzc<8R@Baj301_DrEhXa7vA}DR}t&fS~#aQA9 z&~ql>fK-f7JS7xp6A^+YgIt(l1Xxi_0mqDCSOhP*8)GCTFdtc3$xRP|?|hf-hF>)I z^JOfSyEmxA#&?e>wzM4&D>GT#tmT_~gTnlG((CP#|16}-IPN7& zCz?ggB(@)*3}x=gJW|(uzRqR5H!98P7cwspwkvRKMdef2?!t68FOV2XQe#x!Thluz z|DykcrJ!-7jIY#5!eFNQC6vQuN0!m(80j#l4NI1T?(!mkcShyAiG>Py0QyGdFn@6+ z8UaU1{t^Nwd)6*=4eD3~|2qDA? 
z9lPb{=Nfq`bP{>MzQprsisS)WM!;a=wkFLD++&obks7nE#UO#~M0rphB>Q1zpvwG^~Fk|dr!OIkBbIcN*vh}*om)vuzSCB%F zcwZvj4k;hZA7Bw2l6Q2bjTq`05oH|xKQEQ)GR6d-Ov&rImVAv7V})8hdTo(p>AeOr zxI)cqFhog5jA!_BCg!j*y_67Mg>^T{fvnPFj1SA6@27Tz)^B*YJ%k zix+B!Fb7gQkAwfK4LMR0SByR_Y+W#}usxq$pMS~Ra7kepp0~Df>fuAWzW@j~)4SAb zV3{*IFJV2`2&e2wqP+YTYu8-Mdr2q2TzGRA#yWGL$LISs&2Bq(YKsQ!w!RSE)}@s( zgtz@wh-`Y6J(ybC%K;_(Q!i4pkN=xG(jh)G+EW2NXRzC-FrvR_|4oh`&F^kA#UY|7 zpexAD|J`hr#LcQJ%!_BO&9^$8e4hP8R`0=>unSetGX(AXwAASE>(iM>D~(epPdM1$ zFHKak%YV?)-_o39LZ-$jo2JI(!0#O$8CZXJf>t2q*=%6mY+q#VBnp34s@i{q82x(J zWnx55>b1gab9Om<2z42zRz)+ry2lgWB=izeP>df~9CFo;1BBzzF@a6KIJ-4tjs^Sx zwpSYHYZ?am+Y*cfY-%P42!=828Hqtj0&4rbM+sQLV92k?du4VR&>JmEx=&vUpX#}q z((&@>A;F`%id(u}eZNMuHy4zD>mOF3P3vkGjy zXLafBh=!l9TiKS?ZM3~Rl{1xP{s@pav9U2d(5cZvFp%fJlM{aMT>z3XwG=^Eb!z+r zOVg)y^K?-$*mqxv8(V-nn(V;!U|XIus<@&YpzHuMH2&WM^2gGRIWJ8}{4kb|2 zl%Iv;xSIJfT3t`MqT7XUb>(#$g=Y@C(uzg4W*%R-{}7deCdLD`cjUv3s}c8Knyc>g zH`9s;lX7#(F->Hk$0V{8Ddm@?0!q{3f7QGeDf9ecB*hiiKO@d$WC4;$6UkLmPr zFU^W-nw&koKR>FOo?+jS5Ex|L+@@rBOzYPL))I6#{A)Ga(JxZ?kTl$<9<$ZGwSmdKh=W;2DTC>8}I|kN~g{YYj<_8 z`^X>8-=l3O~l*hPrE;Qs6cQ$ad8rZTw^tg(wf9LUzCb{C* zxA6Lqz*kvs$U3c4HTqVdGwTKW7 zvqv&0oLTsXu-yy~EWH_HpRm3_Og7^1h@?s2L-WFbdz3P#;M~(ZDZ5d|4EqW2D;~|` zrDbF6+DOaVK<8dNiCty=!Hpt@t^d7oz&7EQ6k!K6*0Hwo`RnScKH~RBmuXX=w41cF zN%!ZXEm9#O>+LbRm2Urltmx-M66kZ&>+fuuq%2=dSI-G5Lk72C3au2ft5=QExyxA& z`}23w`x0&Y4GofGRnPk;Un<^V9eWcan&A)#d0K?L7C-M|9qqjiR|63SLG+~!7;2M` z>J4_Z9Rvy)3u{hMxzaKKJ~To8JP#y*t+v&nya*jj&Q28P`7}wKrxbRo$4J^Y^<6Bz z!4A}}nC`nGP$vH`@{2p68|YDdZQC2H2~lRi(M6GIxC&|mK)2?3ukp2v z(TC_WT2oTQEiZ2z+Uv2e`~<%p%t1K#`>gxdg7UHWRLtUq^S;h>JPu3CX*9t0S~8Km z?T$c;8G_SESbu7XE3KyZuj6}5qVGZ5Z1SDe0u1CMU+nVKchm@h)ymBa6QA@Guwszf z@ZvvGC`KxkrJ3J5<_2#h53a^~+<6#1qVv_`k@zk?>0Y8H7qxntnbm5f%iQ)f@Qd11 z*u2q)>u7xuJed|S==CK@^~Q;!1eny>%Zg3x3%xCNxoLiZ1If6liIb4q2VH)_>FWBl zh|>BxVLSaST)rdBBHL5%zz4TEJ-^y`r*}EZfurcGNx(&;?=AvGY%I+b1HHnEum_pD#67Ds0zUe=GE>um_o-&eeni&kL|A}^ZG55k? 
zxlve9=^EpZQ!TEP+zd($sA}ztiZ@>|Hx2D*A8_ZO(x+eO=}4(E<|1}znp-^aRI$jO z>saRj#6!vI{w1yRas% zXt6(9Z|~)36352D?2F&fH*Ofpt*;b#YdvsAcj?LVijL}O-$et%4JF8!h8K0O_inB> zU?ty6IwNhl+eR{j(sn&ds(s6|&H$r+m|j54DA{#hQHvrs=27u%#mRAs1ZP&BiJ{f6 z2qg`4sxLQp3rT4?TEec@A61Uiz#sGaVxd)hIxQ)s;o#K3caVZa{8~4OTl7OdzGZa$ zD-^$MxuunOhPLtw9T~p&e5PEu^cu2a*Y%V|`Lr|dm(Bc^Pj~DLBjbNu^l*-B*Yn?> z%Z-l*<<&7pj#P8ZLCp2!8J)45EB3lJQSWAJGy6aVQv&kXyH#i!vvJ4hDsUHTV!&Fj zZik|QFM<0DY-5Vp1}O!53~=(mB3VOH_RjM#KtVAwk>A5>*m{^Q(q61PaVsiJ7qav% z8bSMBVuHOBt#*W>c`u}pT;$_`Nyv59bMBJ5SoxiDu4Ff#^V@lqM;D*KjCViDdX)X6 z$NExGP*~shu6p$;l%T0aL#S$MnX#5g|Amk7vC|KnAStkNg1+KE6)&#&4!oxeJA z`Cjb{Z6z$LKserqkrZCM87cu{0{hRVmB7{x1H&v(4VVWgfP>e-vc<2F){eYnwqm?K|cz)@VE^-e~O$(gI|EJ%PkYwieA{csuM`fd9DTWx#bTE zZs$H5-hslb2iG`vxjS8f*t`Mo@Tk90>?~Shz|UTByG=d~ zY0|tlu%pgoVrirBO}?%j`cxbe7NR&LZTtbVmlPpo`rHLVovn?P+w)QGa%S5(yFEW& zf7mPX!gKGbpC1_1ebXWCDAkV|PPOH;wKl~MJKpP8Hmk?rFb%m!AP00-HqZy~ z>3G^;&q-%(W3k9F(_>vd{l&!*O%E)OKk2G(Ihx`@RbL6n+l7kXAMtBCDt57AA!)(W zy4azTRpE1anuI7%&+{E&eZNHk{ClaMa@k+(WWc@s@1^bjDT4bvTLO=p**{#rnsY~{ z=%EPqkHz&KewBa4AW|?g{@KKCsAMu$g^qX_f?xFC<$J=1wSg%NaDVG96Fjo7?k(hd z*|==z*iHXeY@>>7>Tsc@zr@%*6~j6+|C1V@tfOPn9R?lEf7#7``xG;!lixa_#tOZ; zNa>RP+<(SjJeh9)>ZyiOy!Qb`@`d?>KF%F)z2$;qx9EDhpX)rn(^US|J|ip~eR{Ng z!>)ODtq#ozFNTmPCM$(jCOBMM&%i~j5e1>UsG+H9$DnJK8@DRxfS~zmwl~H5&a%(@ zdeLRFbjv>)?K->sLo`J>BYub)WOb(3iXv(vAkd7RpH2N#@hc{z=SO&y=NzVdOK;DAS}cISm0DuV0S2u%oNWjn0mkeV`Eg%6G?r3;-7tw2 z{Eg1$NE=qH*c&(hjtT$zN=A_VXV}hihz&5q&e>z2lC(l|co2Be;w(VNJuwJjG#N=- z(!=2`Jb1t6_>rDyL=*`IX#1le!#R`O;*h9)CpGW!QnYBK5aRR)=##;S1UA53aFpyA z47~u_^`gSDJSWT2e2|N+Nn%I=cH{((qcPJC-!S1{1XA=wn5bP z&*NcW7{5xTLi>OgcuD4o2TkF*a*tU6s5eXUc2hU`dc=&&aU3Y;7X`>;(?S8Z$K0R8 z4=l~k;DPtnFxz?Yr$C1f8XzvWF9DWTAZz3dFU*=f4@{UzmM!wf(JJ2WxYO^q|BJ!* zE*=IFt`xooXwBQ9%7j&6VEz0iO;&WUWVJAW;IjYmyV1uPY?D5yZ?yFX>mT66IM}!W z&hu)+B+HH6#UR!W3}!tYR5LNs0ijzLkbW@EVbWyACLec8@V`xtOh+Df`w(b*3dX|u z0%vp*q_DGc@JbwSKj8I&d*VT@VK#E6710RhNe$+1!vi+fqz;zn34`3b 
z@Y1=2=0aDs9Rv6IAOVKLWQ^VJqO`g@1Nf*TOu6S;DQ;Z+(fCpy-?uH`W?%sUQYg}cwrEnUEEmy==xJ^S$keD zX>0J3D5OtI`9K=IHEPdTY9YX8UE~>&u|Ni%BFnp*o!vgTIWmTd5Ub8+c7yd>U@>b5 znDpu94HbRlqQFO^E==-rp3~g0ZSahzQg2ywSwO6A{?%gJoEwX?C1i_;agi$-14=`+ z@fM(O6Tf6$FcopaZpDi+E=+qDLGzEImV&xL-iTpb8ytR;u!YwKlAw`@2Wnv=e$33J zrv6b*D5D~e%x-X`HP#trr!Kx?L?mKFdJX<$ZDdafnLAoKKFm?;4GdQ$uVSze$3jbcC0|-eu&~w%{<`X zFacccYm59%>gbUbhg!x-y{g9{ghiz2sQ0J5x%+W}x8LSw>9qffvP-E={WhK0S9SI1 zIz~k^_xpQWCDQ|m3W%|e-Nq9EnEurL2@OfF2JR6%(G7lX9hiMQ(1Y+n{dFMnW6YU0 z!dKpCyWY>oY@r|Eov?dvN5kY9cUzpXG|zTF$resKhnUm9ja6g92PQW{2MrzjR97nKz2Rr8Er z7VJqU$lah>={dbKU+hRMR!hD^MU6*I={NsN+6&BpNon$j9Pzm(AiBkK%99LV5UDdK z>h5ITf;{%SUDa!0oMFRy z8wHZ@4E@zh{WyuG#gOLaNq%sS5rHRQ8;aVv3vdbmwU3NTK$cGhiQ&lsuOc;oH}4&{ z9Lw0iLdT3Nz@LIi3>b8H0Xw_s&k+Y7LH_MZR*Tr#aRg@@Z&xUMBNV8;o!AdB$PKYk zyEJON$p^~=cNqA~-DKK%nLvFt5g6NehdO!h>R7$NNi++s`qdT#B}^`#G9W=a<=OZT~lV z=fA9D^w;?Ookwdtere6h{aH4e?DxPuRT~7j>B^htw+EyLJ@`|nfbU=F>TO%QLX@m2omV!G7+b9^`d=O6Arg=YVJbBD=I)$e}Gj%PpLuk6{q6mKVPG;{QUEU&X4 z*pfqtk03~q4?cV!%h_mMhcjPoVMVxg%Rb%})!LR)1#L|`U>wmreXAoQf z=->6V=w;;8pZqKr0suE97-NnX-+%QCyIo7mA6B*cAJF#LBLI7?E6X_Y;Mo7#S^fKr z0~E}#SX$R04lnUqYDjB;4EZ{MfNBEqM+^W)O?g0zYlUZBmHg|5{QDLD7rxrkjkamV zh%*qlV8Sj)|8yVc1TA*Yz*Vme(O7T6$d|bzxO-vIW*0<`MC>g!QQ6^CA8!z2R(D`D p(68#wFISUu@(1G`&ipk(;-3c7fB*P5XYc>g>EYWP{vUC&KH&fW diff --git a/PyTorch/build-in/Classification/Sequencer2D/sequencer/img/RIKKYOAI_main.png b/PyTorch/build-in/Classification/Sequencer2D/sequencer/img/RIKKYOAI_main.png deleted file mode 100644 index 11379407080366c527bd61d2bf362e3dc62c8238..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 99021 zcmeFZ`9IYA`#=6f!c?@FC~Kt>LaQxnNmBN;ETfJj$-XZ$krbhnRI+5r&a@~?7!*a> zx5%D-&(4hbT+gS@d4GTU{spgdyPeZHWuDK+<67?5{kmV*ocr3EYFlu-aR`EJQ9pM^ z7eQFX5QOC_>qhvUxJh|u_=U~koC*9Yz7PGI!6RAG4M7AE^)t%)9xul`OK)8sdcLto z;;n&gruvtrROa_e2|EPMIp1fox2tv>%0xy)WKuV~M1H?uHOF{kieoEFPuM0oBfwgyb zp9TJBf&W?He-`+k1^#D&|5@OF7Wkh9{(mhXJ+C~v{sWXhRqw9I?Y(5=aYJM?4d!PYou*Vk&j_UZn;%Cysn 
z*s&cuu}2cr2Tb1JQvw6tCHuci4)7G_p}glkYofnY+Z8{1z;P!jIfcCb?kz6Yn#;bm zYW2N;Nq&3)FH|3XPhf9fywvcVUJygild^SkFBX5SahoQNjU&jyJtEM5*^zr&x7z3E zu6XBXCvgaujxqKyl1!xamv49OJk~8sl#Y3UymaRrCli7Cv?UQMTrRMW2&%y%83k>m& zfB&!t_b2nZs>ykeLhTHy(-yVIVAH&s8Yt6NIh?KT!+d6kNgUH-nt_=cz9_?gdfiDJSx_itgU z9+Ip|DD&&%MTFwlKTOg6_AUWG5FGS4LPgX@&{$J#+l;S$ooB2x3&M5?x;*%+&Z9h{ z?AFVtoFtcpeSIeObu#DqHjv)`eZ}?6Q=)GB`f^?+A+zxURyw_cu3M48fWJo$jj;Zr z2_Mb$Ce4NI?JESoA4UH>UF6GT1QW zZ4&wMNw(>IYC1ddT}kYW6sqpJrwOk%ryQcnSKY@MDp06L3>Zk~|GhXXTpS{;O`VsS z`-eU=av7^{SeY<9v!@BCh|KA+Fc6OXwbR3w35;SdY4<#?F>gRx|9VI1 zXfTfK5SAb&GM)k_@B8;ngPeH69=@}muZjvH&vphfqfbY!oz-F3*`oZ!iW&Xg-G6`g zMwGRx=-$|~pS$-ZL`t(uK94F=9x{QHJRbhnin)-u5_`?>!21&p7AadRy0RiK)c+p& z(r=SDJFIjpht>N7GQ-5Q#VZ8-*7x z#^Sa1^>vWf7%r3D~$K7aqz^l~h|Lr2p9wQ^no-28PWLdpE=JBP!bhNr@v z(izEl3xFYyoAw~a;{Sf|n?rh8#=SPp80Ng4R?J9*u{s}S0n(RaFVm+>US%R7lh5BW zB5BXo-KJ!P2DyxZ25U|4(rz^$Uea%&o)Wvu^_3kVDpqD`o$RZq$jW>3W~ZZ%&q7=2G#8?D_^zm+ zc~IHW zs0Q}R5N|P%Ms_{Lcb(05!(G2igIpGZK;s?=XoBQ8m(e|`z;V^w@_+~)zc)P+UnB5s za;;;EJtuZEBDl9UctW@um(^rKO7m>wap6y4XF#})fRza9V5JGjrKKo6?RPvpwlg|@ z_Ohyh_XMKwd#e;HgLbemA>0t)2ltu>y=6SEuDJJtAwtMTk1Y1euWo79m;aO}yop5J2vXFc_g>_jLsRk*rykU7R+YE=|C$wH{J)jk{c z3D;eR$Vi-TE;253x2BekaDt%?6xa~Sq)a4qJ`XEk2uVcY9K<(i1q%p|ZH7upSuK;D z_gjn>uUOe#kfN@P35XZWCKl4yyy^ZpBvq(3ct|JvKGTZjOcyLfs9N*3*+@v zzrqgG_2%fIGor_4-jz^AZr#i@^SNGtzFrGnkoN>EY*->Sd`cbOc_10?TBlV0wyZ-~ zk+R{2(`xCp_r(S7Gq-KH@D;y2d1oBTq7l;m$JT)=$BsAZobid3-GG<|LJTXFgJCD` zdGKw6XjHmtR=}9BHqVimt*y<7h(EFZVs4CQxi}9{1hs?_$yPxxr+n8c@Z+}n^D)dX zMk^{VQO6}pf}4NLFP6xZCzRc@1!N~Zf*&!0d*ynx@VKm?p=;p#P!^D*vEJU@J>g|$ zd?s?jq|-E9uKU!z$lPt6Q#LuoRC9gl)t($FdjDcq<_4ro$B=Z^``;LBeG`FxzO~if zeX;Bx=G101NoPxF)vH#4{r%4)@nLFa&TGng@N)Mj`1pGEeSPWxy$)uEDiD8pmL{dq z5496R6@7P)mCy#3S*-KcEWe)#6!4ZV&0s*bJhlGfwj{&wV%+iHt7@QOcl(g2677;j zXGjOfNiBhAmO8yT(#URQ^yih~=Zg$mZdJtC^8*B} zj07~A(cf4&f8M(PAT4c{kFWt2@AUW^FujZH#7oP@?W0j_H_tY{01Zx4NfwGzRt>|*U!W-@4Jvy;Cf)*C7Q4y8jwUqSgvSi zKv9IGXGK!WS<;vgMAE#Zvop+z%TCmF=k)}LvAo%Nnz2b?QmuSoLfKc_*GBr%-a~59 
z-o)v<2aw~Yv3NP1wM^uAD^Sf@Y4kbO=TS0jdclNY)F3U>f%kDFuHu40p00`I_n52Z zpPw)z*Yi$^e%RONI?Ah*mFbVf&!Eg?5b~=j!^z}Tx_SuCpc)rxLH|XZ&?oLi$k$Oq zn18gU7FTz(+k}}xsnP7{EGI(ZT$#8f3b#BN^%VbFUqA0pkr7q~Xp9Jfp$NAlGJb^J zE1;$t%3=I1v>ZC0g+rbAkSU2>TUI1{nS>gtDzbjn7 z#&cU0PCFzrH@bf1PuF}EuCj7pHFITR_O9g~W_LqBTCn@s&yKvSg&UCF;EV}jBW2`= z8|Sv}WC*f%C?S(P2!IAWKqH3f#dWKRYrmFP{Gd|CDM*(F(R1-=)Ey8N`RwTFkaQLAnhQ4(LRb;*aT{S5?_c&nc#67YJCKAz%lD#9ujdD>txPP-IOq~0JHKN#a92~~ zUwE|_^ZH6H+^Poe%-F$dGBLi#c3YbRjRARB zkwLcLzwrhN-}_=}W_vHd<+Fe0PdCeT^JN+9OVgu+9y2o{gUbJR|FAL_X5{HJhyuz3 zC}&)~Kh#pcj1GF`zAbzI-}Pe%y5+0Z_(X(I3ha|_n*?O-wZTV+ObvricrZ)5C``yjg z;P9H46jEh?*AcgpuKF1W^*WjkZx7vdxC-MIW+} zQ`~p)KFs(`OJ)n@Kd?>P`v-xGFMJ$tq_73o=a@V)F|L7U`I4s6-M~oRJa%r ziioAq26#poJfk>?MzKP8M($s3)nB1%>}ciUrxONjsR6E6qh|Z1cNsoRYVTgFrk`u& zf~O&gf+Q7hQ5PWInY$TLQA1`#|MCfSHMrKv`s9gfFS?fD!E6ameKKZ*yOx2xwQ-w8 zgzBxJF==)XOyezJMKs*~8svSr;olI$EvWHGye{ck5lJ8S&+Pb4VyUHm{y5gK@a0Tl zni>AB2To+}Y2ilv(U!v4I9kEzLMQw@#{+)^H(@ypmM~V0dKU!;q4XG2pK$hk`5}-w zToq)0{)H1=VXB^^MLtL)D^Y;=WQ|m+WZTq^1xNy=E^cN9&aUWJZ~+2-8`^{$Yp1jq zh}uZF@KFwU{YkcVnJA2nR!kMl55)gr7umeHnNy#;^YNpt2y*9(Fl&5X`@zogcXg_O z0>Rt>Xb734nQIW`!dbvh*kRZC%W$O9{Ja2ds{{4)FM7DIR#fiYWXGFR>5>JR)Q`kz zfnArG5G4e?#8g%t7=WCPj<%8SX?k{%LPdP&Q_R|nGZ$%}D>?1LPXun_)*n9XzD4Ap zOm}%=alFcQGk_1)0G+K$jxljA90GU684;3!DgjPH@Uz$GONnIQxJ4zXYW*s3b1Y5H z@Q=KzUCL`n-Z{ptFByctmeHXFZ}q&c|6=hoT<0r)pg367z)i-R4USRanx5s|S0T{z zMGs;tMZqs)F?9ZQab>(edXtJ|@tD0g6)Y8@wGd0!v1t+FyOYhNq-}o}d&XJx5EPx1 z_Vkbvmkpmh`dUtOE|pfjV8%a)mU}57^MFXU7e2Bb&TDm*bA$FxYp|r|@Lq6%)q^|m za)-LK59dci{gIy1AGIY(8b+J*N2S?$47oLZS&y%k%I@vI(pYLVP(q_sK6ncpm2G$A zpxL&a8-gd-Ejfx=GEZjW{4aRg`YyoJ&p#9>0X)Qq|;$+?%juj*9o6 z_tDU{^eEQxA}3CgA-$iSPHYeh$-aps1!3{gjKzoKsQzX5{S$%NOl@bhisv3=`?fm2 zs|8;mw{DFM4q3b?rZf%*bvZ?aZIp3Wd`NZC|N5by4BHG2O{vEh&(!Tg`UQcn>;ck( zJbd+Ebm&DY&?Cg{QmFNx_ooWVbhg#Xr}lXHbtwmSy>5wgfrzF&KAUW^UD+Cu=Qs_S zOOYjDDgWWB`T&G&^zh0e(b(u#_e&p2ZiavO3`a|-Zzz%M#2s00acL#G+uPJl8vNMS zi6^=tg7UH>{INl3jJYadF?LroH&>!PpV9o3xWAGH&}IbEm$o;JkZZtAUQZFs?Qo+t)*q5+P89MXcA7t#2>vLM!Tal~0- 
zq+#Kyonx07{;WT)V!mZ|mR)4-JB-5Uey z9UrqtD!Mbin?G?^3+#;*N3C`CQk&Ed;>dU)8a)IC@R@iTf8J&!Bk=#;Er{aNem*n{ z^raLR>qj^WelWD=xYrGk1t1{&SY?2#2hj!Pm{An=56c0TN>EchP$!H4{qG8XLY3u=fiIjoAC8m`O>%uFvVv z^Ruzs`&kH_s3V>+p%p{hBj(0f?DYYh#|%8oYvtX?g8~kDZ|rd*n-^TzM2u@cZouCi z;*r^~>AQxR>;tPMegUnBS~oy z>2^>BCVl%Ge3hJrQwhI*&HD)O;s2OVw(IQS;{yvh)TJJ;j#yxdVj#?;5lND%hmITJ z{CPXigvG;gVBwfE~``YTU<3g6rO8nEHjc-Hot)k09s z9JQhdw>{Hx$YsBa(ZWHv3)j$*Ju_5#bCv`1}n}uD3l}%lSIK^6^h(LyCv85 zgkRDi&UU0&^+RfK)|;s(!%rKl|3#O5XqpB8Hm0iX>-mzYv-Os8U?~@R=?!?X)t1mg z21Hw!Hi?r6%4VWec^$;akQ~b8NGikO{%xDgSjcIwL;WRL@MlXTv!%pxH{wly%deGM z%?13n)&FwvS1{y*WZ0SET}lPBk0&kVT($w7l0)efq5lYuaeB>l?tP??<42bX@xaQOM0%&H2pdtL5gd&A7<5^`Sb{&n!Cwdn*;Qw?xtLL`x zX)+-3!eIcLgk&roc}+0~k+gt(Pz#n5xe^7x_X}iN2KHX!nO$Ou(!LGFK>Stk{*9t* zD<3t-*i#q~a{cbeYi3j#M#{}~|FB{H2?ZuZ$r+0)%7Ut@_lsV8VV^SdRWe05tbd&B zKNL-WUd>m|QIorSvBtQ{7?MVu_l`J1Iq&V#Ab*O(46rF0JA2zdaF$a)3-Yrv;`~~= zP&6ahDG>mOYo)5D2I<2pv&(-P7|QZoDm{)O2v1hG*((g0CLGZqYvWu`zaFMUh}Bzb z!)kat`yb|w#K7JbxVtF)--(G^uKz0+HXU1Z&(IwK#E)xMqPIkCyNGKXgPWr z+c-Z%sEW^uDOS{g{?v`mYRnvFxQ*4$ozA&u4W7_fr0Dhq4G4nE(y^Hg*(yN#Df=H0 zJcJqLBJe57W3-^nSzDYV){0E(jEg1Mvd!RC(C2(%&X>Eqml*Q9gWN%uj$n6QBLq2}p*mAjFpn^*yc)qn{ zIqE?mj5!GX=gYQj%?pi6SI6@GldGp-}x5AMxQ9z&LyL^ zc>n&`U0)gisI`S$j1a=SAr^pV&=VlGA*j{CXp4Y#Gf4o?jELHL9^J=6#?=jLiwJsV-M%{Y3oUcZ4~WycV+7A)Z`qpkdn3^Q3vU2Z zlV#=pZQu@IRG>u{ST^cwDb&|`#Kiz8$X%=K-1^*NwXRx~ty7n52c6&-^fz+kc)>vJ zum11tX@8l4CYpW#1p?|sikfz5V0gTLT~xT+li3hK$lElU%zo02FNXF zr3@)C-SFvW!J91GIIo31|MH~(>9Bw>qNK$9iM2g*skk8%zGDK1Y4i5f{jZo2%IgF; zIoAX*YEP6=x9MXJFOb(yPDSJPKhrgcxHRc7W=hcIu<2Y><8WOgb!dV zDfF~EGvZ7@IT1frA#D@&>?SnQS;U#zP$~?`(mlkBwnyX9vWHIu;lD?>M|q7QmJ&(} zJGm>8xrU~sIYEA^8Vka+Dls`Uv!j(R`fsHZL)-rboncwhMYmZsg+6FiKGeM;eUcNA zp3FDQ9Ym|$G8!j09{_+qp8JsG)T4`JM!kaqa1bp3H;-aC-vGh*wI2V06knmQc;~N z5`AN=;sXTq&+o4J;eNw*FQ#-;#F-N^%AU;bFtY8Fuz{zfD>C-k9Wi95Q29}|NV=$o zvSxC_N}^yGT7Td2%)d>T$Vv;Bb2T9Uf!OvbZb@Ofqw%ICS+7RR)2q`Bup}cHKv8`( znD~^z_kt1rM$4^~a^w66`X>IxhS0$)g^1@-W^Gm^mD8<2d8Ab 
z(y`%BTO49VjL!@ud3*4q`oKv(l`vBWqcUqj`j>n+b|lM`4&A_^!Uh>xAJAF4{F)ZD-`n{=ndPA zzCZ4q-zd$T)tZX4fO@DE_sMu=Fo(u+YX#14p4jj!3N!Loj($N%21d+RkwCE~gFS)p zp-|bL63gdTyTnqV_+9&P(~(#lLeV_|Ergfy5RH^!~3#|?T;d^saLLACW;rFP5WOur%1&eHJsBFeHB!q?X9Wx_PmZ)@Un!m4!iR4d=hkEf1sDqP#fCKS z_E!rz6f!xPG5Z9E31OX3x~Rl23<#9b-YI`F|5l=iX#j51OGQ^|z?Q6B21GOkT&+)a z#*qZpi$((07#bf!8yi0r-1KPsqI?{mp+o+Z_%fS_bj;M-r!yQ2vscyAeP@4^2p3KN z;fRqtwF5yUxEUA->g{Ml7cac-=O0jzNf#HI+|w+2iabhfoMYAEOtX7S$I3f$0DKEs){gY2S)Y{ zg>==9%a1uccBc?99=^!P`?Me|eR`X|gQu4ty>n7}d;_9%Yh|Bc7~1JGD_NexGD=ki z?RhhUuVOUuZ#qrQ!jo{KZPDmI?_{5tiZ6RGE6;-8ILa=^h&&8gW4-rL)rc9<0J7u5 zSpaQJ@EtTZ1@oYFK6vXcgZae={sd}3+$FQ%T=5tuztKkDPewHPlI02u$(@V{`S=48 zvi01$leeI(MM)ctml0u9qAL7Tw#AP4+qcNRB~7DLHZyRqb=?H;47x--|YwE z4Bi)=@P5>vdiPhQVrEBbFetdkYAnthm(pk{)r^Rs)ctK+S8r=DAnIPcs018*4<*jV z1)8}hmJW#^u{S&0T5_j+{TB9CqSV#Qeyy;}M{3f}e3V!JtIVAv`%)!m?rD%ID;vKr zu1!gwf|8O3v7-6q>mDQ{k#GHxn2YZ)9f{sKePdAgUfn)CH=RKA%A4V6T*#C=#eHz^(-`Q?G?b1)bpw}w!saS^e zd0Deq2VJBh^CslkW#CX6UYDt}8z`eT7<4CGL4_XUlW0ExA?Co_OekKS`m=+^Unufh zA-ya&ql|G#i;qrF=$>wO@}rk{&P|Q6LsKKD4r+s|`V{K&<~fXzapEy_IUaMv*5dsR zmQP-zatf&cF?o4gR|bh)Qr%`S2EaS9TjoKFBYSyiIGx~KGRh8xI{s|3r&dE6_a+vC zY77XJf!Uz{{ykbS+;ThU15#2yTi(=JgGdm1OMq0$F+4cgzFKHUe#hV7E&gQ%3=fwNFEhhWG9yqun4-$p1exR*UAahNe>>5YJEp@Z2+)^0B}G z1sW<|f-@nuS#e!>Vr z4@EZhlq+*Qr1?b*Vi7b|dMdFBxCj+c3HXKGV&u@$H}BO;T4wlh=C7t?*EG;Jr`TCz zyXeYU&SAuJdaD%@beR|zBwh*Z|Hg}+T|1;my0{)k57!C$&8M2Vu13`N_#S|`b zhk|NYB>_bijDKnMAuzk2AwHo*hw+w&^d*)`6ShsUCE99%rCrZ$c^6(LUOqdx5JIz0 zz6EL=ISi#Bx<(LQH=km)R*3_>`NI^Zdj6f$B2cWG+k;+9pt|_@1o`-)4nAA?zx4TQ zM}?2^D#Th&xrHsztiA@;sNrQql~W-7n8i$~#DNwMC2@@Op-34$*=`-HHI?1t(r@vA z2vu`7RBO7bD|mA&63j=@$=1k| z*%XiiR*U|eSbJ5HGc?1IRy-@UtM$?QWn=fFFk{M$C4it7^~vF$&fMBl zie9ClpjGNHU_5Kn%MxvEOS1dm?jZRg?&hs~Qy;cPKOaQMdr;k^Xxmnw4@_xelV5(rF<1P&|LzChtjpMlW@_|<%WXKHfn?lNok6LU*6gJ%<+%Jw z8*@1ynjCvABa)d4;A&JQPsNHjc||h}cCe?FgJ$Nx8SL{^-tW)U6*6yW}s64Rjlnx&n_k!(V$@@+2F_ z-%W@4K~=k$)znX53zQcyJw2g5b;DJ$ynCx%2gC0vO#7h-W4;lGcs}5_ebe--uX(o1*rP3P|Th$!)0(QC`7raHAF`&Ft#!h 
zX5V8zI*YZ3vmi|%+8eY0L#-tURHl9bl`vesv60l|W`q@L6TgNmyICbCpL!t#UeI3N zErP`VW?7WfYLpivV=2&L-jCKJxoi22GZj#8XN+*H?3O2(P1?={poK~=0H-19ORVZ%N+uc=(`psn2R(MfrXoaGe9L%C*@78h*lHKF}hSU2zi zGfadu7Qk?505V)vvcYW6(~gV2{adcj>ITWj{C`L>fMsd%^N!j#DRx=&^R?zt(r<5_ z+6CK?D3i+IG4v1`dE9S__Ix|(7Z@IvKuf(ob)yKOtQX-@yY%DaTgusds59Q3cpuuC z5+Dj;xRwF&^;)sCHl(mtT0K@fo+`O^abnDZ=AO(!pBt*tivuS00v;SO_rY+nf=+`c z7aa=c6MMsse3`-IL&5c8q>h%ecF@qtGbo3keeRx-UEAZFucyZR&68RVjB6qtV_c>M zoSPSp=S)OB%#Ev^;dq|Y{Yz)h1hwGw30NUm@9qp|k1d-vOui?pC|-Z_@w-35S%FT! z5RwZjd61gZKSVH}tZLTsIaeQfxZ?`6tKUz->`tK*ASpwpQt_Wj8wCr$3dMhTpF6IF zz1fS90MnecipXd&I3x>{;d-s{yMwX?4Abkh^?&v2`-1+C$(O($uN@u@e=E4OkrK2X@GcnrKL!$q?|P3Y@vhMSs!<^H zF{m|$ym!+H<{4z{zaA8?g%Axt=4?d75(EsTn{V93*8r=WsmqH6>C?v(>jfk+nL*M3 zB~I^mBDJsGj3iSycmr??Cfkm7XSuP7B)wET0TW)svfZE~*M9yt9#3oUNqndJawXKW zyI^J)LSmki+wPM4!i=qbG!QTQ^b4)$OKfW}!r1}^SO+aml=ZW}`xU8=gTctihg&zb z+>9K?Qy)&Le9KP+Zk}R3Ur(c0t1(wED1Zydx4Ob`hgK)0p?Q!DI-8uIV@~$K)6Hn1 z5R6fLaqm)^eGVfn>v90$BnkzBa1W7?51ABQ)799avxTC+p`XWzoXZcwAN6}deAMqO zKqGV>d*cq3c`bG!S0>!=>kze|oE<^b6wPh;zZ4N>(q*FNtb(>}Rj1}NktDD&*B1;6 z8hP)p}bwu%XK88n40(NhLuO3Ej-d*?S%_vl(K@7DZ0@d3&%viKHv=*=Ccm1&V z3kTjGPw_G$&|I##1oi*OsjiH@?|bZH+*PfPOJgvAxb^#-;*>#W-agm@*%WzAM%PEg zMk`n)>R9%^k;k)l@SAfzWx*`i{}*5d)JRG+Hg*B_K{$yu#>dqFu7e3 z4i1U&jQf>(Tq9z^DU$_>z#3=dC)PNhg=V#2>6-9 zJh_lN_?3mv+_K2pryXej`7ji1X}R4YrsG+~AkQikpLs$q_!i{Ma2NvL{Q9fWra45$GKg5uL6IDO)%!%L@Vg}zcXl#<*;&&JIU*TU#LdD$Nx{6NbQ054 zUZJD^G~T?;@msIC8cBBVTllGCd3#nbXJy#0=S`x&jdvU$-pTkQ@JFAf=4e}X#9=zl z$o%k^qWH3|;iaH+kNWDa3d%+ldw~9a*(DLs_3@d!EhGs{9y5d0(-Ro=5LAbii~X3= z5KJ(1XNFOXce*kf!WD{QPM`7<{d2?_-dL{66dZ4>m2%T3hY_b`MD}%P#zr$JZKHAx z7*$7THuFRZ+W0J;Zla$!7Zn&3;Wo=EI#+MTK+?kGXoLsz{_R50mdTtpo%lZjG(k@` zk-Sr#AcTM6RSx{MFg9glQFz1*AK~3H+s%obZ0(jPxfGJzcuqvUD11hEW_2ky>AkH? 
zA+bUJQ&j6Uv}HLX%!Hisz<`Bt8lzFe*%U2l1`WwBvr=mLN=?6j(nc%&#PcX=OJ_#vN4 zFfV)rdSZ4N)Gn2a@UrywA(28!NGL5nW&D zN{BI$9{=aYAww4-8;f90)tS$gBXctx{o%j)jfPXX45e$G^-%G_O5LT0p{Eg(c#!pp zY5){IRmb_lxJ^8U(F~dVOL@!q6URU{|nzyb`O01@|LjNue5)m{^4q< z7YAywUw&cocD8)}xLq-a`XWV-`>!8s*=QmCAV5h3b7%gry6vRBMDo@HdI@5$IS zVj1|&wmt}SBd;QaV< zJA0#BJODG1x3aA8B~jr zvgIo|4L6DELx}JP4E#tum{A**LEui{b~A$ZmMmV&El>VBGdRQHIVttQoeTuV-U5jB zvclrKPxK=3O&mk18bBsDIqO@Sr;zXHwnoam^?& zunc!>A=dX?#lJ=4rvVg=TJvKw3%`AfGd^|GLwwx_53?aH^?u8_FOu}sbAQhZzcA%V zrUnRF{rp;ReSER2HJ}DJ(-z6*{*=!U*qrN(^;TpJn@S_U(2^gogw3|9LGum1(q>lr zxwobm6*5Y#GVg?Enj~q&U=rrq@27xNoy+f+X873v7sf3Ai@@r*gXX9R#<(#!IaZmU%m`J&^-A^qF zxzsBIrdZ6j9W7_pQ#4lF$Es6RAUsg`PgG#BvwGp4G8`bakPY$Y5TkI?x{KHIW}JNJg7;Yv-1ByhyT$k6o#Yo*d51<#uk_u2sEnuKx^23zRksPKPLod zzDQCekN)Ysa{RMLRG@I|X8AMcOIlj*CuspWmaB-0_*kR_FMRr+9z*F|Pnu*IP((?| zUnjF4I%_MxWx?}?yW^1GMb=lkLC${GddZ@se<|owt$S}j?c+q@zL8fn2gI0zp6a9> zjE_5?u1X^rC53e3H5`w-UHs-6p!WJchO;-@-<@3y*{^DMXr)iUUEt2A`)b#BQ;x2; zVS}eZXao%s6#e?Uc;gqKldy`kweJ3SFtx|J)(1vNKSrTjCdgA0tCTN-j&VX=w`q(- zSo!CI5c^%ryMB4E+z%W6nxbsAZ7HaKc&4wtTyat%?;WN3vN7n=w$oh*$zr{a94tqb z$>3f=Z#L4YGaZ-P#lxkJIS$k9&191&(AftV85Lvf$sn+{PT9cvbL!nsi|>o+--<%L z&x_ETd*hzi1tOHu)yl|#OG5LqqDh-c+`C%0rkL`WM145zlzIC2k=<8(O6qOvY3>C< z%H=p;Xo$&TSfM8^qjif=4sh`T_@CI1<|(RN@Q07vjnG2eKRByfAd>gX=i?;6Vr#tr z+mjkU0grbB_rj9scX127=s|kNCQ!!b!$nzEfrRN7ux>FG!s(W?91@7usVWm55D*%q zd6jf}Hu*Y83>?}s4m-6Y?RJHKfadGMK5g2ou#C zHF2Z07(IhzLg|^f&nLS0swR;p_aOlxxY8CQTPIb6!;|L(l^f>RS?}A{LoC#)-Vs%huiC7CuWyE_L(?EfG=I^0$Ee{vFK2(Ky;vwE z-rvTI5{4^YxG)~=4ReN;l@-1KoM+rrS>@`~TlRGJ)Xh=yKWw*55uGd)6IDjpYY|e86B$S`)L^*Qol7qHp0#5P-fjnF5u~b_2`-%x=#y-L3!KFY_!2w zwH(c+EU|Pf!1~8K>u2c5=+~pUy5672*6(yvnba@P!P_LrsU$F!O~0s6;NOwB=UR5O zljEhRz_qtQ^r4>ta^gAG_hHju`>2UZc8hwJGOdT4fNBrChdMTy7}G6a_xV|Aec%|b|OoR=&7t5f4aXJz9bPZx!whZpaq+9 z4dyJcJY!U{`g4wDC#y)3LT6>_#yF|&(;M*{y)U;c!LAPma_&FnN?I+}GFT2;1;xPN zdrf_rVA0?x6xmQAyE#SS0W-(h){6pL9{JqTy>ZUcsWXaxadAF!Gsf;I4ImnHyt0Lk zX#K5sVe5r#s^!?m$501IBiY!TY8+4&= 
zwk3b@DYmICC=Wx(`t&3p2eA?9<)2YIg}94ke_B~SogtDxqZp9|C#UiCi6msg%xid#;GTTJ`;Obm3t3n&txZY!_56cSz{c)bNGi!z8 zVeCL8@o5IUd0gRgT&OK^x9q+N46Tfyo;n z{Rb51wO|L{)jm8&aWX4MPM!@p<(==O9pSQDEj`r5aF_*`G>4Qsm{sMnT@AT5`Pf4; zqUNg2qJ_JF%L9pnNmzcrTCXNMtta$hZQ9Tj#0T}!^CB=o7Y?>wvvhUhJ@ev0J<~gh z?c+z~#G#{952^8b$KOYaUK^$<+4nI2qWm%`dzR5#P`eQ5B$- z{#&g@ko=IB>-ZmUdQ^z3PSr|?i%C2Gw_|r(y}DhljMUKm7FMc)@Vg@q5cwI%`mks* zX%90k266Oz4ao1o?4@dDf=#T)@}A7BBQhu#irm>Ox*l@^<753q z(F(7AY0XWd=U{i8%vfxykg6^lx?)zf(Y-jrostlu$&wZxp2L&>)M?LrzD?_1SD}gQ z)5fptwWyvOD1X*X@kbdgtXt})vZqn$d~g1OS#6EEQ%m0S%TR-? z>Sb6cPU}qak22Z_xA3CuN@F5hvj1Rh)OJIMQ z0PNf6=ubxO9nlY7NIWBKDL659oyst;vN3I};#qd6d7iV+2k7`wsRY)kSx73L7`h88 zqG@ex&w|Pq$1k_$41crtF6ltmKz93d`Ecxl5uBY#|2{O!0>c*-^FLaa#Pf_6(N~(g)c2;h;QE{lv_j>X?S^frKo`Hh z2!bqQNd98f-KY%&Z*5_tlChu9l>?i|{*eO}A8MngX#LKcut7uaC%>S0|L|F}0ovNZ z#oW6fa&=gg97-i9mbkdl!;jeuGE!hO-zTSKEVFKt=o};Cqiaw{`o2xFHOfkUH{}9y zcX-P5(Ihovo?Y}D_4x*h6Z(CpU57Rzl#SRiR@Ms&DB<$US+y5T<3OMFrQUW_jFM+r zy>#mdd~?j_uimvzsJy&rn##-{XU8gYMD3|B;DO z7~SJ}BDcCMKdF=W(`<^Yf&waEHzH<)^}2!4PgEUaM67!MM?B@&E%NCjbVLcySdl=-<#^RPgD7PfD^S>U5Z{kyr zqtdp37B_*CXM(r@$N>=PC&7mwcQf6|q@?@KCr1ca(c#0#CHE`EOP?p+JaaTx z(&N_gg>77mIiYc{lqq{g&&^Cuxmk2SLqka{1X$nG^%17h8oClo@j@xeKSqkQn^T)0 zeN7i$Ue!N6xFGKB1*idIs<5i*Hz@-^$ao~hdhDnH7h%|b;Pe#Vxj zPnKM#l~3l2>vGya`g#O;+-xd;?|LX)tzn;PJ1yqj4>r-cZHCBJmGu}VVllh}i{bOj z*K!K|$3oj5DyDUnXz;^6*NIGmd_(23_#ms>Z+`vhtv4NUA|5hC7n{YNn1UHjh(RrkH;GN*b@`dy08CsJ~~rZ-aXkx@0+^W!hP?y#DC7- zwFgaLbcva=0;6#9KWH z19v|(^LBFh!w!ID=Py3I>5b{!xZK2b8njr?Q8QHQ1A@#y2(znpeo2nBLRj-A)(6D* zLOH6m+F@m`K#yk?!CtLb4ux&3#oBD^=C6JQZ4&%)r$(ptkM!Ex0soys-z}Vk?f@Z( z-+CcZLZdo1y{pRn$)Z-;WuO2twrb2)c2D1evXGltc4{aH7;Jd0FEwD<2Rg$~FYPZ~ za+l^)Qs&d2?0)Q$DSUpyA>K1P*+=1d>&39U9{o~Ma+!kgt*%Xo_PKRQTM=EZBln?{ zv%6(pv_Q95##gx~y7t=)({t|^wNd{nr%({Ot-AX{iRaAPd?&U^l_D2hbDZeX=h^;T zW8QJL^2Fxikd(}jYvFgFwb}pE8PhFYNFPX(ATXg5<0POp2xrZ*`XYG;y1&-Lf^KXW z`=YlE9vn@HyGSI>aO7uRPEAzlLi<=0n9O11SLEYVAC{!1$->a)kz+I&)z?+BV=DB) z?R$Gw9ba-UOZuKV({3E}$aNp|HpBKO-)1lVh_O&Cq}R12s$Q7RFBJq 
z4PpbX!*6dV2WQ{3S-l+h+(&TwhXrKj`i|Vxf30Ny82SxKpEZ?`Q7E^ZWn+=6`_)q( z={wVg&u=+!E;uNBu7Tz8*j`ngOleIH+0A8J?%S%e0SWYD+PXe{MTl6Q-iW^Lxrvek zUy8Mz^G{AG(vYM5H?jFY@oi~$WbfM)&#YcFVIB1EH7>D83`y|9m#Hr;=Bn*)yp5df z3aFwv3B{aG-;#atf#tn~CdV!3^G_Dsayov7^I)slgS*dO)2CxB#&+(@ghj7Kp_yG< z1n;a1RqJJ62aRfpg17Jm-L=v#soVAn9Fu>B0HzCmQCIk2+0XYsHdz82gw@OJ%FG?H zu8pP0$#!`63z#T0A5g*dS41$|jQu~p-aH=a{(B!ENo2`VmdKh)2!#~c*LKP>_9dbe zl`UCD_CivjD3Ow~j3vcb#*#hBz9-4PuY)l&zw;X1_xt_%e80c%KlkHt|I^EJIp{qQrU;42tN)m6$au~A!TjO-w z4ct0TQmYlptHMK0*p^aiuxu@FtG23xH$c+Aat|7p@p?mlRpkH55&MRx6g-UP@0HoE zTcn<`JAG8)Yrr;Q!+ANCwj(Ez>}sTUcl=jYqtcDd`Fuls?{S7&>b6^jQi zcu&?q*MW%SeVMIlyM}bD13D13wLWRH)6somBX)ubu+-dOSCen(Qj_(R*~MYOMU}zo zeW}Op2qG>@>e(u}gI@=$g;sxJI9s;i*;7UnaDE}Sf#g@M7`H)lKUc0oBvl~II&nHr zhP;;CSecw(iA@@qdwb){1a#Php8faAkHT0F@qQzUajfCXv+rpt<5Q!>D>^5K1*Z{@ z)=u%ecM$|5J?6tlNj|LIbu$r0psYXit3KJV4XM!8kSp%1(5emAiVx9yDjXs~Ej7R!p0d>g{ls(A3*l~0Y8?WN7Bdd2ul~OP~+7G*Rf0j{A#`Uyfn~=<(d(s0Qo0>_)74W3UgM;)@BhlBy8&An50!T|s z4(wW58g#p{TqWs2Oha{cNXJ=ApH{q-NxGXEtFh|)~KTx#;A^=01QaPOP)wdRM9?sazeD?TYcF%G8oJVE7OcF0Y3U)^QcG_SF%WYx1 ztgFGyZSQ$5$4V*MZss$Nz{qC|GbRBrX@ewwG9sYcXv5bSM#=LT-D})-kr;Gia^PV} z4SnG*fF_FGYm|Zcth{fw|ehm z4p=KnxB%Hy1@AxO)bv00;a^l~&6pm+YfACUVhg0{xKyzJ{BKbOZCU{YXx)@U`$NU{cNS+&yJD8>u^bSn z)EsQP#p&XL5R@blKt0mVpFSg}7W{lB&vfyPm=4pA;?|uloxA&>{WDBG)q1}yTbiZy zBpjfKLE%gr6-VW&LI~c5kT**J-<{`JaP? 
zK0Y+RbFyE3ST5LRxVrI=7vMD|6*qGWl`$LNh@CsFXmpCp7!AQ$r*aq%jaIFvx=|K< z?#AJ72Ow5exkcf52-x;4Dz?bkhoC2hNM!A-&U2-A;_j4ok-fk3ji_(+KjD~Dk~JPL zus-@XbaVq5*WDxZwkuA47C~j^Ss%uiNl(mt`p!6^9_Vy6KEyOtIPV*}%K(n~M6oV< z2pK#NzT*jfawbc})4dC+dBMPDg(4L$V{2~rd2a@6qx&!2>}YK|GfdErrZZ1neW2WQ zmIWY0k}F!JE1e@OS);S@wQscNbTvGgLCUFIsHCtaoo$GmG*AJx6y+_engFu*+cjYCpCei09EmB1 zyKEB#kAZTY*g}IOa}#*(g${;YjK$Jzc1iFMKE0Y--9Q2m86Fi)e|+%^gIjV?E?c>o zcj{)cyP^NBeW!_~#thui=iN=iYqQ_EoASi(V~ODk)|>e(^v=U{T(4JIG*bqI)K1Y@ z7{W@2CYP@woz`~0s)Wx^+!FoV4Q~yl0^S-U_BZz$RNz3feoY*w zA6r@)z<%*xi@1HWbK(0~*FX|V=$S$z>zP~#xB_%(j@qd;`MBRsw{99PnspBE?|L&m zmiFN7g>z`zIBK+~{Ec>mw4k;l&lkLD5@)L{x-p~eBYPR1jgY~v$dQHTmMp&=72i)l z@~CnYX8ulDSWY1LD(Hkd%M-`eqX+{L`vc4%q zTtS8`K|PH;YZ1tpPw3Be-DdN`zf{mz&(m0sg+=Rd*Nz$rUN9=_Eb)e(RpOHiJQ?ry zUnBSv0&*&ESR*6I8dD%tufN=ZO17b*c3-H%m3NU<%7(4XJdns%Shnn=n0KOs5TZS@ zW_6Md)C_AFk%aL=+YgY|U<5BbUvmMO(_ zEmim+PVy_av!jQpQ0A{-Fm7mE-%_Zlg(EJUy4sW3g0EgB%r$kjK&B3f++UhBXZJmu zv1kT{mroXpqr0y1lc+1 z$b@|N0}K)Wk%g(AZB#D%1Q|$bvEyVI%3S>^!8+#aqU2V+sbD>3CVQK50s;gEy6sr0 z4n?r}jQB!;tZ!Cq_|~cxzH+rp+}P1R6Gk&X^reCIQ9G-|H%1P7x?R4WrsJx3*h-^H z#;eq=+(H(%)OMmbGDE(n-JI!^*9NnZ;0#5!^W1jMPI!07g?Nl?`yC#3DKM=CKco!N zBerZcAA?qy-7e&aw}kYwQj_>DjI&ir(bH6qNm1*tkM6^R*TZ)-7rni(XSeZl+E>3& z!N*C>Rsdj9sW4_IlKod|wl&|pXtBVFSio(ecLDY8L>U>p9u69v ziX5n&lRJxjBlNXTR^R0cRdnOT8g>arT1U2$v9IqOIpck5#O%-$&qT*y&e8G{45+`E zrjlCR?;xOQ*cNWdXxX*f#Mk7y7jJ(qmG=9t!0S>;bCvR&zd-BT7i&8pnfu=R!_dK! zt|gYnvAdL4fsCfo@E(uMsX<)l1fC*&Vw%}T9dCY8l5}BYUT!jsbM%8?F~w!>kwDl@DWZ(0Qedj?uYN_3zT2;SWc(gczR>KlcK<-52|=Dij=<=|<9XIz|x zk2&R@dmNduJ2ozJ=SXSeHrtSD$arc_pH+dJ1w>KDjiSNhVcCzS(eRdd@7F6U=<$z) z3FeB5!uK=2U%CXrri4zKN`joTHy%je^6};?7i%<70wn*DoP!gF04ntNoS(^)k$U0! 
zoLA+?v=md#qg`VL``C@qYKVS102gXYVBA`oKR>jG#nP!L{M(h(KFYMN;*(a_;9zEI z6Oh**3~cs~8gg9Xy4yu@>Zy7CsZqd3DF`G+*VZ zq7OvvHp25Yi$b)!)HQNsme{N7C)Doq(5<7bml2eh9382lKRq zHP|qnM<5@q`_`yh1aoFO=RdOUSztYEUNb@=yrbrV8xx zLL+vF36+{<**A6?VvRcsmRqDaibGoXI+#QB7}Xbi~M$iec@&J3GWN`{y(wY=ZyZ;kG*Wjo_&$M zHoAmDZ6e7jK*_{U5QVT>J?>qa)?E*HbyhHQQ&Asx)(Z<`xfjwh?X?pN%6 zaL|_Syvg>F!W|DpBBYq=<=bBGT*3=r~>3>o~ljnTK*EPRN1tr zJl={ho!k@mCe`4ae#M>5puDt%ku`ceQG&Z@A4MNms;#eXDr;n6(tMS8qsd3|!uI6y z_p^$ys|N zKL|7d@~e7nba>LQu#ID7r|qupHhf%t@r)e5VNTXHK0~L*HTpwD^I*<);-RTT76XX-{l~426saff6#tZyp>f4yFGc_MW{~I2 zp4BY)V%~ZO!Pi#TgeBngTy|=y6Nhnl8s+^yip_YQjafY5Zi{?d_1pYP8rzVXX&3%k zWT9(bjRYu@0aR($NdSf#S(R21UL1qby97A4!`ipKR>ux;J{~i>nK^h6rZ}l zdSUg;_{Zd*s-}a}YxKpW0_Q}^3buvj4Y9)ub00IdjmzbR-}pUy9?P^{9QH|0{+}Q& zpACH>gA}nAMDk)ccIuz)v^CLEENiB*nW-*gg}AZw3K>NP2w*)Ql(jnzAg3%^yl1)qjoYWY*~X6zez?+kuvO7O&o zwF9Z$oP*FSH{DkR|Lx>L=GC0OCqcoRf7hj~R5#=QBe_WMqmcmR*T>Yrk>d8O8db5a z!|woXb7t^X(}d{}ocJ>r>!P#w)9$9q^9dN;jxJ-%Nm40YNjcB%ZTRLk0F_JpYzOi9 z)~?o6;fFZMbJ!7g)4I&Y;^W7%`!Hj_XiF|TM9upu0X$KP*2-&D*<{i3jkOz&{iebk6Wc^bUM za_ATqzm&muW&Z~#K{hXKP zf+@xhI_e9X{&YH@m>eh;FF7`f*)N~t%AcFZ z?Y<@BX)5NODfW%+!Ku3|Q^A}TS3r*n__}rRsm=ss24bykDaR4-uH6QzaKdMGI__ca z_OtvjM}(fV-wZkr_V+dV6*Gll!R3e}*T-JRdNI-NeqFqzIp8E3{jl!pog+IBYFz?2 z{;bq^&pDY+naI$(r;<@x_we&8tf!LwMJ5sPKZI^Fq9No)V-*J2nc? 
z3pe{Tv(^@|?r1kk$;Cxt(z}h+i$CA+sZn+MzR?k+D*7p)fKa6L{uSr()Vx9Kr>}Q3 zd6?GuKJyZ$ayvMt^-|A{Y5`#*l zr`$^{g7oZQi)>*3vLF2OY+T5(8N#v6hSw#HU4f_0LTgK1&#Nx(HDt>bq$H=0LFpSL z#YR{>!iWw0{5^`#yZW&(JE;*WZsp|Cg-0b<4EWK9@q^w@(i}x9Pn=|5af(gCcZI)Y zI|S|6pka+DNbQ$m+8xXp7o990p#S&akVhy2CRN6XsB&ID_vT#}PR$A`O?^zZQYw0% zk(E(un)P$gz=ZDqR`Z121JG;aIsXxEN=|A&9eOY3)|jSGi(a<0mGo4e45?{%lj=bZ z*xctCjx7!;_7+;_&=pjEQ!&51nLbBkgNTqsxK(;G-mm2F<`35S3HH~Cs&mS!`xMM_ zey@eqOdcbr{9dD<>CKxV4Q>*kC9#-!Qo`$5F^$B~lGh_8iSTbA#ET|euz$`+5z2RN zi`KOp&wKo?G|tj~vPZ~|y(rULF`)|LgXRIXJOxwHZ%6+jk;F1qn#|m!Iafx0q24Y&q#XudZp0?zG)S%+cPakvV(u65BN(I}AN-*edkBP`;oFqM6X#!!h56QyT6t)^w zv_Hkwf5h_jph40$r+3o*>pxcX>|Ku@%;_Ey;x}Wfe-v7$*ZGJ`TC7&}PiCUUlx{F* zNzZ`dbhus@;Ur2;D-?>f-(VPA^dTgU0!mrqq-lQX;e9;WzOoi2c~PR5wY4xy@2E$v z$cwk(G`y9EpITt;g=#>=ug^j5h;Z4vQbzISK5E>_t;lK_uebW7d4JU$$7Puc9#J%e zm>Ex09vI+XaYIq!(a~g173;~uzg*Dl00BO$|3VwhjV$6&9 zAExJNRm)$e81h&Bxz`44+1n9Qe)j;UaUz}= z>*be}dmciq-P>s?T9K6$^|q36X(vt99NoxVvpZaf-uzpda$OF#E~W>6$q+79GSX1x z*1zFSC1vl(s|%0jzn?FE571iZQ(SBA!u60ud=vn*gh|rP8fi3mP4e*CCUh(0->~x} zaza59Wh3m^$2To=nBLb{v^Q^$QQ?3V?nMlf0j{xfno#XNpx5OfOe!jB)G)4C3;U|C zB~=!ZqrtGY#HJ>OT!2j+dKGtuN(9Y7Durz7+rNp zHa_rLG`k_g&?r9Jo+nkDmqdQuQKpOMVYsZA`Pf2&r}VMJ84||emAEC>Iqrk6fr+s( zlgZouIp$l`kEG!J`eWQBtWb?yUd_99l(EL__v8ZP!E4APO(?}w$=?iQd}F~;*MzY= z*_95ajf%2*#e%e7MGn^twQA{Lk? 
z-#vnelJtOlb1*%bS)zLeYuUGn#W19b>!j9zE&n@iJ=V)VDbd~OU80v?VKI-f7_1l< zH>hoUTP|47Q%-BNsJ^$QzvCFuRV0(`2|rY*fM?wZ#8)ds2!;L|B%hX;@1DQbt*kV@ zuA+6n&|UjuhQ@mDXdgwEtbWOs4oRtdDKiVgN7M3GyAQzkYLUS|^{C)-M4zl9pem{}C z2_joLFHEbb#f+5_bcrIzZr%8k1`NkC~h3U{zQ*;;Kk zb+xk(0d+givJcYhhs&BIlo>d?Et#9U;o1r025qTY)n_h(HBA`5(~bFx48~|biUR+( zy-_&pqE-4}mKMhM6FC|p58X9+v>M*azb!u0oqZNr`E%nJ1neWe7X1z>UoN7B z4c%YR{Ll4Hf26KAbdFZ6dSaFEj)(FKbQ**ol3mG4AQina4~>|kxX)adpKEd5COL@5 zO;yE(axBw~`G~#o4aw{XEuGduh#zV*^V_fGO($3k6y_U9)v9Yj zaAZvOoEysd#t&ts6AOL}QPgAIACt0i_2vZcZH_&EqiHOVvB_P6VFY6K<*)&#L&0VB zkeV_I_x1SBMr=Kc()Ou~vm5hC;hVW{Yt^3b@jCGwAGaZNY;*L5hw0-YDJJi!QAL@z z#mB;zwckX9jD=oQLZId0t;9W#TrVL(;1Kv;80lx{R=IwA$)xF6y8gW@eG}w|Be6f> z6;4Aj;v#L)I##-8zKLRLOIoA9X=*Yo`0%FxNUtE{achvvI;pdL%o}~R{6nu^1h^^> z(&yNdZC{=zVvfKUUQWBV7^#A*^yfQ)N9HGdYer~jq@tEpWbxw`$^iGEg&z}AQ)A*2-aEr|s*it-Jd0UC_TRI`ptyA_y z6k4@Tze=U2sMk9N@`H8{hNaufJuFbb9Jy{LY1H5CiNo7@zw`PD1tQsM>gr~Yf3Eh; zheI zgu&@2-kThTji|G7DJ&NcX4~6!B>PLz=M%gdqO!bpl=t5U1^F6;WQxthEQQ@!ge3oa zjc5J?L5cx_wzKhJ|Ajs9aLjhjmr)<}W3$_jq*f#x02A!~l3^aJb6{L{ZG+T&Nsba+ zsirX*R5#C;GjDN)h!rfsJ9??z)f(qj)7yYrOG^b?Gio+~?!BoBo!cSU&--(lh8|-? z%nc!tpb0u%_4^BVFzO-p-fi+(NSbuLy=1+mrCon>zbYTcLrMN=ZQAY$$t5G^hKk<` zp?KrBnjhx40Bvr3Ssb5K_j8Pe!lZhU|HkcS#ZvDZD}#bL&BAVR1w7uu8@<$4{%&C2 z{Om)aJARoY)~buzv0o#P65Kz42*Y)D0AA^45ErXrM54yb-AF%vd634geQ|;Hqmsfw+UlFFFjv3zF2L^8dZ+M~Ph*w#1NIArjg{$ygbE_z zCf@U|t{9V=(-yg$p}N?$-U47@pJCBKWoJT1n7xU0O!B9JONT*41-!rk4~xu|5THSK z$568k9*x;+G<0J#C@rZN=0tH9tx2Ru{+G+YRe68Z9T=9|A2=oMYi zvfqC!+uXu+KgWZe5}!ItshO43wv?JOrwMR;mD(BUHiEqpg3RVr`6^8HI9rVRY+zck zcwynXOMd&tsM{i@$@k`WS8{yEXJ3*gkC82abWqbWHrB#UDQz^-lu}$li#(TF{U&5w zt%dO6W`mIUraRe8w5^QMV3MuvN0r`&$eo_W?F{9hoayp7)BhV22q{4Oz%@A!e$`)n ze{&tnLs=6G52GJAGEJoz!% z()@ENC6RIgdQba|TJGu#e;BZD3M{C}SS8Ml@7gxLS=_@Do?UlhM-C4MPZ*H)ZJ~1m z?uUY_e`uss_of{PrN3;+1v2mYYu{Hn8WENY!0pjnGT7F4!?z@6h3O>zmuo6oRTpl! 
zk~jXZbR$LOPTPQZW)D&`44U{sm{v3Cj;C=%m*5M~Kh>Pwb=4 z7cb4%h8}atwLIf-rLVe)(B9gLl+?lgwJ?1b^3>VmzDpQmv?^KMyt8AC9w~W?N4d5Y zoUdYxIhHB2u4?(X{1oq-ThZ-K;g^<}q{}v@<}QgJx0Ab!X1C}eWCyU{8 zv?n;Ym7V0t>W%OZ{)cC94GXp(sNyjif@e|eHZ}8!b7JB&MZdZI-b#pI_J8my$k3n@ zd9lE9Ogwz=9?%^vWFuqr-5t6q^;rDt91S9S+w0fpLH>(?qFvxpqdLxQ-v0SSh7nbv%X&$Ypy$ja zG`E-NQtWNA^DD33Ij_8<@{>z;qsD5{c~4U`dXV^_f+(ObuzP5@`4}qYsONK#zQqtf zPfqr#=!&Kz$_lP{fOBe#nAOY{lb0n|tY%sTF|vT+5RY*=|1*}k0Dredw>U;?#5Uw* z3tIwQ;31;i8B7kFnN0Rse$hiGT6{)+`4LCMeq7o#-616Rw}>LGC$tGLu1%k>1e&;C zSBFxwSj5M5IG?#9Sl_Z9wln>ViD2xHb4^c@1CM6@7$=X&zb3CY-%e$HNB*s(qBxV8 z4C6s%S9w@sh!z9sVc&v2NHIaqxO$h@?7G*~{1dja9c<4bM|KmyA*7~+AkfTL2z3s3 zN3=#SL`EKjU7gw-R2AS8ymec%@9MR8e7(JU0cw*{+*qP}2=<$a(cQ07Q(+wA-+m}@RmaldS{}2VB7|CG` z6~xfc)a24NS>}HW24X19d4%Y#UqD#rSm<|@a&hXDEGSF4$!@40$~nE-m0bY+4XFt{ z?bq~?VZrP4oc*O_rG^f)CdQkfE3JLnkxsKVaCyuFCt0WSxltqg*k??8e3HnxWHCcYe+afu-V(cT-9zp62f^*u$_2HWHgBXJ`TAQOjV^Q&Oz_%}FjiIq@QCdalLJuW9pr zyzjKi>FE11pESgn_r6t;*}-~dPsj17S7H-+Kccmyj8E+74O7iP1&b+dzzbaE97qH! zb}vEoJqGAw*;lYKzp^}ZE8j;XYXGwHKnib)_>7&QMi62Eg8~>U-&PPS&-rdHw|nmq z!fUI=+$oX8g>z0q$HG_}bd~L)rvYMXmqWx^`?b;YBL@)hp^IPrnKJYotsI#{!}ex3 z0>Du-NLMc@oTM}$BdLj5kGFWJGVRU`^Kjk|-CUcsUX7tYQhdT=pE>=K$R*szJH zo@y?R>-vfj2;&5?cTAC6E7Up7gK3Ie(DhG3_HdPqNM?@9T0Wzi(~kde9_p7`fvNi- z@v{&02XvCPPVZko?}szDcD;mF)Xx9GH*TzrQZjQQ_(q@zI*m+QE2aM`qOEnZCvA8o z-K%wc!G!-!ZWPVbKT&C0Wmm3wIzHsvovP}7RF#HqC_K-laqIW*Xy1`izv|Pu+u*o& zN}w2^?+9zQkw?5#nK%hRBWfE1`Lr${BdbLkfm8v^R2~+*(L5=i{^Q3QJ%K3U+}oZ- z*_ac%7|bQkC}EE~BpFJ#*!iH38bh03rpc_kjnN8-!uV#3*%%|gS5)3G2^l$rk4=rC zv#eOdPVR}hRjn#m3g=KMnIFV&Dt}bZulan{U%!IK*vS_|YOS%R{7%@u&!inQpi(Af z@F~8`xLvnG?vXI&wtt5Xoa8q4mJ^vj+5|Zy0>PH40qzJ`2$}`af-uj<)m6dolRJ6oC3zT??iHsiq*$Fa1AY(&L-~hR|fXkAz{Q$`;fTzHBQ*$P{@_HG0 z)2oW{bR}b(OnJ@pD8@xi$h$vncV%VJWDM&8<|~KpsjhVpW=+IGR#R!2|H)Lw^vFJc z?A`tI!S>*Q+ia##YGZ3vp(!lM^lKCkw$wPso0~e`mx}8X=$xZW5En&nL&7d=%+;^h z@N14C+I02z^Q4%x4~)|8!ch@UOXgi`t?a#LY?ay4w*7maTi*e^{A)1T$5a2*Hd}DJ 
z7Mh$){{>Fxs>Eh$kmT^H0wXs7Hs<56pK{5Bj(dnHvA5BO(TfySL8Spt1)E=|!8r5`yN$0uAP{M|*%F1f< z;)T(;jl`MZCQ6y#X6><^5`RKz-Yo87$frLNdJ_H+eYD*To(C4>+KYo1(YAgIjvN7O zD;Jxi*Svr+J24}BEspyQ(W9mH2lQM9v<1#i^l2g%+EMy$@|V;tTPbT{F%b*2$JJ@L z5#0@&dF`@72XuN9KH%tPTR1iJHl!)@)K;^AJqHx3HolKdFzGwE>4h{0&4XYD;q=Tw z=eX^hkucnfth~#((=gU^X=BM{H10(xr^v*^=Rw=^?l!^0R`yOIkBj|XzH7}$F%ebg zmr10+RNHFUdzXiicr1;O%sM6>?sYoyWVB7kG{R2VQ;IcSOczG}AHo_fRak>o(GE`D zL2LXc2P|gn!xz6$zThJ*m}=>pXP|qoXChl>D|MFbh=pucqE)#20FQlcqsH-1Ip{+V znY(Ii>1xF9qc{Wfw@R=v6ZK-J+!SZq;qqxu_s0A9jbknZ<*R&#V#NeL=!*E&hq|gX zGfu&6Z^Qv+jm^i5z%xZ;qy&1Fk(F>Pzb;cahrcsExkrDml_e#;u~JU%@fj|jpzRaU zEJLA{D9zFqHdYL!jX+>No8v!=Fh7~`*GYmbjmW~ssW2xJ5H*-y)>P=>M?aJJ=QT6$ zVio;|r_3VF>Iwl`r7)(#{Xuvc^aFAu;`}1!u*qML53y%NB}x zVt?3BQ>8n`p~?NGLsNh?W%jZw@tiLHaNGyVEBvXcOq-1dp(%|TSL-W#OIBV0U4eGj zx>NY?-rb}*%m&$S_$A zKiXYtyS^E45943}xT-e`?zJ^85MYMCn`9$5ekopIe~j*dhiO-Yj(xaacPCiW@F$J! zs?AVp=b3oDXVM@}lh%J|r_Bnr{5;G+%8wQTuzqlvQ>GWyw06|cTLRU@ zS~e}PrpnZ^rgwGj5})7bYyWfA)~0OD5`T)1HYQ$6ra+;N-9bek{2!avyJ5<^rvP-@ zvbA1W^KC2?{)tdlmMwSfmo( zy3=r{;?wf$2=#L+gYA#)TAZpKUpxlf66ucWYIS?Lvz`hGCuFH0W^xs zQZ>?K10fQhd5d|;X?`?*mhWQRb*t&q_xV5VVMM|y4~(?Wbto;UKsf-akd>g{kV)NB z<0*u2d8utnfyPU~=bjD6_%M-6YqPyOl1mYmTz9(aC4cGLYG}_Ty`LSF zKC!ElPyH||1vf$kO&&;A_%r|@1NjR;$@yXDrn<@WUu`9Yh7N_s4x+?Evq9hHEm8n> znDaB8rDQJirt7j?cJ*b;tSTzAnL>Hj_O0t*lhayFfxe}|s##L{yn*KJiHJIR($k#K zHCcMK^uGs&YI3yPQo#hN zDwyBWz_IinYvhf08S5M7H{MY;u;Z*KbL$OVR<8q3bgN}{RKko;)vc^_zMzz3NFdb6 zT6lT#E&yKUwW=Z=E?uX2kwmsmFg*mg>IS#s* zq2A4V4z=6sL%9|^&)?(`^qcp2efJ`E{8~VG9Z0YTVs1FmqqfPMP*&w#Q$c3#sDDsa z8>q0m{gdf~e*@M*3+K;=r6xB2tW`xPbxcJqhJq;_l$5qKmkcY)WLpj#ho@ z>>#1;2D8>1(f9hFvTyCv+89P+tAEjfg^x=$iB)U4jPQVPNazuk@vCjkkHb0!;JrfQXk>DW zJ-Rt#9znnQf37+ase(2i;R0m*$qutR$rj^3^k&qhEbYvOAVubS(;CfEZ?8I?6*!Qc z={c9Nhvx#aukjj(zqzrte_*woP133CZ%|Dbw2sx8Djbaz>7fuWmN-5iV6Q6}hSc>q z6?4yDZr%^xEy&rRrqQd-Tj31ti~zo4O%YKhebB#I{6FgASeL~m$fMrTi+g3(7OSmi zbb61nfsKn1o+3L-O3i5u%)3%)m*uMoVxV$#Y4M4>JM5{gg@3L$D{{TFpjTfeTF+WU 
zv*y`GpfNV|)X4-h+a>eT&RLzJrwgMOSwW0aHLL&~@0sA^0MSi?nD7)QLD)!i9t9LD5c}k^ z+fuy(Nh*czY+FvHJ+szZ&|_!v9$T5q@!GkxYh91Jrc~7xvsd;gezw<3b#*pr=$hIK zsw}x}D}SvZ{mmbY)co~bM`+KXk+b?ePJ0ymmgIv0+H^(=-r8gMYXxhR@2Q_s)U|Bo za3H)co`SWWI`a%3j+uUzTymAvUm_ZK_|N104Zbo2+qq&vA41hTZImqe91Yt?^29p( zeiRG_UqZ0dr)s1sJ-6EzY+jAURyUbAs)yXZ?a@@@`s`3aVy5Z;E{N+Z*7x*NWa}*ju0$s>3zf#pF_5laU?Bc>3mwS#{Jvd(@qkraLR-hi^I+2A6^uW8uNc z!gbfn#U5CEwec>UcW8gCdy?-k%V<;l+9K=bMv~sg{6OKm`ii`E8_(~w<&AN9SmeNP z4H=9N@v-k|kNpL&rdtw*AF}T4SiGO_X{ys*O^b(qsNYhM{GcrSTQAn^r*FY7@j@yR z0{r^Fj=56+`p5V?^t&{k(UomRWBe{^Wk-Y;(FPhTezjrXLVKBtojm(&`(aT`&$5_- z+@6@m!IkdfYq3#^NWDOVq(e3K2dp3_-yb_RWmp+%0lcGk58H%0=vN^93%+W&W8M8? zUhwO~_5a|^v93%`L#GY-ChNxHH*U~1e>bq}nI+d59ZjCDBcWWa6tU^NWY>AN2Ns*z zwLg_*PHpe*{_kobja+SYz+SEo>yjXqu*EE#2rrw{%mmhO2Z*3{hHv5m?f-vel)rwp ztJj}YWVYmD9wAV?$d=&= z2-MY}KZzQ`fhGv2UyAH~unUqz&M5;Yb{3T~2V*4LwKAi!rl`fYY4>^}I}FZYmJxE9Ooq zjXNmNXSRHxQ>)toLQ{xDD&%egvVVCy;E>FSVwJ_+wCweM^jHsl=tR?G@QomG4NBR` zbQ>xF`f>z|X-WY7SqdkP&g73;n|rDSFzBJ8xZwmy4-jH6MFSTym+lREd(Nx<3<@}) z^OJ_50}h!;$#=GRu%pz`%f(g+`epm~dV9d>Pj9Cij{l3A$Il z59)DTgdX96LjQRB>rZdK`~OiGt5p+iz#ZBXLVH6=`mJiar)l>OI{$ zpO?s#FJX2bXeCYSV%#or)u_N!PV+IH*~-`2)QLQ5!viRB6iSdE$wVsqd|H`Ed9RUS)d(= z_rI2bD3tBrSj3&Gz@$6iZX|}jr|t8}NR~xHbbzA)$8RsN$1Pc{F(=F;J|r(cQczGw zga|MARpiuMyRVI?hsSDA@T1`G5G25WbOxr4n=L*NI45@z!r%kowhx*Yu^ zQ^a+|%KVgxD2Mi)^|@F4GPQhBKr{OHX!M{e<^KbKsE^I0nDu!b*+zrhcjwH)0~x7& zBdrO1BkRsVOhDvf<@qqAr@iMpCD%5h;z3yZMC_B~pTip~2)u_`!`bc7E!6J1Bpwx; zzR|yYh3`czq1aj_oTYikBsHAHwi@r~i%2+EPToP8-bU87aRW7G&(? z#UkzoI7*{;rFinXyj!t&d-@AYXW~j99kkD;R;_!u<}f~Uu5;QkDfxjJF7zJ`X{X5q zPV|NjC7iA(8~zcO>pjxQv zgnjNH$#t%Q;>MMJ&1))WzlbH`W;%tKH#)$|MI9v&E)xCbL8no1B0R^wYX5PChq_6w z&x%2DvBx-^PsC;2BQ5=VzMNj_AGA(?;zVt9=yG*;iPc5K*Do_7fX7GR;w)8i? 
zECMrFQ4q^0PlG=BH#H)xknIUIH9kyKuzf*U$tf|g!u`J(LesQaA9heGT_0Cs1(1hB zVLxT{5P3Z*+``1`sMzEUw!PUeI&D5E+d42 z^|-TTB*v-cYG17eRQv~GE6t+2llfQl-(S|)c)usDg-yG;K*@UZA!YNs--v8L1613+ zKJ-HF|Gj2L7>gLK- zGKitAsbBY!CFdD$yg?HFbCY_=Y6*l2wah74wlz4lH&Eidr;RAS(q{W;W5SsQyT(2B zKU@y{(5TUIlylfwlf4I1ZH;$x>fU=RrNH(e%2}AI=Ahs!s5!W^0NodSnNhO9?@>Cp zM<0TyFlE4!ApdBK4}7>u1ykp@sv8U#<)CqYz*a`z?Q1u8FGGVVn)GLG`t%;l=2JF( z4#IOq@CN>G7y6oPhRKCp2diswn-B5+hb)70&iyTJRHGI0%Y|wT$oxaV;TrYrlk2sr zK)ZUdQH^{fH}_JVg|Z(q_$eTw`E7<4T?LQGB*_FMJF9`tlBXNFCY0|yi0OhO4v;=2WbTLejhyb?)>wo0=XEJ^N?$L4sc2 zQU7uS{fecvFxb^m1f`tEr^Pwb!dQ>l!k`>54-LnlRtxpm9SotfJHZWM$SZMlb-s}@ zX(mlc4%nTwCQDP^%L@}{?8cDxej@tCn&)Y#n}XZQ_Y4bdJI-5=Fn|cjBk7NeW2ua5 zOC&kJG&OfLJ$Yd8>W7Tvz*$t{MT?@J9{Jv~0zOTLRz(v$jO%XoIHcSN4*rmIxGvdm zcDlD7KHFO`ZC75@>267r6SsxQ*pN~(KZno&ARk)4tmeZgC#*DykC{zj_C-zBs>^1xm-N3MlzbW7|b7)oT&piXjpl?p$-*7#(@;H7& zpKOqeHrh~u>MYtmrWeDG(nky7PXtu9?2Mdf!K)Tl&-BQ<7?^AGFtefd?ejH!)`_B&?7 z>#P4JJ7_>ju1{g9aKFG(A`5JM1Tzl}KBk4WDHU&Jj-rcNqe=(rDr`X}fv6cJ~DEOfX z-b^KJr0E*TEw2CpxZdxpfZZx^6(I?!$GDONzD70FT2tz$IO?qyy+5s~ix$1&IA+PU zSj;nyeHY=Zd+8g$*n6wh-YRKIM22!POTZ|2L3S)S!uu43R4Ep<6UiUef!<@nD0cRf zz>v^~DJyxPtjD@(ez^m`DnmVYaBTcz8cP7-to~^Su^0wRz+uO3oPJWgYHhhCeQE6? 
zreO*lUW=K$3=JcZYJMih?2$*IXbbB+$+?qlXPe+TlG07-Y^yc_`di3LGJ&4={J#!T zKpK@nj>NYIF)mz=403wC>7~`vFXrmB8APPIBGPc;a>lYoUZ(QSbesUqZ+1N#OAcH` zX=5?R(;t_OA9;C_+Zb)b%;nU!BW-pk9}Cb<-0?2<%ED}Ekzh`?!2ePdP;Zg&mbAnW z*PevmfC+9Sb$$BAZNrA2oJM_91cWZ9L&*v{1jop$+81!1@g-0JKx0n7bj~> z-04ye;e|*1aN?3uORXH_h1VCnjB}>aZE$grI~)9Oyj)B!F`g)@-&;%Jd01B+KZsM5 zJN5rj_SR8RuTk4DAt5=agdk}n0tTRTDjow=Is_C1rIeN$8k7(d6eR=^B}GDp4oMXR z1f-G9p&4rC-S^;$_gT;PuJxTibRGZT#~pj`YhU}?`)1U8*^{nvrjL@IcTfNptw~;V zyqYkEqtiF9WvcyK9f1-lBM>vTmO(YKz9f1^`{XP-dr-b-=}!VV%qU@!+8ys#Fw$J3 zEK5AmUSk&c<~yy>j$ba4S)Vx$w_hyWpA=`6=e9fb+@)A4>FOHaI9SfkJi2|ql}2HHuXG|h$FY-naN z{=u77axlXv4j{(i=}VZS-lk*G;w!htYw3DQz>{%g6i@vy8EfyLvDz^Jbt9hGSpGTu zg}=XNG01yXQICu6htA$FGKt*KL08ZObb9kvRkz{8uMyOfIsQ`Tc8N~)yU9k}Q_{@6 z>-KweLO7&3E=YKNtFnj+d}A%dT7teZ&a(Y;F3R>Ey>-!{q&UCjN<_$t!MwM{Ef^n$ zX+^^cn$r0RP=tiBcJ^-MLKH&PBLC#x+5rOHpFl`X^Nc^i;3?eRaarP3yBg(vW;D*0 z{!q?8{dxy1O9Tw5D`(aGlyf@KzL#?cN8a4bq{bNC)=op_TBVD(52webN?CB`x zR~@~cF61!;P(08D!qc!#TbBlDYFpP&5EBL<@;bi`l!)uV8%G`LJa2NU0GUR&zS6#yFQ%e7nytSS9sFoI zb9@i$XC3R3I>G%0bdiXi2^CwGC0=}+=5~C> ztpd)C`c#dVxK*2?33!X3Rj18H7XGr!zWF13zNk~;HuvP|JtW}vB~Rv__XEz^&z-c< zDfI1OU^L?qt!EQ@pwqjY*ig~!_*Wad(UGxiMLkL-D4Xm{18QRHJfl%)5K)7ebVTTq z-^^0SZwRO+=|s^)wIlx+%d?*pn$6mnS-707Kq~OgJhNv}8y)zl`3ACzU1WIeIYrEic`4b=gfaoSM%c6s-CNP#4H?u{-z zu`f!POw&yGgfw|X#}mdJ5B%6H+E0$Yx;JZqU3M(4VE0jgVFef*nX#w1K)ybK{QZ<> zW_zk8#7PF7VC$u;AM{fHBYSB0pbyg}7Oig=>d4Zu=g=K;-<;eADwMa@$0cMQmJ*-j zN?(L-{u}L-$4o+nzTn;*!|<#&9{tW9a+=A^NZ0duYkQZ+uei-_YKtx^c_au_OBw{> zlx92-+Eu6agPD?_f*nOjvVwIMxrOV2Mg4;$bJRu>NY>YCR%??>f$xQjU&SKl3m{rUDo-}$pn8RPy8dl&0*#*={A_`ph-tj| zP#DBr`y^0tQ1U-Jm**llBz=tT-~;n^ z=0tVT(y6`;5j=xhEn%I-r)ewYj3jFab^|vV0Fu7cnKZS!CIztHj;PnWt*NB@X@M{~Rjn+$I-+3$#(l&{Yu6d| zV49P!nN{~4Z#yD)dsV;LVhSf>)J<%O<2|H7A<}~+{2{H}t5g&jum2uk(A-jk&h zzZQX&hsda>B<4ntI4E>{Wo8hVGEm2e!0Xh&9GxU4TruX%d_Qe4w@0gqFMez(b0_FE zqcYwv7QgZ4Tt-D(RH&VxEzNFFW)C{U5Dq1Ok4ts9skO~%h0R~y-|zcsV!qR2q^Tx> z%d$d1JELb3FwzK;_FG}iSgUU&o0sBG@ak~6ifBd{9`fjh63Lq0xzkH(l{bFkM1Uii 
zUydufq33yAfNR5xPH}82b0Ev{`>V|NY~$td$pW)o`V#mMSFkFbn*LtO;S^EnY9nwp z7mJfkS{4mBjoFO=u-$-HLvyB-552B2S-)TK5u;JO`RT=o%@Cm>7y>{gGB;%;VCwBVS6ho1{qKfoA^qdYL* zxpOPh!GIzK^NCBb4iW_$tz8N+yws0SK9tf`-o1Z#x3E#@NanPCT*+&!MuNmv{Rt(p zhg72Ez$3|$zu+)>f_m1EUn||0Q-1!XUiHzRmQ-9Malk?jSgAjaZD`z(T0Ta%RQEtG zXRO0H)uH#Up36AZ_gNL!v&!z+dK<%CWN%Xo<=++d?9gN7ASN zALkZNF88F=ouXfmp`|2L$XK`4GGdo^BR@_%g7 z**cRe^Y;T7X*4;Z@paaY(aL;Lw%_LynYAFrvuEVi5ofei*F=^FFzGA=+-unh^vZvE}XC!)NhQIyUCRZM56X~ zwrDPEOTG(U26^A#TwCiLdIY*O5oxie9c2FLkGW)#uMui%Hxo?D>a8!dhK`NDJ4Ch| zU+CK-r=ik<^`176_-VZ~$iIWLWcWq(G0cI$hoZEM^Aj+~Ddk110Y{MDhJXRVtyfd1 zfY6Mcd!Dd4;ByBdrGN+rii)l2Y7{m@i*L74l(B%V2!LlO{!;KzuV~%KOQZcu`0pS; zhG&zsJG4NC%_CuQWUz&L^V;DL)-WfJtzgL%K4?UWRXsptiWrd{4{bOFKO20wBVJFSDW z338@$qN|r1U>b{ymKD@Vp?g%cf&rz65e$^xnY(`G?j}Wkwq1CUJvU`(9>erB>@sjN zX_>d;p3G!t-8qUcc=YkwNWH(G!yuee$zbsE4NxT+0QnC<+L5C_`6ktmQ4+$MhAXs^ zre4<#Y<@|bZ!3RZGS+K-O4-uO7&QbYQPNOxI&IG_31 zb4dyry%q^;l=Ybwg(9`k>|Z*xV4vK=s#CY+;?aPVkK|;kgDh~QO26!z^`{W3D1LoV z8`8t)o(ly!({&GPziEWMkS`n73kC7MTCdgX3c`#_>HkXz8%Xuc{!x^a z7G*NOD#gn5jk~vlc+dj?`t8=Q8M&Kl zF!JbagE1ZZ?XddVX*4RY)Mf6#elV1?$A#gIfK!9Eeaj-h+%CB@sm?bpGH=v2NhF-2 z*X-adZ!K4n`JlA^`Mr{ge!;Kcb+Oe=sV`qreO;guIE0s_==AyM1JgpDq4Z_~UYWwg zLcW8E;3Te1UT514&HSo<#j8g5HdF##dkxc_p|+1Wt>@CvYzz~EnqTg>ThFtstNwJ* zjONQ&pw${Sna&~OFpO3YvR;^klKO?5^59pX%~i+mkMp>iX-XHLPLN^sku((n2xhe( zIX_(VFhhU^jIS?{7Yc`jRWfkKIu@>u zU@t`lPDumtAS~$j#=3Jf34PvNb-7?JRAD7~^Eb0^+izH^veZ8{Pepd*0H2ZZ16fMr z=%&kj{LIVcEXqF5401Dd`Ejj&2?YD<2*F0I^)Y|?_^w>AV$nLW+CIEtW#H=eH#lRrm7dAMTyutC{abtNPcnb`Z zC{FzUIAYVKVo>nQjxHB81VUoW!4d8$PQzG#Mn%gst(Xv>(NKZzF=3_ml+B2_;vE;b zNbo`&bv?iaUv!C62FaB534os$t}XIp@;p#~buS+BH0|5B^FcmYeiO#wu!^>NrvH1i z27O$0wxq#N5W?;ASvjI{B6^36OcEQE)2Y9jbYbT#`)W%)vg#oTi)Y?~MJW%EGuQ!s zUg-_aBL2R|24Gjou^qeXPNmIWbXC`~^sMZzl*({KDC3-} zl2-1an7xO#GpW`)x8y!>{!nufAQ8{&BEkuKxA@P8n(V;gZM>#ag(`qklp_Ts@^Du$1W)_U~6i^RfkH5U(^?PFE z6CBpW=57)^CTj7`{%hP40GI;!xihsUnYPz8cW{Zjn2Dzy0d39C3(#(%`eT?AyT+Uo zTP?j+jz0$VuL-a=pf|~KSM4Tlc6wG@#RM~=y#8-U3&=-CFC0BNA^8EuP!z%}mBhPx 
zf)?C5;!cM_M(cMU@D0pfXnd8(@oI8D6zhtO_oQtyAeX1f9RyDWK0_7UfUGxUfaMIJ zp>G2oKkQ57IUcRLs)@~we45K{{J(=z^5n&bx2>#@FTMN=VkMsag_Cx4=|))TLU+M2 z%+-2~(hZR5w7+@dY_a}KyzRT}p>@~3j)&8!)af~>fEj;>EKR4FKoW+4!VI1_@aCRaxjuOYax=G>ZkK%j`giXt_QYqW2ukm%4|kc$-I$r zDa&-m!2I>mLq_%!{qa-Tnk9Ez3k&o6S`;X)1@k_|MZBUHz2}9=1vAZta z0>YjgKpKTXejO;K38yrHExh{FOLQ$ghj5goS%BYN#!D$GEHv#Gmj23ow(V&3nyX1) zmy|4z-noCGESeTjId1`mY=)#FS!87#Ix8bVC4@TQp^#o8^_5_c*0rH!dHfW-HJu;?g@EMRpcj7Hma@ zl41kYXp2(DmmqYDhv|S;4>>p6CWEi94cy#nM?8ZnEirf(7`ix|;m;s(#$w1tTN58= zQt0*sCYR;z-Te%-Pixm6RmA&+)J~Hcr0N3g3vOhr`vBdR#7vdqPZLC~Q;>H@C*}u+ zv4;CW?Qmfxes1Ps8V}mlekkd~ick82KPjU%qY;q(Dx${ziQf*L-aH|A>-5MxJDi^L zhW|gOmq?g=D5~nPPFnlsFkl$_0%v8QxB2qpeWf}*+G(zk3QTA}!u2)2gi~pW zTU=HpxwTvnzW5T!7)||MxxukO<;Cjl-6j~p_5YP#0kLyMDew%;Yk4Js!_-YTuIoT# zsNH`m`2him?HdPf!mB++8`Ym<>U_WqWczeL%c zsXq(jR>C7KQq~#jJ-I}^JKPEXQgk;-9GlSl&2_=x?_3&O^=(A+ytRg=g9Eudtin&j zu$UKt?2@tf3@?S0E$$nZXgMAX7%KDGjOAxZM~9Vn8M2z6sq~Cj5HEfRQVwWtU_#0J zGve|ww8_Bd#{c|Xah6O3EAQ4-qM0U2z6IO?bq(_2>RenhfE!mIb%y6r_ubL_`asK` zn{G|U@DdTPWC)Y-&IgT}(K6!BcMg4!B3hcLE{E1r9f>Gi`Nx@&R~i2W>{7NUmDaht z)~jlZsr_aEn`vaap|W7iUXq|-ox@hgYuG0_lP12R_;k~4=E@H)no3+zC&!yFnk_Qq%h@E7(5V3%S3g= z{;!dqLsG_eKfujEF(->kiNO;6uU`9bd(@yWM#p<(GSxea}D z_v3zqhf?b7gme)^ka8Hhh_)1@DQ>NB4kn)C%5kK;rRWoW~gP*>Z5G*C8$7_CQW zY+SHO*_O5wS5Il>zm=Ufk@??n4}^Wsc*|fFt@lZxfkEv~GQ{%hx2!>y7B>S5Ih`*V z49J}shGEsea~pR=TEfV0@Mz7Vj2CHW8S=yx^3xbqmS2AK_t)81j3>N~^X?$tFPrE< z0LWo)>KU zo*ivr({B2b&2(kogDBxP=4iTIA1GdRVW}-|#5vAx%Ic;0hZmDfFDEmM29x+NKiA_#=hqv_70!c*bHyMnMG(74!fPjuGP^a(>upHeMwKri%1l6q_cZc=sn_H zFs*6HRHd27y-}?=C6`>3%nr?tP1eLw3ALfYhs1IFW7p~9Q}_5d!5m2Fh}eVB?;+1- zdiB@`lT?w1Exqiw7cvQ3HA?BMDXkFjN@8juwx2r-r z%025S0DFqe!$&+vjplK%)O$>+%evDd_eZCNUm{wOA#<#ctoOmYm!rzviQG0c>@F)2 zaw(?L#PYKydP*{VN=Iyl{J)7Z_RcZib=FUjofnf|x)sM>wr_BhkK)#yUf4Pc3Q4q& z0r#5GKDs?&klaRR20sZmIoRFlqG=XzdABcARYvLXyx!_nu@kwrx6Z=rIcuMUMw9|0 zvnmqKdjG{j2=nDS9~H=!5$K{%Z!p#sSnhFjv@}ChsC34WO8n!^J-EXORH;SlhTiP09L;7*%t2y z539*31?tr(ROwA#(;JxC{LauBL8B6Wo1SMCbBtSvLj|VOe_noZwq!mgH#(InyZ>-I 
ztf)dC{)7jgP~gKrJyvlZ3VD*R^IXws^`A_MKPmtb{(l~ppz@4jhC*6X*GE%4=jGST zez2VKV74lFRVr%XIS*S-$L3GcMy;;*!qbxLrpMuOj;bXM!R6FU`78>bD>yOqQCMz}z`1<#w%)Iu;19)xkbOTH{^{!V5Y@>DDOlR*%8Lk0YX@EK#ypZ^xse zNok73TO3iN!Wp58BihRR7Kot8@yMGhB6i!JKD}7(T5s#xH{d0kA<9_T?%+)sP_p28 zyESNkjxn!irka!4n2GMrM;b>`sSdCi4^t#5ec8H@WEGTI#F0o@_I4UmA%g3iK#HPf zxzmRK?R1B}9%*DvD*W#6@LVBsW<4w$ZOoOU1qnSnZCvzc)9vxP7ui-9D@};uH=C|d zz^+6xICTQ{dV!RO{lQW*K0^Z+h4og(NNa|cNsdoGRh~R#3e}3-+7Uue5Og2A8goq~M7{j(Zi*t|492pml#q~nn&oG7X?QNc z9QZi9)KtH=NMm-PmDX6>TN^66GiA08E5RNzM;1XQ-{2>$YT98TX!RN&%>Xa|UTe75 z(1YDL!HOM^i=E@x7EedsK#N-zzINxpSG6GjPy54|Qa#q-?!Al|9Q}n5mvxar{-+x7 zRLD|$oQzvc_vS$9(fl5-8t-=hV$%KOX+81Ql%T9ftlnn#Q+{9)$NFYkD@``unS`uR z2s|Ma&?#~^-GWlJIUiVWS^Pm`PPhrbaTFJxj%ir%1Th?!B9>RD&>AWQ_7vY>71jf9 zo6D(l%JXca+JdRlcl+1&mej*mA)mi=StJ}N=(hYV%2xaG#rsviCdh=P|9A!-^s{6C z@eH0HXm2cxm2OyG+|GEEiFO9PF9&lC2>(VcG zP4Zur4e5Y*6-2*rn~V6;<&~Q2fXj{Pua%9&C%8riifbALV5+;|;Nd=S(UCtz_C#>y zqt(}WIl$%G1h8L#*Qpz-!6O|bCLBz`OcH{Rl@KrK4H!Fg4aj)$?E&znm+v20EZ4YQ zf5i22mz*G{#Jpfm$)>eoxWq60Vu`|yhoz5K-eVLW!p0V1T4^I$$d`X7$~)q9>;5?T z8!Ko}op0q^Cj&wZwQ@sSn+`2MpEictRc+BScGhFsK3vcj2%fW*G3X8(N>l#a)c{z#I3S2bo zRkA7KzbEZOL-rJ1WYH)+&$6dP#Y3#mL^X=0?*}HK-KhaAzIN4UGQ-+_rnb#c$~|9Z z<|?m`w+6rg*zEI17%#S~!G?#Sc%#S}carMyyK6?VsSwY1PG0EnKLg-L9Mqy-xb!(X z1CZU9LJbD8J#HGb5Tpmtz}01R7;X~>-pb8O$QLX zZNe+sPTFHZ>@1}2!u_sy`p;fnvZ7fq^|gofey1%r^M7SRAt-7#yz(k{b?rmWW~sfC zJ_XgkVC7FdFf~HPpCnlv&H3mJ_-MCRuErDxu9;X>--5EaVikd|G4S(Qi4F$gsTte- zDW=DXN}t{dr3mc6=!7tEY%^K_B`g=S*|$@`1Uh6d4e~VOa(T6A;`r$N*qUi0X_wwU z3w${#1p8V|#;yTUdA_Q`qK!YyC^KGj)XjoGbG6*#Va_{u+pmNCo!9xaLcpK3`&PQv z4%GC0;@1iJ{GyZ;-Ir_BucB1lY@uXaI%BXB^7^M?_9yp-(R0n4*VvzvYBFd0TznLW zJrt3&8+)Md@c;t?4I#Bpz=rzEW3=wf{J=V=wb^;pD7%Rh1!3lm!60_it1E}Df`M&t z*9Dx&qJrS>)1A&r9>F3Dg(=gqk)iaOIc<67liwzLi;Mb>$g8n1MZb6{w&j)MmEJ+^ zSNt;CJXJC*DLNOJM9`fjYHf&Ed&=0 zG@@uMQfWw8zJ1c0DYW*+^6OTGB1~5H)3t3psq(A@(KusbZst&;E6o801k(fGKBK9a z1Ss?d;Fr&KT?h^j=#p84rpRjGiT%jF1<;ji6qs@?@x70%nM_It2*Gat4A5kVr9jmY11UcWFUqG&Cz-dFA6K zUi 
zqPt>3NlvyVU`g1<&j4I}>Z%Wh`dflu%3m}6zMG-cD|qS}Db?<;x(|ai=Z$3| zL>UF!4JNy?m#TBe;{DV+R&GmE7{ywqB zlk&d=FVd2SAA_%1Al*J!52-YatX%>=)=M>*dXPv5iGn6A#bXQQY$Uo0QJm z;z238w=3 zqA6YfG{_{}!9{A?sTJb9Q5kK5Z6yR}P_OhJUh!U}1P~`KStxSoDhtt!y3~1OP{i+y zyxaGV;=I-B4>@i5>novrr2f>N=kK%_eW!l+QBvG$GBVJQuY{N#tqv)f>Az_%#luf< zlc<c3A^69WbP8G43=92UP|7i$84e4x4#4$|S~ctAYxpDx<$Az_H>KDq#~e&cA% z8tF8?OMcmx6JPL=`~QJYP;wziTggC_nmPM9hM!%8t4IshI8%z?}NVt+wRH9dXF_Lrw^?!uSZL4LpI zd{=EL%voV9;@>em1G%U=Z3kltiiZoZ!9v@DZ~qUZvQD7;X)s)Jj1EfbGj5Gu4tO#7 zLj_%~*rKpk_f7ePW($)ivYX%)CsoWI#gL2!kGiNvN5_I!Ij)`AtDPTm@(u6OvEvxT zh=~d#)khDonehTPGyFUCE<~3iJr^i>!^v=<; zS9wCB&+QKkyarprU@<^7m zuv5Q4D|U?sdO(WfE8J>K++a%BAl?)9z-`TVlXGv(k*T+wMUD5lnFsQXonAaJ@XYwq zYEhoQ;Tsy0Y4YUtWXx-^-_sw&Kf5h|!4igx=65S{U?9$NgwcXi*@=c3UPyinuEuTl z0DJRqCXn8NPLo|L1WK5$eKLKHaeIgbu4kPzX2-L|AKZoc#1o5I`F0^}oQ0*m-X6wz z+)}o5?CzOgh2yrHQyQy5p5+K>0{JSDmsQc4%*m*1ttD{(d!Z;rQwJ|28!`E|jz2@8 zM~w_Ip=Ub)ZQYq$djS=$yk;#@LGZ-Cy#q=yGuJUL8AJns_8P_oky)ymdWpGt`(rXQ zJGn`i9Kz>v7RAnIDC1{9dV;blME5^=Hw2qlf?Ph+tL0*DhlQ?{-djLlgvq;SbbqqM zyR;JM665`*T(rgY_;cI#r3m<&j4jUuu=fu3$qCLUm-JPuI31iV7~v72|2zT^zl|su z@f{2HvqCqWdv&NC=ixIQ!EHA)TF>0zlQ>6mR+8Q!dKbj|bq zOl*AdO?g(KWl_e0S2i2p*l*cSsXk}B0g;*4m_wWm<<163L_u)@$-Bhw{ROK=*hT#r zYwVhnG46(7w)8(pzm%8&v3|;9)PEItqH(>T+DJuZhXKpW#moSb4s6^3@7P#&GMKKPwq<*-?d^wbAYdOO? 
z?i>E$mzneY(9!Z~iG$GRSIZgAghJPcGW=f~D^Nz2rvIx(FBF{@fp-zR?sZU;KDtJm$`q=LR zKp^FulxnjL;CElPL+lZQFr>a_Rqgm+*Q;XsT;-#qJRacZG9;$6G3E-z{az6!S!MPj z{^fUZU@XI5rB4yqTMW7$hwEl+w;&^&&pV-%an zeCW!k|GSkM4wyjr~l~>NKZL?J5oJ8{ENw^w9HqSpvyW%zw?d z95H*%2A^ZGxgO=2pUa^|Z=&Nlr(0^I70ZzH?J8Pa;97INa*s2N`lL9AvYG?u$>|ij z$w5Eu8pc0D3il7^_caqc27Z3McZnGH*h7B)+ONXB{H|q+8x_oR2+i$aoQEKU-84 z0?E^)r;SmOP#@*5Y*o=*Y?Htp=hlB&Ki0dx)}Np}c6wvygYuh+9at-}ReRx44@f zKI(GMPURX!f}9j5K@-!L3%q9@kCa8~T}S-OTwuF5Jw^g!3Tye!^i!()k2Kd)A)1%M zg$%07V_eDnI7L`mk9@k}!CyS2ruu4t5grQjKHaM&o2=cVYxp%fh08ClhFqW_ga>iC ziHtgai%**!t^R3Ywqh_jlHBdS`XEG)NjR)z^w=%Ad6oP+?))&LGV^gAfLD2x8fJ z-QuxNk5|Vl>yE=_zV&mE5*SCuK(W7?3QlnmWraKAfj)CErlv4KA5L6rDqWq%g|vF| zhsi)gA9%q3KzyY_fc^}xvnts73TI1zYZCP907Y(?A*8)0J}4a#wi?J6&F9y#vdvwz zM1jb$+f-NJbb>`T1;v&Ype`20?L$!BhpH}`0~yUkULquH+D7dZRY_QpfOEQ(>4JsA zh-2+)BLs)ohOJD^q%t4zT!f=c*xO6jnh?-$s?;ZzJNbJDhe!zYhBo00GmW0T=83_{ zRYRQy+xPsduq*B32r@*<%gJtXLXcSTF&R3Py$kmldJmW|nJEDP zs8lfNbyi4*W6@cEwqP7D!+o8;_l}CJF~?$i5SQh#JGKjF+s!8rN_k51M~1~KyW%%O z;|R@Z^ScBdfm=T>^w`yyj^fa(h-;V5U!Q4!m+ z&;+=w8|U`3&_%NwN{F#NUMv0##Syz-(e7iTw57Op8Im?wZ8KW!)DZ*<#YUbBS^JSL z#B5_%{R$3qkUz|iA-hYv+|;Yp;7Pn6llQUCUT?ad^-wu88jBjgSN8e-vFB9+z(`kv zcxW4^H;;etP#y#7!TNNg%4G*3NWza=_gM#sogDuVZ!y2Q!Yb_a?Ak_E6a(!+>T_g^ z{g=!VK*L=Wm;b>ld+fCYtv9;5UedA$agi_u@0M*0!7t_=>-|)iX_j8l#ajEw#4Bm1 zPLm5}thkkwRDxv^X|>^e&Cmp(SWXpdG`3UO`(dZdQGL=imCmlCB(KNEiGuEpgY_-G zsxKgiexP3N%u5lkXvGV@3SG0>5;A5%v|iP;3TfTGUDdr>xr6&p8sHyJX4zF-Qfqs> z73bf^J@Q{w@jb82%cz_&oU@ld2j~`HcQjj3NY8e>(LyTUw~c*~SMjFF27z;-{x6?> zhY_Xrl#3ApUcWz}B=fB_Pp#dVVzxhA*!<^iYb6>snLj+1?KXCtM{4_Dck~2@ZhC3e zP&r4@@I#3yAnX&@2U?4!>w{<8Ef&&wdUIc5S#7*X-~1Vj?0dHa1=Q^oI1w8u`?~bfdxfMoprt%&$g8*kj~~1zV}2^Y{0ZcI!zT%G$iJ3R z%IS9UPm(U_Io3zKMAD;I_?xkB&F4wq4t(BRW5cS3V9hdYoB#jnGgxGd3K~6YQbrhIoJ(myoBTsm8 zg_ivoAb~ykx1}!i8M5nyn6_D%^~YUrvJe#36Ngz+@y=#~%zf6q89UK;z$-1o?AW&Z zqA8}?@1}~A62+dDMC&SSzhi#$w!E0oSz|z>47osKC6j5a$4KO5P9{e^wc);YEGA&r zWt`c@!{m+?U*;Jl1m{K5q1fKU4YFuNWJL$E!vzIwvKBYgW7(e&GOf%bv8ZtP^9dEx 
zrt2Yt8#ZZ1&(f=zSB%-O)3*hP3?C40euHqY<~&}J7A_GRVqEmMj*~rV^mc|dMVfWP zt#^pTDqWR%4)n@8-&g*}drQ}9XJ4}ijq70?j&dHG7cBDdYs6iyYO*j}Z@yTtRR3)3 z)0LA%LG!~m1S4sogXy1Wwj0@?D9Dt^jFR|)A1Bw*yH>FMY2DM9$lgpD_^8f)dp&?7gfK#X?3YWfwq`sUs6Sk^gC z-XXBU=sW1j_#-$3fMix1{IB?#Uj1S@V|L};bnGHd+94g)ZjCb~ciTESP7@*u_mSqf zDv%L&Sf@lT$i5>r)0b0~27AR#LQa@cVb2uMh|9%qs=8#@6F4nAHdMkqxFt--L}I-L z>MRQc!;;WrBRN}ZMO?c1)&C-*8#$y~$HAl8Y-2Zih!s!C-Kj%Rlzq*vx?$hX$|gc@ z+hQvH+${Q&yFW)lFk$L=mfdE>{$IDd7v;-cGzF{YSd&}M(sDF%%kJbl2uJoG_#=cu zij6zDI*X8I^|;D#-~020#-Da*-GNA&)Hc&Qkz2oMF#Vk?M5BsJCj|5JV|-~tMVW#{ z6$sk)?xSs#DB#%9+~K+K(x#|$3Y(FRFAa?MtD6(_;Fs#4NtVZN6PuH&>SWR(oAL7>@*Wmpz28%1P{F*Eru` zp*%+0)+W-v(yRcxS87dnW!k08H|e6*I6Kye*62u&?&1IVdA8z72?4k@hRjzN-g#%2BPLYICE=($(aMQsDQB!sMB#bz<=QIVT;i;65(~oPYghMe)LUeFT4xkDJ;!=%W;di{?Z<{A%0ACp?)a0EDUy#-O(;D-iV)dQD^HObYZj@-< za+kGB2?>nyKn)MRhh(|*x%{xt#jryFXf45k!puMpw4om4NOd{%8kKq5?u9v4+R3v; ztCPR3%SvJW+X+S92h9QzqTIMjtIYUd!S5gRaoy?5$is}f>U!-sHA)+2ijQ|ZnR371 zo}?1l`-)lMC}?B>Poft;k3*$FZx^{5Os+KaKfpsoN}uc7SORN^_(1Qr`5j6}gojR= zw2Tg$8VsjZa*dtd(UtzdP(K{@%y6DI>Wa+8O9`?3dX_xLIu|ZnS30{A_xh*(wkmt% zuTcq?1$k?frc+QflU)ttf|0NJ2Y{4!rhb*3#SOBa1V&1-TA|8yAs^f5(~br5Wd;fX zCwUoV+fiXSF)g#W=kesv_N3yU48JZk1HCSj)> zKI9N<%}-Py-MflQ2n^v_;zKOc7q10Z{1Tm5(aH_{6R?1Gom2A3yzD|5OV0XS^lXcp zR|-Y5=`&**OZUsPOZstk;8yYO8G6|f<5d&wD|Eij1V0-bCpP>rogitj9PJ1AW+wz=^!OLRF)% zZMsH}{_~wy#G@t*!O}wnb(%| zyZ6q-kgmg}JC}YJzq`nOAh2|u>FBx{BQ}pB>C5=oelTNtB%Er_L_nD@7g+WdXEwz* zxm~`tjx|uEF0{A)JjeQi^$1h_s^@zq4npvbinH>n;Xw%6qsgtI2FvUEG^6x z9z;KxZE96`@4Vf{(!&jYr+?4pSSKM^;O!{s`w5!i2%q||t)AD$_R$IOPJq~8V*|=y z;)nr_^e%ctx3BRYeC*uH0Eyrs!S+6s(EY^OeL**@MuA}K(cZf0&?igngMzs~^?v)~~Ip4CMc57_vO_749)N(oyu|@#8q2baPBc&DuUV)=jg+XvEipUKm zLDu_GK)k2o^Zk~FW;vK=alGF>-9V+DtcogRJa+wY$1w()uUrQ+`y!`D(up-V zW12Vn;Q5bpn&qG%yq$hmGhuRU0Xw>z7_;jxS-22CG|r8Zl@sXnd&oHAhlpq+-1c3a zLho>;ci_S6-O?$FdI@L0TPFES+BB7xjV1I>+qHR%#3W@jY$C8+L_$%> zS78k*<>?52sNMOTv^tI$DN`r%&*lS#0tIVt7S&U}^@pXpQF-O1CnW8Nd0&^A8Wi`$ zdbgXy>lcIz9%J_eVqvW9QmVtGy{KzR7Dj7KG(Y>)%p7awHuk4xf%G-c4pLpFKLX4! 
zcufk5D)jtCk|Dx)TmJIJm?W+Z-lNNV+A~=Byl>?^{z+vE{)%1nWkHx7?>*@*_UG;v zyXdqJk2u=9l%R8fF*NWT?Av(1c|yg4hNrjAJ!#Acx=KFHTarI>N@SY(|4<%DU*DIV zO`~B4=~zGkKC1U10LaWP$18^_X*;dGj5(HZ;zLPr+U+E~?%U#mmI8hL{oBd;0}jaY z-^{{`qVhL2X)pmSj8Jm?n|tu#LLT$!hl^s(pzJzuMwx<^Z(XCb{b{)iE+~C1y?Y}Z z%B*U7W{S0hppqXLdCCCd4-Bl#-DrI|HQUkaL>r_U*o{{t{qTa+ijd6G#f_bDqotRR zv8K zLN$Q-f)v+O_MOBvoXzIY83IR;YhSp1YZtr{*VEWcpv&!-d&o0I#k6&=stzfF+eA}V zd=}lw4jLYtV;T+1>AENb5;gXzUXPz5jXmBmdseIHhL&A>6T3YtKo1Eye>T5ChuiRj zZujB8+<1eO0=1wi1EUo(x6~+4sv2di@9|M0XTbm(|5*em0qPctz;B(L=Vcr?>yelz z0Z&hkAnlkATzDuMBMo3 zOWS(8AN{nyw%FI9qnWV2ZOPzAxbWRm#PZI`zRYJIp>Y`j3W~S0Bs<7(eb|s6QO+G# zgSmm*wTxnK?As=uR8z9HZmC+eeokWgn+fQ%9@wrd+vN}UKT?@*Iq{H9N1UzMLU1%c zW95osRN#n7k;wG7*TgS>4g(&C>Jb}3sowz)m$tv`(^p3omRI%tpHQ$NjKR8PG6-TM z?ewMDl4Tt4Z|%+0QldG=A{-6HjouVFx1mTlmdeM4gFf>DZcM)}C#-ZX;foG*>9()) zpI{Q!4>+;%*F+NlIt=E(n~nCx3$K|0c;mO+G|lw4^{o! z?M~UBCN2~=vmNZjL4I)DNPAcZ4#AVyE2l&%Vt2>T*t(T{rxL&iyHQxzU*go`PC|c-GPr9*4ow z(jTi8Xuh05-3ZjPrGK<{+O=3v8jvp(85nK-+;?O%hq_)!&8@yvl`vG$V(LySwx005 zS0e^5h58+>7iCQ6G0b+KdG?)CADZhAr$k1!o46A>Nh$GZ+jc$x?0O{$X!0%Ph+-Sr zuAyFs>^k=5mSR(2@1-TLTuv=<{0B^N8@7*}_QtxYyYe}0;DEOb#jxE1-P=S#Xmf9e zivqD6DtTS+6>}vim1r(zRbVx6A_5G90u)Js*ep~@*-O5ufc(|@;7T34H)UOF5;aqL zRi#@~r$?9N*UHRU5{DU0mxH{?nmK32W;@QtME|Poty709psxOkh6#Fjs|R!8>eL-l z3e5O5ik*8nKhY_EpB$3N385+34aU_VY3LxbxC^)P<8q~mf@NMBvinvxHmK`W%tVqB zEBdWMVsiN#s;w{n%&V4~uyxXInz6jK;r}xHGCOWFwvG3}IfQ*N=w8y1Z;T!p8w5i) z(LCDutA?xNxji14iEt@#`1`>_8SmGfm&}~xe+6cQx4(4ww0v`j<2`6X52u(qZ>SV` z@L`FGPWnD#uypm%EnRLl;{B3X(N%|B{FwXH6#vQnX~TAngkbdtF&_fhs}I4Gd&|!x z3YMpzH62asN#Xn`MO56Jp)bL8UjyR@;L9q^K1}R*Te*ej&5tCk6EX zl$U*&=1UHD!@I&zk_=IwS*CU%9aK=)GwX&7&xn+}JQ9)SJ^3a_%GERO=1Vzo7U7P9 z%b7;}#re7I_EITLlL0ez(Yp!<1?X>3tNTM8l1dQj*c9gxg@Bt4;)|3mx=W)s+wn`` z)LxVeBrDOV=?&Co%8CFKFNjYc^SApVC0Kc2p{f)VDp&Ye`npEd&n{P1Xg>0=Mat zVj8lSi{loJKe;4ctGa|>33%OuLMfPT?F5p8;a|BAJu{NJxb&fOl;SXSLc%ZFu(bop zXb0}m=*@S;Z7N+Pm^hRw8~NMBlfpq+Tz0{{5da@naA z@noK*${E|~r9xKWkeub^k*iA>XsPm9B&$ArED$&`W`L7zLb_2D1ZzKAdS%|WOIz-i 
zC?2%$Q_&MN>x_|c7@kz*jAy;KY@6%hDwa%m+t_edWeX0^PWZ66JyoyAgHCSU|5lx zi+npFo(ADfJ5Mz8SHWem>jzUz0Wnt(?%b&*4hM)Ew3(i39Ci-g>;JE1i_@aLkRt=N zhbYe7Z?cOEzqU7)Cn*@a+!MgQ!)p3VzWH>8#pccn{tWuibnrI)=NXv9Ae9o&^|N`r zc{V45-W-Gc(HDe^K@&kn%6vH&p>96|QUT>xY>#gCC(5s!#e4h~JiYM=pRLOllzq8W zxV>ckrQJsW_s{*HD@eGi;2n5y8KU5#O=D~gs9x>?J-krSC;`=yf1uLVK9uu#C}yL2 zi8F{ApP0Ijv&Q^xe=OC1U4ZN18jk@9SBeLflbc45`?o#;wSu@w@a#POT;D+#r zJm?}=`E-(~^sPFDfd}R`^u+r?HYgC-IT#nCt#zNNgO?6H1%ImyS&q*!R8SfR9fLX_ zgvmkIPWMXHn#=oiNTFMq<-U6 z$Z9yW#)MnFXB_trpLk^VucP4FyR~dRiXF-179DEwTgjva{x-Fw=1m-rPHoTKGX0D6 zd7H~GfvZV{pX|?Y19w?rsyF#p=sr63&e&XN`0DsS?7fLQm0i?7eheoWLK#aU$s9^$ z3{B>-WXhP36f(aTJ-0kj!&Jret=$Yacz&`@FyF`uzv*xvrj` zIQM<;y@t>FthM&ux8=;)XncqbI5I1EGmJ2XJVNjceoe>*3Dok3_*^^JW}?>ur*DJl zW|S8nFnVec-Z7@ra#j<{4r?Fx8c53aU2bVM-Q7LO1Oq;h&Rq5umgzMffMWs=L52aV z%E=_-Jxa&V%x6zxKHs~+jFY!#^dCrzT{=-!_>zB;Ui4duKF?CZwzXGI>!%6l`jc+K ztHk_H;w@^UXlvBism0)D~F{o#im-Z$RAdnNi;Xl08|U-02{$m9t+fCkg)LXxb*#QUf^WWX zQjYl=a@&o2(a`VwSCYT6Y_P`cbM~YX>CXH!HP}Gu8Qo8hxpSS5=bCDg>5)PS; zJ&B)Pn8<&;ADIsynVz{I&*bNUfNt^=3Gr--*&#K-OhP}Nmdw4~I8uv>Q;ajaKEsY{ z&M;jqy(&2A)m8Vt;Ht_&xHR%vCvd8D163MiD&o6E1*FZW6co3nF zRi{PkZ~@u8vfw<@C`f15t)MLpVc{SBo<{oZ+KPB4TRSB_Wk8yw-s!>neG4W(l1^TP zyIGwKsdZeM%4G2cwrPYfPr~7|>Ari|4f7D8ExQrA)tpCNf&pek#@`}Rn4}9>sceX> z@09U=HfXkeic(suSfYr?rrSeK;jzcKQbeGQc4i98R_HiPFXDb!hxa4kJM18IMLP($ z%qH(PqFZ-{&f6;IgN`o2j2HOkL*ST2aV*_*6Su==G>?X(TiW=GKXu!r3m(-l{a`np zfISX@dIv7)&Me=HQB_*rx9hFv%P&m&I`=;bJC;@p5C#4bgAEuY?lS6lD<>MEAX?{S zJQ&ApsP@RQ-{%5oy}R-;ELl2!wFr9Pd1;SA>Ao+ho=?}7Xf{1T@fL>ok9i+P=B?K_ zh28Z4n5!0;&q+BrICuD5lKU`e;|VC}>&huNn{Ma=UUE)>h3vE$9^sFZZsCDOc+7ZszvZ&C%ZU=@mon3rJP z@oYr8*R2o@-}Emh7asTIV>gAD_ZWv~pa4ZPn(%!A>WD&}-7u<1AYM4u*AiJ`lJl_D zjF$ponCl6k2N-|US5_-fDPA?%DLYZZ7*jlN-ya=6$py|r?#}CFvG`}fvQ>6)#7>81 z9tjv36?tU4$sEj-ZfINalvhr}nx7#p2aN4a}{XYNwbr1%K+iQ`vUT zo(!tQgCrVlC%f1_p)luXE0dmaqVt7pT=T4r+U`= 
zFG8lJk7_dQe|gzAdOBmMU71HW0|We#|5!m4Y#F0rkBadaTmVAJXRv*rpZ?J4AZBZ%!;52D9(>B2G?k2Nl7{$aX>qjF#CbnCiS6;-Cbqb>c1?e+i$UZvOrIGCWGx1Uux-RNF;gTKd|k8n&26cGP%&IQ77P{r23QTVH`L zKmwoL7MFO)-NfaZ-PO(X&)l%5lZ@58$z&elvOuHu6oCuBL*$zRzWF$p+am4R0mHYh znOE#CBb-7KJM>C_$7wd$h5h4~w3IkuvwSdWly@A5rdYQ;%x(=ryQLLF?|xg$>R#Eq6TZLNmf9QqAP7^lvIL4XRe6wVWd0X{(e4-7A-HC9WE zNZ0kH@7rHb?RC>V;1pyu6R0!0 zg-t|mgvwTy&=VOb*@p4|k&Al(__WZv;y#}_e2zI{kv{q|IO)a@nw^WmWB%uXTc~RMRF8tq@y8+ zA;Md-cG=LG64<$KNWV%X>rC=`(O$vyj=FER>8`h!qO4>RlK*PjPVD9OUozV_D_~$5xNByhhT?i>nP}Kl;LK`gS_*S-XLb#`~ zA+FuoJhF+$oZd3>(`hR9)C%?1xNk- zOP_4^{>Wzz+rmnFq(^An$%4o+axio3MB&eEk1-!dWjEz%Un92MN-Of2D4zm{cS+QD zjK;2UL{eZN4Kn?SA^aHT_mg?zT^ z^6=AFs6>(ifv7#^LyUPu2q+|BU*Eu~48Xu!?kdX6hM&?w%{@$_{I*a-_Pr_(t?fs2 z^jGAS?(AbNG`yi`))4%O7M5Ib21Y|+R*GC}4v_6-o-$P^@BYFt_MgP#@hPNV+4p(W{HnchD@h=HneIe7pW0MH z_#%&4I2UR95p-6On>~Pj>8m%V5MZ^TJcgRB^&y;h`=7)Eoq^H(UK|F78AFP8b#Vhl zqZkx(6#~auL@KOW{*1=DZghLVn{p1_gfZzt02?&$mWbgWb2&Rw<; zSSWQgqpBt$K~n(G-rWFJp0G!em)@OyS@r*MJlN|7H|9idfKUs)1-G1f4eoQlbqimT z2j1@b_I5QZINxLC)r^e_6gycACNEE&#J{!AN=0c*NZ_Y@mTY>p2dMswIS51ocBi8- zhrv;hH(%|+Nh188C_L6unvM0$3YfHom91?3wXpP0pz4-Ptx^m$>epbc{L4_i~ zjl%%{vCAhYVXfO1B^%rVeqEG!oH_FnR@L#UZSnClFGt?9hE%OGiPx=aMk{n?xf?J) zP)cXcOM|05^6WJO%f1NFOs{-;Xn%*xEC>{CkJp0(jbsY`>CRy8f*byw`u+Rc3v{1A z=qnQ9YoWEqKgeB4am-(BieAg!?LVso{N?x`21ssTk8}mfZEjvZ3#o(5WXXMMUvw5g zk~?DMAWgIw3Wy|}d^3b#0{GdTZUG`#PwV;uy{{#gu8^9!UKJGJrsbMeKz^tXxE<|Q zZ=pwQ?=)=T8;m5chm+g`@dSg}Kn04=Z}hVj5AX@%J9=1>%kvHyk=Ab&9)Bxu$rW}~ zay$D6IK2<*cz?+iHXmLgsaq<}7i6~u`+s0$pU``~9%ImP4N#!K)Pa^4Ksj+PQr9mP zO1~XNMhWZ7THysu1N&Plp3l)AzfBhss9Xs(iCGya;b8P`{+%K>GMH_B{Mc}GO32>~ zb*HebIM&8}y4$0x%w5X=BsOJ^ss2O}EQ;WqdZhk@;|WW4u-Zd%zmWOW|2k|yQjR=N zm5i!`xY69u12jClZ;f)9)rP9i%dUJls*dureWRcCByAjUo6 zhPm>oMsi>S3;=Os=EzC79;}ncg_4)5?1)i-Sky0Ml*r=5svQS0GIgaXI)y_3fbYLa zN>G5RBU-k6_WeX%4!~F8h$@>yHvGcGeETK=$rm9 zioH599QTLa=L#LBPZbEJbd0b>I+hF&+eS6r;YU|<^X%a?iH`}# z<=_Z0nnpGNs6-ea)fP?pn-I*QLUx@VYIqRfE^?yrxu?F|5Ox`<3G_YPVOcWb|1S^V 
zBw^MLLjDl8ZTW>yBcj=@a9`5>4s0ApPZIQ#5F5B2HFMObgU zNE%<<41V>$@F=}=mjU71NUGGHOXuLICssNY+|$`AqlvmtzhA z3!@wK>QIKu{kM(+hn|M?x1pxtB;U1I7lR+`RJ`2HaQ4;K3NG9-By+It^8(cL z-e*&E0Vht}`6P!{`j!A3wRJH&U<2XAFm%C;8AXg?p@4asEKhC@7=&1{I5GdfElwOa z4T1}JXEbVqW9A@h!cI`L{m76UH0A0>Mj8P}d8`UL$TP?EG&21N7$`k`u}i;-U(*Uj zEw#7^z9XY0+qqC^CARF}>e+1dBtoF50BTT$OkGV$MLDUns4}CWBQ>_<<8fGuiGn>4 z&BByhH?!vbtk&s3U}@$Y)^#_4<=|R{5!V6sK}HGgW1U#uUQUW23+VQp?XbLhMxt;x z>TUrtH$XC1)kj#4hkBje9H|>(3Gmef@?ICrlr=xHbxBGUWvkDtnrQLqjo+uPV+j1| zps*!Z&Agl@I2LCi=<+AvEjw32CQl8)UY$ULtrEt6m})MH#25@Wcrvd)jO64yi(@cj_U9HQJG7CYdPWnDl;22vv?*4$4HH3L zSGkqx=AsIN%+tJ%eC6i|VMc7Isn>y16jES-8>4DR#2gbf+v7tm@5OnyZMiBrfs8@h zdOb8C)vkNphck%*7GqgYvvqttf%ziRiY=#2L@VUpzAZ(N4{l>kOk7NGp zBCQkN4@+NiyP-nB*79pi4Jr*{lxYg|2a#v6t0?mlY8;!SCMAx^S@H{{TfsUx&-gJv zUaomQSE9$xS0LntQ_GCXHvt>r@qcfCyASo)E7A{7lpf%%5a^1zz$mYCVp++59{BC6 zU&>alQkMUO-y@qrNt6OD*C0ZTlx#m;KvMEuV&PlzAK$SV|KZu;h$fpkhQdMF4_PNJ z4<>t^lNI}YP%G9y)JEm*c>a#6pEf%Aa1yKnr`XB<=O0^z-|0V7=DEJ(#Ime{RTOPV z>@$DEW6vlTmUG*WH?HbSSZnYXBb{BJtv1DJdT4`D8(G01G|^ZEiK;A374N}@vn|33@_XKA*i%+kMvb7 zf8j&e-nEr``+3~HwJ6QQ|0wOVGs-F|oU@x?uJ)cDx+tp7PG5`RtZ6p1FE^0`Z;VI* zg#R2!5ia|-wAj46x2GcZT40}>;UZeMmX|?r$%X+*CoG** zTB)u4Ip)Aplfudteo{Ya0B)kFG^<_I(OMImdfKnek)z*KJ>OVn8$r?&ETO4&KTJ>o zhJ+gln81Ye!>}~MOAkM4n%x6~>Qq@?j_qHS*A=Eym~&Q=tDF@Z787AuSP8QCDJ?2I zeBD-Nl1}-8-*&eTiGj=cLXvkc;H{OKS7>CM|B9NyOdJX7QC9QKcka;tuGa%gQ1jHF8Caio za7F1-hQ1&Qjq|XwS(!OSF-p#=Okgml z;Wps+_4;rdJ>wYRuq=@>b2<8`Htl@R@A*WViuG4(EWvcuah9C+`c*45obNt@oQuLK zEJ~L$Ht z&WOz4n%L8Y)GER}3aiT?mc~gY!zP{7VS3|Zm_e(1>zj$bYL=#du`LLb}KR>p3$;dYUdcCKh|JcD$Y)>UNbgCwa^!abz10Z7p?a;vxnp$^bG6p~>rh`HT zmZEk1^>nX$HzrkXcI{qUp|Sd2F7-=y_WQ?ohOL(Fm>ADqu(T9pX|HZ0gFS!U%gxso zguybiXVeb>u_U*oC;prL)DEA-t`(XXTjYjChXX^UYu1-B1FsNQ#S--n0aHV@b8BgpPW)XyV>FM<3_cs+=t^2mzZ>tZrGi&!WPRk1uATda zvH2{7aBm_R5R~vW;9CB8=EoOfKjd;4(@d zk95TBTbqyB$1zZb0)sp<4apN^Q{4lm8VrAC1bvh?I6Mc+$}Dm-XQ!j^Pnwl}ui9d* z*fWt?gbm)v2^l<0Q8U;JFo%f|N7^hC{X;2WiIBi9yi{F0s+rhma?#k_=_%urk<}|_ z@eGD+H8IVZo~ 
z0j8bVLy1#Y#WKY$4{;C0Uaj=0uRoPDzE0JccyZii&(I?`bxO$qz?~8gc{umxEV9D~ zyW;}iM{KgbU0@1nm^>ysG_)I4h&cP1Bi*c1Ga#Ai{P=iiCKNH!SSXQ z8iCB%j7YZduiqC{!sCgDaTB>QUvy8KJr@fv zUxO?&#V}zZH}_ondW%X~IAI1Vnp#M9aj~E?$BD$i_h!dw-A43Hk}t@8N{27^OMZ2@ z-zi?=E8OfD1)z$}4IcNU7%-!t&d5>kGih{bpf#$A zqZ%md>G_#ybe#gu@}&J&)jBr+qR`r$ldgdCb!(4e6S)s2#wM7G!Dngv!O9E|H6NmA z|FY#SX-Hb1Ag*(GNxySGSj;66r&sfx_|rAT{l42JTMF-<0-e*wMwpBjdH+h7e9dRL zMgrlKW@Wld@wGi;-xha^h@!_h9M+W~igkXw(+Ld0z^Bz=I|`>q-~A2D>Dxd>!g`LKUK;io@8=-T2JnS= z{&z3Zl}*sK&-|<_JF=xCjsll@)euddV+S6Mxw-^|dD@p8E?%{vsJ`k*Cu$p^9R5=F zjV->`EUe#|V8k9#7O_d$5Uan3I>Xu?LX{A*qo29C00Iy;F>0%45hls2|N0P|rghIS zyi@ZYr?eX~b#6@s?NUCj0Yq{r+)B0(P zk{MiY>q&tR^^~*6yuuj!B;qNo)Oz{Bjo~FP?DSwRZcc!mH$Xmy;W}k{_3qE!<==|J z4qQ}J_$I+kVPy=?wc4ZLOobWnGY0S8m*t(z9FtV_g4nGt9<5EO`&B#txGqdAGu4up za?EUeS*W*9eS#BH?Fw5^a8FFoHA~=MW7X2^Y~mcDRJyGw1YeL-qb>6E7aWTW;`d_A z62(M)0ire&2^gfjNPf`5GUK_JZKA1ES`W`+MJ{DG(1jK6t2DCC_6QAYl~oksrm`|d z{v^W&(`d8lvy7prxx5tz)^x-~^mf$Kt+?6_s!Z%vmgV8u7a59?%p5bc_y3hxNr7>Z zV+V(_`$|?&Fj5fVb4Mgw#n_j6BAzIVqn-StGha7njV4lJK^C(uh*<;N)NX=-aP1oa zd>NS`#S4k*asP$JSL#{jGc{#L8dI5={%ERVI1T@$AwToM>Lsa=1-~TeOaToKz8myK zSX80+(eFUZizYbPFVpcOzn-`;E87YgQDR&KV1TcJcVDyO{~HfF!`;KnSZuKwz&dts z6QwuO9LEUM0;=qnhd`_{!HmHefGZQ+CxEXmq!$(D8Ov3t!vl@rk8`S}fwz30zxJ_X zk!%S$V7uW{tf3TII1g;nrR-=RxXO#Nv=ox&rZ=4TX&7srwP5IhDaLIo_yKd1c~gTj zlnG`w*JE{S07%0gOFwemsd3gG5k2B$(?xI~j`aMh1!Q_@^&q|>?Gvxtc{}YoEOMeNmE0LA(?en}#(Ri90l0dG#_SNqh|vlIlh;)5Q*(Xo&Mu=`PnYI( zf{tFYKv7ZdlSPh^+%|$_iMkWA5V$XTTwgrK?N{D0yyiq=2F}FwqRHUztJ60NiVDA| z1zyL-><8NPFfSXxrWk@+j5s@cQb-hk)44=Hu=Z1{SiJO5Z(C!w;VxTzLFqm`Hbxd* z;1G$}{?xJ);Im(9N4D=h%El*XV4#L~pn{%<`IH53$V}GOa=bacBYWnhr76bl4wB@a ze<)E5H&UOjlttm4XGM@#X$GoS)zOUM^NA~^Pc|oF=u>ZF?Cs4d(m6k>!@E+WKnB0( zZZT^0i4Oa@9K?>rYvO2hf}5cywnZ-8hu_XoOdWgj2ewWJ=OHs{aB}#)YC` zXNrPtvZLG92FpwOq?J%rf^F~La3xv_n8;2K6GHS2V;-B9_FX%4{b(C$fhL_3@bY8X zMdTO->fjjl3>JPu+*D)Q*S|6OT{~-S8)-@7Hl%M-W%_v}^WX$LDTF+MCLnGFDC*cn zAhmygAnOxDcl#DmBU~xW>u_~}kSHsHT~kdi>F_cWIeJM2^dK_{#Iw3m0UqN$U*uv~ 
z+JTF$dy!V`d;N;yl9Djh=M9A>Z-zJAc)Crb>K{RIzCm$-z~XNphiQye+ECi;ELGnG z16J<~&8KA0G)c~dDQUYFxiebH{A(-5zyh?=AZl3k`HlPb&oy7>jL#=VrWZ{<;@|Yb z36qsJ3F{X_kZ9fKZ&7J|+q7TORK;b0kQ5Est?EsTFd{Hp48{h+mL&e_bO}cwCdRIw z?mSov1P!`cOu~z9`MP^FzNt1Ch6eOi79f$5yg zjEO1k7DnXbf4;{|%>pl872&4H7|O^e{>~_x?B$mRK&Z%u`}U^XZas{-YYXL4+yi9# z79=Esh-zOZ4ON_oF=B*~j8z5g-xnR#Jz+HaY>*|QNoj>3dzt@rAtou}1U>!i-g>(B zd_y_R=cSKzd0;9UWK+=xQ#qdjbJ4WN`PkS}Bxsl-~ux zEH>5@;XR$dw~z=7OsA$bc!;L-5XnA(2R&YD}=9inZyxpK4yscy$2Eq!SqqhTixs}CUzh7$}jgOqdJEPdfG_%b6 zAIP^$vV$)CT>XpzVskw&A+r%qLRXN7K9ufg9aQ{Td;TZ!XZ`}l4Q#mo{t^DTQj)6` zm`&s)0;QO3Fr4;%}3S^GKhjtWb)K*@K?)h5VE)pSz)F zIN*47dbNmu(;p|bI_sk3TW*1Z_Ejg3j(98;#_B41Lai!DwM?dLX{~kVK$)=U>`}*5 zD&;67(q)%QP^`YOF(t+-1AX_Nmonkb-afZox1PPoB%GJsL1Dkpw^@zM4^b3pO~_ku zwP4o<9|o{^r-{9iOrbL~qstW+zcy0aViOu1w#&rphBCEnaTHdKhu~J+U@}+p@-Zxz zY=d?uWfFwq?&5Y%E#6$QvMk;SGW;iQ48#J_&lpWXs+tP)taPWA2{sH5uBT&w+j2J5 zs?tpS(Lp9-C|yS;aRch;f{#Q>txr|s;<Vw<}r)z{^*VJibx5`VS%lMJV9 z$SAb|VJR7BPJF6M=(2xhY6F$7EcfkpElIo+N2wXyL&IE`ek37{qFjZ{CowHh;?&bd z!C>LiOk4lcP>e3j*BIcBOF#S$f1U3d6C!ILp(eTkBnM(41E`A*Ks5_Z=f>;`fX{%s zlaQ$7e#aYoZpnI$yC`k3+!Pl&N+^8*9Z46T@xDWwn@eCd1$_OZeMR5}NV!eo5W#G9 zXheo*-xPR+VG!8SgE->>SB zCqjqpt^n+AK#Usi5wUJy5+t1abh9!{a-pO=Vtr#U0GuV|Ti4G_OHtr)7!`yR*B^oT z?c3Ym@5X-XT#34cIn1Zo^C4HJy!oMcDuKcBjeCEA3x6K5&TtqQ9;6u`V}JL73;Qj~ zNVXWcBG2+CBc9+LIy^kT(%k$E09=F+C_^P0UWQHfWG^VL#n3dLlC;j^=ZIp9IS1iL z;~wtDG7{L{ErB_S@d7?0v}+$X50#{VZ=XaqUXps75P>KXSE)PRjLBEj$hnVc?oK z6dz+~m`EnpL3yb3P??>nd;y-M5-IR(CtNdrI*k{1bwb658H3vmCMih;U?t_GUi~e8 zird5l>X>lR+Fhv&?#DKy1oTY6hL108>cRZeodA`*$Zk~t!;9gD!UN97$~NdQipdqG za@27(14r9US7+w$Xsm+y|248J*F8FNqJa2f#-Y}0<#|*CF3Tl#ZS3ur-rZFEDbUni?Y^Kpj&rfQvi8jS4y^k(JY>@xq0&aG?}HjdX-J1w+jgn7MD_=394$jMq^k zY4voCxt(niqXdIeFx=+6`Tcsb01_BUHvlNR78I-Yt^evwj1E;^JlbYy_~R)fBbwwWd>^I8uWdfn9q!a!TG~Qvp9LzJWyE!%5dN}L2M1S^%dNm*0GtqXw=e$do~URib;U(#)eeP`fThV64OJ^S@KblFq3FwPXZ1Yxz_*)q z2{KX0$3-D@8oUD}-$Lda$b`(vz6zm2DRB8w8?#@NPPh?=VRX4#Ll0M^kfky!u>)ZFDp@egjE>FqHw6{v5h7>-wh{bhbyQ*= 
z|LWgZ0wIMMuG-=aCwTSFLiys527O2s%4xfNAD3I?xU=H)dx0bLqnvn)(K0d^vPW)3 zESEi({`Be~8a1y2*WWN|lOPd_^$am0nP7mG;Qhb>5uPYHy;QPf4C$H)*m%~)Tri1} z?@1`WZYtoV$V@*+#UaRT9zwO>4Og=*ty)N+M`|F($#R-4mlVu9#z|5@SN+>)ty zXP3}Ob{Pqz5W(7eppdwle2E5r!dbB>)k2fKv#QjHM@%V~;9mZZei!!kU*{7QiS zkS$Oas5t}`tS-AKK^Y0GSmx22f3RE+$!NV8sw9oU$QHWRe!ok#aXAi{jx6ypoy}39 zLzy2}L{!|De(-MLy2~C^S0Xc*q@(Eh=N&t_)Su&R2t7YGd(&jgmLP`fq&-0mBU_0i zA~oQz;i@KmK+EAsKsFy|KzmM1+82$Xubl8k5dcMtm748ZpX&PbO9Ger-SWK*T-y@Z=kk9iDKsCA=MSF_eiruVlI z+xl)=ywIUp1%&s`fA(Ge>jU+`(`R?UbqkX>%a}1N1xpjoQS7gvW1$OkjfI$0cHVTr zArcA+r4Wgn$)Mea#-{@T(g{e}gVk#1U!z`?n`Q0{37Jne^NBixdkF-aRD3*9KW0}w zq`T(5|Bx?-1q5qCZy>K1tkVFD&=p_*7`7l7_{VjE({D6`g;}y3FpVR;NBx{=Jl+*O z*Aa@wo#Ytfa2e|1{YQgnCqbku@Ytx(s)4(?`IK7kszkoNnMk~D%_uC2Ve`6Z0-+@V z+DcZCU^-8!iO3qY_Um0rgH-DWp4q&{TQ`yXhi&~`FcdF zCvm;2p?H=RmPwXW`P}ykHxu8Br$ttt4Mh`jhY;d_j2+03UmeC?#P0>L z>hl}TX|pMCIdP$v!%~Rbw!F}Ef$5V}c@v`*WPx_;@xwif#%t8c)bF>w{$|xOk7(z^gx>Hnh z)rvk$N(YTY1&sq;&EBT8<3_RzYs^KZ^)Ats!Il66xtdI4H%dyo?t#qqcz7|X4|2k- z3|&VPU91Bk9p^mfbXI6)O@hSBu{ji2w`93_azf}rQN)OcAg`4o16}lt;A^%}qA;Wk z$o3Uw6~(x*1BH>0dJRNE#Pogv9i1%xXe7#|>csMJo^@xEc)$M~zE%HKA9%$d#f|Hs zo0UpEo6@AUkH_Gy6{w5E?<1H{X;A+c!VrH{rI0yf3pt|<6&{O8xH79vlAqH#EA-1( zqCAFYb4qn3Ldmz-J@R!SSUw`64Si{q(*nh&;J>L-6r*s{KCR04gvd4EaDfF&yokXI zVba<)^1%@?m^3Jf5Y89V?r0kgv-A-riz$Vt+(1s_2C#!a4V5~7Lc^=1gB_q_* zZ7Zmce~;a9K1*X1uS!n`VUFRvo_9|>@DH1~E?k$CxA=l*IMi#N34%g^48%aBM7n-c z^iL}_<`}pn!}Zx;ALR8-arMi?Mi%uv8$V8ME=NVr6NyX@<3nYZ*8@UJd;y(#RZA-a zzdyJ)?vNTt`NE@l4kwHHqw21r9qcu~B8IEM!#-zF>Us3?=K$wDK?4gN4ui6+_>uce z1V-?rC(V@qzEnY)XPV!LCZ+k!q$)Mjb67MkS*#AjKR8-`zZdjXmHqDjn7@lOwK*_e z*`n_i7&&^!z8Gk3(+uS}@jKDmnh!xVUlR zm8HRZbGhx3N}&JXiNpuclv6K4i9eJoATf##rF7ZnBxnK%0fa#nUCq@KSP}8k5Ne-N znIf38JDSgw#a~Xn7Cx~wHyEI((Brj95rggh9ZW}80CX88MsW{XDP43*)U!JvNrA@D zgCSx;&k4Ct_h{BowE(otWC%c!#0wtXtmQx;dU291n4K5 z$Uqyk@fi9R9_~oj%irlZ4MF-OG|()9>A`SNDW2r-os^DgeCQO8q~3B8UPDa>brTmS zm|+p{J`Si>zp{`rWW&+zQRpuYA8{QNvT29rM)mCtC^E$$p0pRY@LAhcs&2 
zx;#6Q9XI~tFOb~E#C^0f|LM9qivRe+y15SU2{F65!L14s%|Mxu2skY(bR7nIiU4;b z%6P{2<8n&lH#2TIvrV5M&P?u_uS(e*5nd;^mS$gl@%E)pqdU9dJ$j$KXhY-oh zZB@?Ts#3<8GY%9#D&Jhsf?PLH&K0bp@~ zCuHkLGP2E;e{S9mCw}ocp+G47rrxSCuOI&EARu=7anPK=3*ZW(b)&7axCieofYbp4 z(PO(UwTq)w;)ogN`n1u!@v~(A-|Z83Y051;bt**oU{v!-ov~*OaOyt=R?@1%E~=Lx zZdVLbAjp7jBoAxQOD;TzPW}{YGej(C7Gcqk_5TTl6mf5yphqr!oBT`8?@%qmkc${9 z;YMwNK`iMNT*4q9exe%}n;^N1Fw2630gMcN(gk1+4*~Yg)G7;I zpbunqMP5CdspA~^w*z0ny@j%2hKK69Q{6Bv9w+J^nU?-~2{MF_{VNco!4-%Z{60!` zc%nYe84O|tTEJU3kKqPq(Rx(%;QmrXCTl3=@1j--k)xVu8$DRzA2cP8xcj&v&v3IG zDEAX((a$k4$i=i$J6cr^Z4}`@67{VT+#`ClTvMRDqj_)Z+P9ESNfvDr4&_68-2LIoyb~=m|iEJ}wqW ze44k1h@-!Q>4VQ!6C$ALsN>q(1z-gqyC>-S92l`|9hbUcy+9DZy68p$)4O zopoCy`~fJ1JFjP9wY``BUBvozNI1>q7|0iDae=wZ92mv!D~Ncx8#yt2yv5NERl#6P z!==MB`CmtASKldYl@OL$BL5z900JrQFgS_WD@(KG1!K#YWA7sTB1b%-oG%==brTWj z%3%a9=T_u~2LxFkq+_@+f933;vm^9J{|-6^r7?P+k_dX5m4w+JwOaR7vG}m?qwtk? z*Y=X16a)QNjO0N8c0i9qnfxnt1GT}}15LqB&^--<5f!p-3BYO{lw|ergyE(1RLe`o zT9uy(6UW*lgx1fmj@~is0w95pWFmYi=@vNlHbITzlLc;>(0hL+oaxoS=sDj zJv$t|O(}4MC>@|E+*BK|o*pur9`pFOuB^Kt_m*|bpjHs!6O14%dT;a9H0a&Nwb_t; zV(X6o_uv1M!2e0$zY-uYtgc98n=PaN2*m^QktyduOSA#<-zAcU>gxyYg>5cG@dxWJ ze>M%LSAVX%Y%i~_$hH1H>gC!S%Rax>T8apOX5H{iHpCs`TD`U|myu8+eT*_)O}(AS zu(_a5VI2nngHutX^6O`K@tX6mkQFy#&l)_|y_K}#4=)QV>pBz<59WlnJ3L*_1YHgb zF~iq=RwrrOjxbe5Y-&`i>RaL5mKH3{isME)2GTkM7aL+b>r>&uI+_3$KrkY!9um1A zG_g0l+HERY*Dp}K0mHl&#YlBQ+Stf=?|LDZC`2Ybb$|-x8|WeOXWkcI%V83@87wMlrO?cm8AHB6&7V~xvYH)gJ9=J?1Zb!M| zzmx6UyO(a~#V%74H+tRpvbULTTK}27-Cxi1j;XS6UR$c)t-L>A{oUT|e4n>7k84Y9 zR!-$y`xfxv=AkOpxlwQ0&Bi5T*8+<_Gk=VoCAGWbKm0m&!_=X!bTOY)+-j;De|Bi? zN}H$ZLZ)}9^R@faZ9eOB1vHlW0XzW@HUcd;TOC#wUptgum?A!nJ(;tev7xhXGMn$x zg%!DPiD#!sQX$nhF&G-On|^bYe{=cl^7=&o=R)%jPV95X`8Ic!?kLy(gQsYbYR{$Y zKYqq|Ki^%3n|`6aHNV7OZ(f+{qY1O{nZA5iNIHS1-!Nd4BX2TYZ}OR9S~&)=2? 
z-|w5GtjM|JWpBBnS@3F_{nvi5Rx#4y->@>3;(2YQPN1K|_g|!A*Oz?TJQKsaRjB&+ zXA-3S9@bmq_ji{@7byvdAC^e#O&j2It!2EtsmPuAY8slYS(UZF3Oe%cvF($8bbs>I z^rm9+8iU2KuXTR*_X}WBzWZ}wkIaodrH>v-Xm@Wow|RDg=yv$)OTy7Se%kQQJo=b+ zNpIu9lRnp-c16DzN$U?Ye3lYlJW7RHu><3NU6D+_O%$VR<@MwwH@oEdkz0vtW2Qmpf+eObrwynMS zaInOtSNYIPd+>*YtBd*l0v$@6+TBgh->&W2+4)31VEA*B&63G{HKCK@_DvNpJk7$c zE5I`t(RoB)rI^x2teiD8S7VtC2X%`tz_z%E1)mWHsTPp8y?yfC6Y zM-1b9d;y)=tQIQ9K{-1OmtVEy$+sttUO67$P;hXe-yRJ8-GfEd9EZ~2^tE+GJ0sAZ z)9I(QK1~ZI(J(z3v|U#+(`4!teQ;0?6(bzTXL%rV_WG!t1WR-b&llfsXQ%q23&2uB zOxrvKwYz)Id`L@|uCb@nv00LxzxIPZNGs<@_N-NM3O5uM5@ARo)gE0gR23(;29)QT z5g76QLwmCG_Nb=Z#)$WC7V|4LGnGtJBfdu8&wZUR%+KcpN;vu(ul%CZvr_hfIVYSr z#CbRfi{T8zf2Q<{-x?C%v$Qq)8_Qg$jGlPSE0s2Uyb={bQc?bT<4a+0Q}Vrg1Fxms zjl8YIpu-RnhlUaSnNk6azw_vIubo7?7DJt-oL$jB>R+{1Ied58SDcf&`PfoE=Yk5p z=gv;%WASG{a3o_x8;2f|*6VgYf^GV^6n-%J;RA&jJdEJ{@qJf|l$4fpi3g(dPKn1v7DQw)3z=q*9#Y1m{eWAE9i zG|dJrq$|!Gr5*mTGEmz8>UXD7Q0!^T?Oz38hTBEUdSMV(zW<8;cKwRo`-k_n&rO$$ zv^;D4MHdUE&fmckPem(wjqeiIPLfby&r=fV*Tp5@uI0dQNBx% zJ+f;XWBtBTQHAA$uBrzhn&gm$;MrT)3y*NVSHYYROOqw2uf8suI7oJkKaS;7&arR1&-B&&ywK+P&SrD>@$1sT zJL%wBL!zS48(4}ALmL+$Ib zP!(pwZ#BOPx^)loN|rEEPTE$QO00S9oI5{titV(|_Ct-^%C>oDH!{jRD}tpUCR24TEsdIvJuv1B$)(h#6*ZVU?73vF!uxB`e^Md& z&qZ5LqxTmj)*5*~T{(JvY5!e%(Q7sw3?@4YH-n>g?jP6g_7CYT&*L08qQf`zD&$_w z_k~cZC*RBmt6n&ihMnKj4O6s9gFUti)i=BPqgY(WCv8dR&#dn)jd1XcxpDt|JyT9sBGaNtg~gxl#eAhO^ICip zCfa4sPRnh^90_k~3T*YQ0RRS`!Di0^HVxch{vD3y6bqN+H-(G_;Lqo6Z(lS{4sYFI z75o0f6;tQvFB;E!*_A4A;&ZrhS3d z27sx-a`97g!+#(JQ$SC6q%n?9yyTT}WR3neUZPWm9h7w8_$g(6Ys#zv2B za+;lKUJj;F?Ke;M+xFUDj-<%fmlM6QW^-*2=J76XvZpAc?DwTJO9Be%Y|V-fdVPVw zk>BKyD>xeYvn8*ay1{kVPWuOo$#mA(1QBbz#FNtGb!FqhC#`H&2p>ONmmWDMT+m%@ zFqYg|qRMmaQs;%iw?og{!2tbtv&<>ZSLcM8N83v6y11u+cTw{y!WAUOB4Q6zuKM;Gpl6Xv2=XIReXHUt^JX|}9k$8WD(v2_S%6*y@-_$R z>xTDmJs{Hc>#5kp85w8T3GVQ|_9KsB;kz|(-+?f&G2Si!=ldI8CNjIPvdfg+A$#WD zJB>|W+UCvD2Sw&B?7Mn}wi!|K9HSH<(kbcnbzd#(%kVmB{k=Z0MQmF|$aZsB$#10x zp;}K>1%4ErRFQZfiV%Hnxrfm&`DW*@r=X>$+u1-}mmw#m|Td$Kroc)}4@tr3? 
zm*|yJKf9)i`>wSt4m6=V`K^!+{&dIhH2jtWv<91a(DA&&!tO#70I(aoZ;J+n6^z)5 zhi#0mWMzRfc zl9_YG_|4g?wr%>xL4msrWf(^-8m=i-z7wY(Jr-TuRU!9h=Qg$)hieCh6!JDNM|NHs z`^Ik0uBM;RZo@@0XO$Tbt$=}aWML<|(Wro$?Aa+D^_0a>21BI#f9+j)Je1$to;H#TFV}UQ^PHo=RhU&{Ion!r6d;bVX!$^$&V74qV!1Zeypk+cx25p> z>Qs?9hS-lxz-LgU9Eu>il3zlHr}I9NXMZ+(@$8(}(uJLbdQO6NjU5wl3{MzYQ*GT_MrG2aDl0P&RAd5t|6yHi9IE*6&8Na~Ty zpTFsg3Mbd|Ix{%3l#UL&4d2BcZi6a-56K4V1kXX;xs+uLwMA!8z z%g*NT4>ZaU+8ANBfKjYdH zwbR-Bm@jd!(R?!n>LThwYBfH*sr=9NF#m>qUH0W><0fK-SdB02&5i2C88I%Aq;b{L zztL!%j0yw+ayv>!{XsUmD8QCbVDS^JmAmSq7;1< z=ImUALxV`Bml+e?b3>v|SUM@1bkO>OQU>t&8wf=Ex$%JXv&1uJzv0;(M5wIY_YdwY z@6xQo_vt3*S?tmYN!1zQ=wkHy@>>1^87xs|k=wM3 z?<3PF+(Ok~V21&>)8e@qv__8lGZF^VEV>t(6gR|z7s>7DT!#+^s~9f4Lb{DKDZFtz zRx^6x+(N1@H7);Gg*@?z?{)}@2J?{32$;bQP8gCgLS?uwv1#ag>B)_2znoS%=lmHZ zX?a*_jP_x%-Az6$;w!c6`rcVb+&W#&!#;dDiFSOY<7qRch+nwcxxZNH-Foz!GS?Ol-Cwkuroe3-DAY320NH}B_6$TZ$mY4h z{WPsS*`HY;sx^OIZR}qk(lDvTBQg-Pm z!ojn%K3vv&#T8-Ty}%dM*5^vHPV}zju5Rn@oc*f)QLGGbC2;XwrGagg zG5T$CntGxd28Q;Ezs;PtAt99bbD+SeHLsvd|GJN_?4K^zA9jY<_&P2PJhwh>y2OOy}qc$Pon0p+4Hl_H#swT`ND;dkMSwu z#j(JEvD1a03y18LV(f;KxXeO_LJ=}ekC5q}u)Q@Y-w%pW9NqjWbAgDQ$39na`_nzQ4C5X#h8Rjc5_u%qs;wLKk|(i#NfK8e;Gs zS3apQF5{l&?e@KN6dKQhpp%FKhwH<4ac}z3=s2enZdP0Inr`8m(a{5Ey8h7_DW@;z zsVK6(A>R|J-ulq8=Y)XJCPLH8T13b$hZG}ORl<zppVy{@xSikp z8`DB>Wj9DleJLo@{N$q4i*}rmM%gckyAO`Pr_VV%K716}yauDKX1i#8ROR-SYS&0+ zl?1DxtP29FJ&i5dV@SOD-k|KDtE`}EqdmRVvWJ@@-rAp4TpD-IgmI zasc*v;l>+OZ#bwapwk3aw9=wW)3r|r{V^@KDTQxjAI0SLAY*Jvlhyoo=$o<%8Dr*p zodZ>q*j09mQqS+!4=TwY!WiDbb&LOb(ADH%zctT^_;Vf(?Bazwr^EjYmJb9vWx zaCA%d`Nqqqs}8>JfeFFJYrnQdRNilHjy`jKPxYrLd$w$$8j+PFfEAiwXJ7qPc(z?#a6**svk}M zjWM<9GJEPG&L23)xcZgtv`O|5@Lrfs`yc5DCnM%qmOmhy{oFc>of2q{F47p*8O;O0 z2@U&)KG9COgIUyB#MEWqdNJGY-D~f*d|*)Ut`LZK#&^ZPDrG!6MqC76}6WH zXZU5JJ5Fmvt2;+B^716i3+^7RKGU@q&Obrg;8N+Ki&^3))6tGXKjio?m)R?`KHANJ zLj5@x;6~r6#>*Gi^-T__NaF!iiT)6Fh*w(UAFq^1JydJrEz?rU&xt}+BqEYX6Hd0G zvmlm3$iAQAF%xu86P_Y5RN$N90`LfX8aamc?H?PryrtgB z?fR+2`|gh0Q<3iZ6W9>LB+$2NmfyzrQ0Zu+!zi`cDgid(vJ2lL(f@c2+C!{;eY1CO 
zh~BHTj;r^=)GwzFd~%DF9^g)K;fjHnFv>RnBEF`rbw_%W!uw$-%-D77GCZ=1qgi_LO@i0Vylwcy$Vt#%bSE6(- zK|lnG!1E{wgo)Ci!Gol2dd-oVZ*q0z8HJ}EZ4cL6`xr_{GNJZ2h}ieHHoV}_)^jw+ zUu(mS){9`|u96hak5lVz6k0aU<>V=4tkXlv#vhyn`&Iy9*Z9~Pf4H>_79uiEQd8Pt z)@S7ecWqU!o){r^^wQh-ev^2L2{kqYb67tYEKKOu2UTdben#<$A%5+e8fy12jv-N$vuGmck$SEMD zdevm8XwYjW$WL_by3Xh)ggO!iXy=pU{RL3t%TM!aA1_y17k}&V-a~)J)%UkEwBOT9 zg_-x-{3E0-qt(mUZ*rA&ESbltUml-^wSQzxzw>KrGd-M+Y+QBX_oIh@b*xFu#ivPF z_fs?i@a~D_=|%zjCxbRgj5*h+M;gMH^m~kq^b35XL=^Fu^q}5K_JvOGu3xN6^qBGU z{{6d9BN)gdF%L+a(NS$6<`r5~TxiE|O%@uBM~A0xnIEo!UK4~sFUWrkUgW*YupcW_ zS0UUj`AL8sA7I>9fa0?9K^wTwD4vA(Rm^~m>x52qfBp|Ci(!=objmFZlPoRCPY zbuhtLq5vuY!yM%^cP8b|Z5*UOt?!B7@CMx}b`!U`3p_dw0NY6%Hh5b50thf@iOyuz zZ>E<62fcr^dS9}GNAD>be9(mnbO!MhH@mj0LZ#dL5@PiI;*{-+nYoWE6h`Q+}i*o6k_~ zb*sKxhFv9D^p-vO3tn8Hkq{Y@-E2N*Qf6N>VEb||QFiFY?yA(ias@uC+khF-=h?>0 zt{c{eHPSMGsC*5vXSq>v3HW|G=$bdOk3I(B;ErrI_eH8){mUhuaaRb-J#goZZFu8A z3=2`0yqg1K6})NWP8pOCO1caEVi=+7vJgDe4V(0na|1B?_*i0#D8m#;*HTEi9cw55yZ~92 zpuIh4@rkk& zzI!rLMA*JvQ;k54>UHJ^Tt_&tlD{53GbuJ$&1yKZDKQ0aM1WQu;xQ=N3f|QI!g@%6 ziNEPiS<`)*>w<>t!P1y47m*)$3m{#k|uxz|Ty(Ug5pz(T3k{ zj;F8F1m7)H5K?P1e4v$HMyR0BqS0A({lSk_&;7ipqz>`|CnEP%z4mh47H|ygB&Oqbh;8LaZFF_*xPmDA6GX03@kP`x_(LSLAU9ajrLm&I)!&Ov3#JTWiIOjh(R!n zJ%ZH2Jo_EmixRrCSl=onC{cJWaG_jILC{M$C1oAmt2;7j$3cDN4Jip~jwX()N0e@C zt?v?h>njp$`9d&=)XU0IkN1q{>fgC=mqJAWmHcRA_n7UFT*4_>eyWf@7;EaPOW>!s z&TU*bFcW=+pc?vlA^t%|8aMj#FkVPPOT;N`Z}-_vSrEbr9~B`^xlK@l3n}$Eu_bpk z^@6my<6alb9D*2W%6>Ud!d7$CoQbew?Bg1@uJjd(|F?}v+U1cxEHWj)TyNAs!+4I1 zo4Am!`oy`p(LJ)+b0%NSG*Ofz4IHd-&?@I;w79ccm5MlH5}W&1+-O6j%CgS9A;DNH4FQY4-mJRHI;LsmtLU%ti^UwokNpTn!x5pQbAQG*bUh zWu5qt|Gd|B%In!iGTNRhiS-E`v{_d^dMzqR$&Y5OBs71$A{?{f@sVpdQb1rs8o+fJ zRs4s*LP9-XR(Y+h?8e0uO3a2i*~e$STW;$H$b8I5Yj=OuM2JxsBKGToL^N+|n5A-A z^Ufow==?(r_wT}@Nhm-kb)$<0mO%C6N<7)ohS+N1pW{bvr|esM;$-L(ykTD4Rx1Qc z@TU=ZBZ`-W^@HKdIRn+vpBiPKo2mM%MV95I6oA0WVPVHmi@PJ`uUmvQ-{N@~f9LIF zv!WErG_{~RaOf$8rN%A793OJ*({+na=1KPF7EFZAzS*a?iXEfTG*E-ikp=4XGsEz? 
z?vEtcp^V&wJQC2D86EniNk6);z#*GAwN+ln|5)2K;5!2iz9o`65uJ@SRGgNcg2gCH zBQpUc_Jny!p@X_{W5q+C_k*01)w|DY)fN!Qg%k+R;8+N$3K>g*q|&~*z5ELAZ3q>i zgcB?rP>4A8UhFDVpl3UoYE(YxH+`S z?CIB@EIfwEW<|%d5Z!sa*Vq4h`@gm4!tRdE|$sul2L!V^)lWa4|}sV+(iH;ZMK zQqyeJE-5V`{ncLZ#q)vznS*VcS6$i8T9pO;PL*VCy>ojon{Ln3iJmuQL>Z`17_c{Q zKV%)tuXMWHVw%aA$VE~)gpO?8ndwya3<06L@ z(oRES*S{%ydJZlu?_L1eF&Md_m8|#{H2sEsD#%u5_7yKcb>)Z1 zz_%<9##Xj_O#a}j2c*KskzPsZxv=*kQ4N}Mbip<4`NWMVU2>V)W+fmhQ)gFX{Si<% z=aZw{FeTF2G*vWn9k`T4$xI5sV#wi3h$~2q^43#dio<)0FR*V~%DAUe!>*W3=Bt6~ zbO&h%EVlodL;ja-GIf-~b~*B+LxxW%Wo<=+6?@NZiDK=r#)lX9;3#a3-4a3m9aOYq zoI17bJ=R>awE{ql3=-eS0rCxDo0FTCF_w*CJSJKI1H@;0%A$jZfz>J4og)hXs@X69 zkDfb!D6S`98#bd*)4ogIw_g>ubpv4$Fuv&7H;ZRocRkDZzZt-oc7DE^@%J(&M4^#% zhNq$Z>{oap=GIzg8&KzZ3#gVLj0F?a0-N-qiVlGEFk08{;llS-Y}N;`6zoeO2d%Yx zGL^5+35)EJ61M&DGdsW@7OP?9AB|7^%IB!(gNvLWsLOFCNi`-(1+RESDSUbgFkoT1 z{?~`Ct*~QSG077Y+P?7xy1x2`U!_R^tl{@<;q+w`GqNS*3;;k9e2}nHbwib1KczKV zo3*e2EfRDYCuf#_1+(rPQ0)E|2sWq@II>T!mH+Xan-(cr=KICOLS+sm8A~MWON^EJ zy^=*BIJF!tS*pJ!n=UZq8$Aslos-S*izlTOvDg;&DufBqOmP#xwg3fNCEhrIYQUc? 
z{Z3Xh_DaVXEg-Bif2Nw}0j!4O}Z&$9ZD#n0(#Qw6!Pe3e`pvU!% z3H0SZ;K8w2PlIIK)4pr%IX$Nb)6c}7jdmDOh-lWqi{-K@?D+b-73Bl&_0t1ne~T++rozioLfez15VUyYKr1W(s=u5gA2Qa*9?mc^&E2RPWJP`D)A zd8dr$e#qms7)>g(DPDKR1y;aEm=XNUCM=?x>;3TGe4aRd;c4m+)P6n3s|M5F8;%q& zPKYrKxWlKCJp5FpRZlB_K0A+X?J{!|g_cA8va~IOQc@j-$_zRIlFFLus!yub49t$B zRkxgeDD}K-f->aK-!^mAgV;^3a@4ZCC}x4Ex2}W7tp<}cTuKdArzdqAWY4;3uL);T z1e$)o+I*=A5lMqcA(0Ue()tR!N%0$|v|t1fxbSY+i69CXE|O-Z%JDtG6t!Q6e`s{E z3=1+2^oIB~YbbkWSliv4fHDgghS?Uq@CxX~99653D@~(=(>D~ZFtIDH-KH7@-5d4FHj-?4$ z=;>Y%hPgoQtmfu6)Hh8+b=M{;GT%E~QyW;a#PfZ+P~<>jYygF25&M>bNMJg7Mt@cDZKuQXlY;;WY+=(^vA(FS zXzs_8xjW6*0w8&b%U!XfgU1cVOvq*2Hi$~aHSfHgtBB`H@ltJ}z2%A&s^rU1J(#7% zcU34FOx1p2NlUi*cHfQn0h!nv>FC+jA`b-+bZSGnI>2`JH6LuTW9iUJle2cKopsgj zjq;56FdIJC8ejnj8o3BF@O8_AZyCV5^7pgI|0@r>~K@y`iE6={Cf?X~+jUB7l2{BAD0d ztcQH*B`UHNhJ5QuHpf?#VQLGB%*wY*lL{r1%qpQMBqOke*&w9-n6_S|&_2@=YWcI9 zyp3cF;&(w{Qjc#xIy9Zn!nh$TE(=P&& z&!A9GDQcknp(OG#zjs?fhJv;|q|nOrJhbKIe04t}RHfvO*4UVAzc>BhnIjql6^@6h z`M!IjP-%XnAs4+gaB|+3s$;Dlnpha5XDwpuR%WnTEJOd)W|&mak~lArr@&FfdsCX6 zi*^*55dQuJM;76?zu8M?z+uu=lhAYj87Z(>ZA?rC7l-Kien!1J?^e*si>p)+2-S5) z0Zbz3C8_*F=WU)=B5zYL?_5%W8aWlYSgT8}J+#8C))Pm-rN^p}WoTK_yFugK;ok70 z3MrQ5x@7s?2}IsHhZE(lvwbC#J@)x5)KkguzbFzy!$6dK-w`l+$#sy)484>z=_|E% zE5m4IhP1#jW*@q}4GZMJ9{+y*^}t^b{Pnx71+5Qt?8i3#%I-g} z#QvYO^siBWJ@DT=(0(cg_TKGUP?Wu<`QLu#@4x^5@_=6wik^)Pn|WT|1bHw;IaS#l InR}1^2LbeNN&o-= diff --git a/PyTorch/build-in/Classification/Sequencer2D/sequencer/img/Sequencer.jpg b/PyTorch/build-in/Classification/Sequencer2D/sequencer/img/Sequencer.jpg deleted file mode 100644 index fc8d1cf7657df9d5a385f5bc5d573dc390d295a8..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 65080 zcmd?RcT|&E`!*WKf`G`-I~k>SkzORD4j@55I)pZeNE48zK>``2_mQepF@z8xAiWbI zlmVnS0Ri5Gh+$# zGt|Jy0CeuBbD(>`8^o9g-36WdpM3po%hyGh#qLyh@>qMC+l#rB?mXTFeQ`gYc(!Tqf zzJcLABdDd7wT-QvJkY~@~p)Vq%qGMv?;uF%+GcvQX-@MIvk19r& zU`nxNHMMp14UJ9BEnS4}p5DH{`Ul1*Ca0!nKF`h(Nz30>R@c@yHYt1i2Zu*LsK+ON zkLw)h{C~OZ-v{mjpcRH 
zt-LvJ4ey9y2eVuvUIE?jTesR=Pi{S`o@Vd-s?#KlPD z)vA!w=3w?$i*+773{dQx+HiTu+QtGW`3ubDC4vvd+J3i3Ph#v5MY1B@Vg12o%gXi( zx3rSx98+Z|_qQ)Az8;tRkJ}w)brcRJpOzgfNe@_r$mvTPoLH7?mbYJT|BQPywJ^8- z#2eh$*r`6&0p1e5JW5wWI50rbvE-KhW%rb1GJ)t3AHo1}d4gH$b`03K%qji}x*Eu< z$rWA`FrR%95L|tH!kii%RzA0>p5qebkFPo8tA{2XwkCzT_lFvZue7I6`QncDC;i$K z_;|GT?-qY8$Rsj#72@rjSmfEshL@MZD zIK;7Ws!f6H{9~CLa9`s3@gjPG_RQ&l~9 z#ECrMke0vz3nM25WV^4huI`-ioPLj0xtf4wdZM+I%v^x=!{Vit?~RxreiM!&Yc$T? zo*}XlQK?gc+6e@?4FkliE3`ZUqB`_AtDj|yHv#imq3b{j!+DreX1c0)sjSQXgb^@J zg_>UCm2K`dx3%7(R!H29_X~EpvMKodk2bLY)V-7BaMz##M6KhnLh0n#ENt8&3MIX0 z9THMmd15x@QX8!Y*Sj|$Gmuqn@!TRZX<6DNq^{X1-8{Me(2O712?4U)KS}Q@HPfGx z-u29q>fuPehV7@QbzDhQnwzb2iF^wGbyJjy0g6c?XLq|W&r++pz&!qK3K6g162qI63HcYL`X11fQsFSz+tYY3snro@T4&qN%;1UC(aVJfTrdF$7g| z+zbi#gi(&lpqtNrG@}$GBx-HWR4apu|8w2?9|hchv{4zLqb2f4512P%gaP`wZZVbt z;`EE)@qAWrj+$6WyG?<_=zjup?grmt?SyrpFmPMqQA<+80&6oxw2G}AAFul)K7=F! zCfwLm*JDdHq?lHhz$d!2Edqdv4J}l)FeA45*efgg8or|eV@Qfd?(gt#>k@e9JJnOa`-mC8h%ZY7^~(}z z1XEB%mpJXMS_k;}8X>1`hLtUA7_;?&0R|Z9Ghs4LJ zegzaK#YP9I`gC*Xt2B(Lm)2=@fIq1=;aU2r$t^U>d%!j=F#9vl+Yb-U{dy6^4f?;l zFeTE*A$xudkSGJRok+7_fF^~U%47T3DSi9DSf)&kWGG`3}#;=zqQ6xoq6#^c-(`sSjV&S)plt*2<)?YBb-E zr$t(#kM=TRbSdpKngVK>FUE=t&|WmvrEfE~zG?gEHO@~6TyZgc><%23ODJX&bZgDH z+^|KRZLX~zFU2f5WS4hgD5!Mde#57sBVTm^Cwbp%^3dKtUtz^wzLe-Ln}GhRIDe;% zCMhQr*E2Lw{O$exN^!2$Qj3I0?yh8Wp>2)J8@F0{8@&n)tT;mmk>MtZwM;&MYD@04 zeak~n&5Ae6E^Gedmia#nz4JISKDq5SQRgo|)veirS*M7x5;OT!*AV`t;y-!kQtMs| zQ!ZwSy}~sHHa_MPiR&_f=6zerSw%~J)lwYRk@79Bm??>kV7l3}Bx?}t-QTG;VqoEy zL8_}w-p;SHNJfjM%t5LvI7d2IwjN;7m;P7Y69H zff9WQxoN^hJfXVn-}uMPfb5?@zeNE@(Iq23>G6+MDiFsbq!A93=TU7=+|z%4F@FBu zlL4yT?M;nSzX2QhMafMTp4<82;=~t+%q-)u(2Ci%#J8C(>lV>a{*8V;Z~hk{GPAQX z#(XjF=bgd_(rv(*kY^{UCG{kMBgbw+X&VTP}(UL2YVlQT2?HpgD2uePkM z%?(fg2x;;6aj<+Dc{DfCrd9kEp1Q2jh3BS;k-v^?|5&b?^Ct*Ke{Urgayfe!vULcJ zEXn!vZnP}qqMh$72vE-Q3c1>To?a00$+tCbGkKy9j<=!rSyb0ftyBg+AlPE!a(c!) 
zO{9le)XHTma}(2kY(QLX_Uj%sN3d!(y4d>8bjOoVmr89RvjjK@_c59}=%Hd*TSnIF2Hb`O2OWIYrs~g0 zRf0_&D8*uWI@Ct>bgBMF@K$m*VKH9x5(OVyMe|u!y0YOcLta1?(F`Yc6==7@hz->< z!Sl$UofhXev@tRMJBOzxcMZMjd>f1Rn(q|5X)g?CkZXI&GCilzYNmNB%)P;|Aj>wP zSB_&jPXjX1@5n<&Qi^shez-LO{nB1j1A8`kEx>{9V zH>R@%dn0|z`YPwwPclBBzHWTjQijUpPG~o z^=0_*S! z9vjqaL7F8a-WK)@O7}{a*9%%4-{pV?BBgt3i7iyi{a^kuH7VdE2IwN!@_Cx}q=KF} z?Jm`l1;+9u&)MAc3@%Aied!m7*?x0B)MW|Qw(w{BBpZe*I_()YMB=ea7z za(2J+-jJ#6&7|WUcjj^S?Vu**-R*IoxBoW^`60Hm`>`_Mi zGN$&39NXs^0lrHLL)IYFsD?{mw)*cei&ulOG&#iFd1HS?EG{`1{z^c#G&rAEtBc;p z9$Uocz~+($w#x`F;hB2+92ZxvV?1wU<#@8!mz}<_ini}t!dzB-5ma-O9AvrL*r#PIYxrxrKPy4Y+*pY>c5S6_L zC|PnTi_0$E^I#4JsH-FV=4p=yQPqqnR~4hDGEPGh!_7=a5nAN(+hmWp5S>(R&lk7W z5+o@=Ii~CCLrRq6DDt*3BsAK#==c8FcPd-AIod@m)eLiXH`@ZYS#Jzvkk+*Ku$N0s zp)462O&XsHwJl6+Rx;dKf{A`UrTeE}C;@4{L;Z3Zg9FQhVVLmysZ0^Qq5|Z;xR63F z??s*tIqJN{3|*S0OzkhNisK?ad+P>Nu4`S3%sort*TaO^FClJ{aSd5X|w@m4{N6%K7&UIEwD%xf*nc>J}`R&Dvsj9_3R7ax8x6F2NnF;KHqB|&LDXie;8nlQrTfuetY>x%RB3+v=hATf9l4}6;EKl2pYK!5 zxPyKp3dg2cT}`@t=d%0n2Df}lH1Ge05-`ZiNwY8=v6`BgubvUUDjSfr4E4L2kO51o zx3D$y!xp}?RVO>mk9dV}n)wD@`P1?4^_Ud9$9ILRTMAfOlx;*h)lRrk0=*B?ERxcb z-XEIYwDGFMVjf3SYZ??wF!w;Y5|frC-~%^pGGLoI&peOKdcfyH<{kRo$*p(GyvB7@ z)oZ`>_#&UMUl$PlM)1|f9=pA{pYkzcPeLpVQ6P7{in%d*rQD=pnc2vo7?GQak{Hu_ zv}o0cWKAI^S;<1}L}3Sy=945eyDIqh8a4Qh6f!c>!yh&xQfV>_P#pe1kpWt9xC4CT z2QWZxtB?bB1}L)>xi>stu{$2cH0C z{lFyT*b`ZPj6eGcxzfwW0A(}%pNxY)AE%z32X5mHrqw~FXp8ihkiBJAz)U0^C=9z@ zPWX^}xFY1BA|nJ&s>HWpmm_QQM`et zH9?4GG<^oB=s5!?k&tX7+x=`P%3%eX#@<-fB2q@%v=m;7HA) zvxD<|c*(#jbH_-iuYbr@zy+ksXxn7N^v6bZ84QCX#(pnLvtpsia90`3=muG z6moFc=U*l@`=4hD{J*64|9>3e0n=26rDyFSVOKh&_k!EiC2e!&gx>g7RsQk&{mVyH z3hgbL7j##j2VV#c(9%h-WqtinN|j=RHqjZ7HP19+fI>7i_%YLtLzVI0TgkdA3Hf$| zB7=95V1ZE{0#n$t$)%OcQb2Q6Eniqwx$h3KuJP6MoLbDn96xR;`VyzKtA2{~=rb#Q zYxB?NT^p(WZ?&Z8SOopSD z4Wi#I3}L*cgM1p*KaG7cV(Wt08{Xu^u{ge|6!b8-aBu9_(a5fWZ#P`fAIZaVd{DJ= z>l98d=!2wpo7Mp@u$n=)_0?)JGFp2)Bb>TLWgi{u$(FF5k1o%hDU!L<4z(u6bloX- zNWvQayfKq$f4c-D8|);$jk556YnTq~l93D$))2^U2Iy?1k^wq|&`Ti4UJTHp)G)Fe 
zxR&`aKq?SA3p>>la*~1E!!tk&FvuhWbOJ0QD;Nd{5lb^DJSoBNt1v)I@BYOi>dXwV zpGt=zfBf~ouQ^S8JeUVUbVcY;cK<+rK-NHrnq-#*#I-Zr8TJSO9Pm`5V4WSlQ^RfW z?6iyTDc^j=#F1G8EF3ei7cp^(0aBCI6&nN!;yR_r`5oBVMfxkF>}27w4CGX6kbXG> z^u#v=n5Vx=^@M0vz|Igx-u>u&j^0V1VSu8*oe{j`*rUcH%x6eFgeW+=9qh6L{($sw zK1jPLwrKCr;9i;FEA%DaIN{bu-I>9-FUu-%S|?PhDP^qb&v-fJgRkpQRj<=TrEa^D=UOn~z z0U?!`%x*j^7*Z;_a|*`)LOkC5XGTWs2BjF@WYcxieh zR;KPKqBn0#{Iq1FczVe5z@wistk>Ys6*{fNF2yp)X2#CkXLNU~DTiOiRLRudKryclU(F2Nbo#=#EG?oeJ9*NjF96Goh1MRg9c^<@*e@oPu!k z!(4NxM^o8Bt*qH@%GG(~1L2J>C?{5C0%c^C=BJtoo4}Y_KS~ck3uvc7Hx&e7OuhcG z&TXX%nGx9F`a3QP6tWB-+T8a-+)zW3Fk9 zaei8Ee#h!tMkdMtF#{cvnEXP`TxNi-(xj+Cz%?O4DeXI$nWjoMOK3xq;wuzjCnri* zsTz0i6y8RO%(R>BD|22gzJm^PC^^UboD#n~3I^qQb%jWfyEyZ3if20;(|Pj5nRGan_BEq>jbB3M$=YSF zs&X7`?GF3@Z$0Sa)1fS4mN!&e8 z_2?G8N%5=lcn5dr%27v?z#Kj=qGIGkr1ZzOSMiheteA6eZhLuMs5Po~Hn5r()XB71 zk?y*gkz<|^m>wpcIe&azdK~MF)8=-S&Ga$l##*M-R7hjLm^bq9Us<>C~74?L&*SMeW#=aVu} z0{R#rTI*{~oYXd>Z8D)e-sU)F%W{Jtv_)Pfn&r>*B%;JvuMuW7dEl`t06lx?S9tLW z;=V^;!eUHt8x79@yc^XM?kic2**K}l9^J^EX?X$l+j(**Q#S;aV|`0Z5$abx{ni*N zI36I_(rRmKq+V5KdgSZx;o)}_p7=B;%3IUujzxx>25*tfZT(^8%!(Br&1Ugk9_fV{ z+p&_{`!mr-}7JBEmV%|$7K~_YqL1L7Ql$=B@n8iWVfgzg6fi`w#d95njh0=aJ4lrNl5 zHFHrP>+&o0Wf3*In~yFu;fuJ}n1hT)NG&rf?Y7B~6d;6bm~B!a%tVq+uE+L8u(Qx~ zNLWD8SG{`FhBo?p(R;%Hj2`A<6TWc)r6x&gu1j)%E+AbmH#hE+*{){v)yXAbk}!!j zReQ7%IDqq(RmPcGTdfBMHIF6b+uc6Z@7L;5E}d+4Sn=?;33@x-()&6bS?2W&R+%4) zT@Qra#d%FPPS5b2=r$f4~+^UfePfd~^}?;w-yYiDPya1`925 zshOLf4^1=VPBZ%U+iU+c!@<9H8-sMxK6Te}8dc?$6X3F$-c~CsN10N3cFp3=-&P)_ z8%Y4klQss%xavL_CO0{%3+2v*yRaL?*5yR4LEKDLkJq%MWT1|1;~BXbxxNjvyGTCd zw*q)9od`KWO!$1GKBpVgJTzv?hRHlNRvU}w7Y2P`1kI^_sM zGK&O@AU#vLob3>I#~hYZV>+&ykXx6*FFVfB?w+A`RC%f4ZsG6Y>BJQL(r%2PXl!8*UHhFY#E*46mMk zpHRb`F~s-b4b;X{3OZ$QZ_YeR5c!}y*{+RO>;}O-4SV?304;B={ki2&jGDWkb$Fo~ z)Q9~lT-qvWgl&8m@3~_OCELbje-u?9gM0CO5r4Tf^mUEIW@iCVk6J3LX9PcW0MAp$ zx*Ns1ti$%xVs`-_)dLcDV8#lAcd!{cPZP;0^$H!Rp!Ys=#g=oqNtTt968+NFSwcLf z;MM^1@*OVBr}t<<#DJ`A`pS#C*+uD*rDP+jtvLOJ4@(3lCvs*b%MUP?8Iyp 
zTt>>2%|z?>=4-&1d#o^*@|%_73NGngquNr|2!*wVT_bbn^~dVXJ~JJm$+11VSK+&z zVE*7l21tbFK11Uv`i!_XrtpL=wvkwhu%Zqt_H{e>-tCFFjO4gm>UC1_Y_2Nq(0>Pi zQD1zR4+B$`nKoLYaFh#ll?9b8Wr}A^ZCc}Rt+199)I5LLAc(K+Svd^4@9bpplq6r$ zaJ5pjgC&5lgq^|4`x?rt9zT5}6mH*}jC{N-t-h z4*PaJMaY15`gWvFg-yS zmHM=V&=cJ;TY8d{_2Bb;9wrX?V;Gn0&3$W>k`J>fj@bWrMzu`()K+HPBG5$q+ZTnT z$0g*^PXWyyb)gM{aF)rAqIgVfr>tFJMNn7Xl%S^3 z2ts(^1pCPYHr9{Ra|}>CPSpuwFimr~nhQw`uKH;~fwu6=j7ma|b+-61k0Xy==~xy2 z9DkhlFh`;4y!oq)iuBCp47Jtp%zWm_!PPBdN4+i|=kGql4epuKxE*DC8%Y6#GS7#F z-lS_5=;x*xWzgXDW?7YP!%R!eiNGn$4q-yY5>ulJSvxqXzG|2C#HhQ3jvJI!)N;5+ z&w?n~xsW_aEY1S|yxXR*Tt4r@?(97$0&MLZv+%ngv-LP8>$YyWz{uaLmW^e6-6EmW z`i*`PR#?H{jb7S*QpqZsUp9D0L!iTEMmNKCVBAm97^UYgmBq(wE0EZ0y#jZ<1=EC& zW(sDmB=pF*qF_-@_qyH_EHaq&&vRy|m8|J#aq+T6%C#Wr&brDC>zZBuyACprhnr^dpB`f?tOdFE zbwz~=Ig&DEL!Q3PvN}sDCvnS~&Gw1BI!bFnWLER120(*DZ3}5OR0&FU*W6+hO@jjN zah9HL(_-J=e>YeHPc)|WsYe_6=a!S75{q%2@@@T$&n zggOP1g!5PUoqX6dt}8=gXJvp~sU>WFvKHll*ad3*x!lO2%HhXezL8Bvcrwf{Z$3jy zLP0Ay3FYO2VovR}j`nVXSz)I_GxcRH`&Qoi*jvl%@us*y8)wMyCK8#9F=*+rcsCIO zzU3Vfj^;D_>Z`{MKLK+^Ocef%@FFe($kWB7TMmI2PFD;nZa z@ovTPbHB+%Z9uY-j+DeF(%cOGLN)xkp)BD+d1NP%qLrnvY>6@EE_E%?OZu?IGj!Po z7hPmB5CAYD26bXm1GzsUlqN$)gy8I5kUHyvSXT+PSv08&_Dz*NHp71liFc4VnSKEvus5C3qxphA=Y|A?RIAA6~38>rskb zkffHA_XfMQ^EyN)>hPCfn>H=!Cd7OXLIfu@QoS0gl(`2}&V&!wOOI7au=u4%DVdxK zPF<7za}|NrS3R5IAEvhS+mukC*52cTa;`PozV``oD0x!_w+&K&Y=JEM&B3Q{a_n^P zV{cn!Y&A8_Rme^gWRazX)g>>hsLz+aW#!Do(M(7}LRG@>5p!}zgcQ|wc_i{CHLS}P zhNTaZ7b2I~6ZF(6Kf0ZSI!_RkMH3>wu2bdhQU_1IEbZ`uWYl^C%qb%xhWBQ&oR(2# zsOG+9zMR2{{+;4AHJ&OZmG+8?w0p<*zoWIArlc*!GdMe4}NS;cDqWu zk|%KDq;hT5FHZLcEU;PwfqFqy@Toe@k;3;cI^Za24t zOuLqwAD4*n$%4wzJlVZ2EAch$vb>C)%acZgKDr1sjP%fJk!d@EnX0ku5ee}^Q|&3* zkh$6(K*klU6n|nHedr%&f#9T=7M7Thl=$G^*{Rk*o33%l>HRNMDHpO{`4XU^VYG^sF#u0qCaAClLWkJ8J0@YRNY$8Ktie-HargzG4Ts7}R^%wl1R`Q%e0d^@#> zEhUm7wXC|JmfdY$m4X0=kaX;5TI#L-AtdCsLReQkJWsetb1eGN0&km-?CY}TO(^qi z4NIe`)z!N9-y`xLz-Xg3VGr=I%K4JLfaJH(}F&b{gXNbkev#_Y?t=U0F{$jY|cvS zI=H^E#~7J%6aI|t?maTF)#~2DQC&JYH|FpuN;vuL5H`N9Ku_oEQwt(LEkir9zG2L* 
zR;;bu&6JEvCnFu3atda$HlT_(YIAG6)zim6^+u6D8%9}|{VoBEQA*gfN-85NtIlE5 zmliHs1>{4mWu_|8EF6Iu9`;sf)=A3*!rA*anNr$i0D7whn3;1_WDm@?3;Z*}@GQ9s zp|Qm0lF-ukN;jy?WebpGrS|%>cxEkJX`~o-%bl{Bbvd(=r)w#Ecc_-cMFDRak(U&v zL|r~CJjj}oKQ%d>h}v*+S%-9YECqll;tGcsHZ4%fl`7EORl{^gEyt`hxI7D>5c>XFl z6@)OZC6^ST38gMH7YaB5p}(X6=_=%C6JBH~>=87LEM1Sd?4(5c5z{6_xxQ~~as}oy z=UiV2xBPXYGNnG$m6)t#My6-Gx%x~*K6sFmXnT~F@eZYVs`n@-x~x}C!AEt4yWU$y zU%V~im&{PojT)RlQ{P2sru=W^uI1tWpNEURZG$?ea4|_|=#63S&B=^VG1=NoqXCQ4 z_bnrTD|OlNx^6{)EKRw>v-)Q7Oap3rM~eBVJk+hF;cDnya=-=e@8_h8KU_!x_uu?t z(dqW#^IOSfD5fKemY#$i{yG=+ZG7_pQbR?_WQG0tIMiB$f2zk75c90=uveI4uNa7! zl<)OSqfDSS2yC-PlD|+X*6M=~DD9d( z&he_7kh6aCuX?lqx-OY1k}gSo_@i?{dvu3(o49qFeDlabrlhqlXVGFzO~=IN?$p7M zv1wE7lxn#D-Ra;{Hn-oV4|3-f91N$P;M7Fa_onye1kyY%PbMbKs5<4>!%A|z%2g+f zfJ^a2a|f8ifz?Ls!=Z@wYm0<5BaGmmnc9D5JVG;f6-!84VKfK0qVnANc*o+&TSO+p zZuDJ;^iG(<*ODTVeeiLcVbxZ_b)bZySyzc%pEEYOuHezLpgN7XO$s-ixYv+F5|JGE zVVg5zY+PRLdJ#YO{eCUW15CtkyK+GqGNz{U&X$4c#_#4zy_wH-y{aAUjC`eZJr>X^;B2?rqa78uIG9&ubp|ZMDc9CSN4+Iq^NF(|BcjUmmhVp+>{Q>#r2 ztiS~J(y~CHRTOH7e_$rTOHUM`MAqnPS1})NV@Da4=vQneSNgQX6qRokNKn}0>eJoa z(8&(n1+o+abOklH80YY4;y{5aNURR@C6w503&ew;jN7DnyBU(Rcli}dwVDQ&p*76 zEf1G-rfFbeAxBL=&hsd{R(6|@KW$;_@r|Fq!ul@`UQcbp$yjO0o)e$(!+$rXT^M*zA6X?AK%9>C4t zaiRUSx=339lS?~9k)!Q@^X)-^MJr^5ec#z7cJ!Ov^9RBO!Z>z_Z7|dHOXBAb%LmW`J&LH8MaNu{2qw*un!P7o6Kl^(Blb$-2 ztE+&6szM3sDG(jU*8=NdYK)#9mof@Yh!<(;Bh6t1;e<^C+#6=;FyXnG?> zykEca_bfTvfJhtcg+IwpJwH`Hbk{C_rTK05Z-t6eB2hn=z^#A%)h)GM?5FRcQvBzs zYjM;@ZPF7bM*|3-jb~h4F+LWMda;I<5}<)_=;+HC)XMbnME9WlS-tnHCdAc$+=?r9?fyV!U9R8#=e39XB<} z3+NQc@@T~Kqjz+-NayDX3Y8Z}p3F@sz@CNnI_DgC)t%zlsQz?AJEi}?#Up3-&YhJ0 z=ZOTSAtWbV5?)1s)FHR~$n!=m^@)IXT9pAxd9J6}kTt;ky%J59ibiOZQQ>pWO)7SA zN@_Zork{CEEp8erT7#;dpZ|j}^Vj}%$i05j4Xy#Vn6zB;gdURI1a63@cvmpM98V~B zD2Nc_uql8j5_+mU;!~8TOeE(~rg_-e?-k#6eNXc6JNBnmgQrUCW7M=CA9)1M4u?mhfQdYvcnN4cherq1@h_x)-U5de?md-b#+rw7g2}y|_C4rRHgt z7-th#_pmWSn{Mq%Wt4I{%Gr9I2)x}tZ}-;ad-zw_+QIzzpYTYz+q9KI-v>Ir3GFd` zFrxCQ!0Dp-XwpVmh^#64z73e|0=E2A)~~L)Y^H!*g5p;#RI_X3kzYHjrAaAfE_)s9 
zGEracc5AaIM}XjFoe_LZ1#H^;B-6sv`}S~IWORYzm|x}ZH#X>^Egck%cva-jU!27Y z$*GHoo0P3Dbqz1>KewDCBB0nP%kHscmd}pMTnXL!T7Bnr6X2z*{NB}xZCkzE($*;c z`kkWG&eAz+VW7(Q84th>LQzrR86YB8(>N`RBGn6FL5P%Z;05U1TLpUA-4Se&y`23xc==LECP^q2 zOoe=;O9G{LhA8)p(o*xZc(@?}M=HERHCHPYxFziW)8jY7RUB_@PCj}6PS$kn<1!6( zb~Ct`o4K#I46<_sCWL?h?_L5)$M0i#4!THCY0av*=5-~BcH33y|<IZO0#DhlGBn!0f93{4kUFXXH2;M{_+>hdg` zoF^2BQ;`7cp_HJ?=HIO+K3<<_9ABlhTZd#1FIglUnVWEPuA2mveRc79p>|e)4#Rlz z#Vp@zq-e%UqW)aoTzUNo`TZ>AP~68wFGkm1(!b4{spJkQ_~RG~sqGMVX9N2>uV0h{M0Jvw|=@eTtm2(p#M_4DxV?N%7rdB2_u z@hr2VaJ000TVhd1TB>$a)0#FBav0?*xS=VXVC91HN?TZbhPO5uI%8|ShRlwPl z*eNPZ%_iB9k&%Z&8Iv@m4|)HisN5*ca``mQKUmDoCnx|-g(>mFQ%$B+jUjm-pSJei zl|fso5L|=g>58F>KcNNp?J_7KWNuzLuFEZOEw_L-FPad70k{$AvNy3YMcN@An-5ix zuJVtTqstvwvsJ4<#|`I&r(1cEWW!`F5K~huJgye4roz~!32SeI2ljdzXN`(&A|9uf za9jj49Aim!J(ZY;r$I#FKj@skmqw|mZfyX*Q=(jgT&3xe$MwU?x6+N?!^_PC3oxZf zH?J4rpN_Ix!iQVobD5Q|OSq!N60+HW60!d20flBL%gz*e#DhM3p;rASiK{BP@{5c) zv{E!;IPVtDs0V79%zvt7l`u;vu9d}GmdmT#m4$iJ$64nlqXk2zw7IueAc4i9XvKRy zb?;Y?Gj94reAyEl^i*de1k6ss@HwTp7ffAo5w{k-ED)Ad|FcM6aseP}br@H``Rf|9 zecE8}nNRj1oMff*5w^nBe-PLL&I3xQt|7rYMp(tyL_~ufO!Kh&v+XO~n?b2){p{uT zx?29OWP7Npux02IFI$ZHvg2}Gt@%l8o%e;dMecI5t9xYwZ8rvmq}xovuU#E67xI7m zT5b9?Bm7k&CInd#c-l9&cvUxyxSLSOuFFKZ+hyToLUf52y+X#PB2?54%k7;9iM#xs z&-AX1J!-3c=26!Yu3_HblW64Rh(gtI}y(u5|h=U#tIEY_C!zEhgwy;4@Qe)P8|r z-Sv*ZG{aXj6ynK$oz!C*#tR`mqNsf&WQ?fhIB?R~nyL)s$H=lWF_nuZ-$*q=hTf`5 zRc+U}M`4SpDgn+<0LP69**4>IUeECEY4g9;bkt`IY{2hEyZFlv<7;nFX%hDv6^Cv# zCv=@6^jzf!I4n9R^PwsjOZiWMbCk49qaYdUJn@RWU`^#W?6^t7bc!5T6fXLxW4J`X z_0b%Tzqi<3*qM*Vq~tx$Q$}iX55gBgfO(|u9^jt$F4k%ERdK;P*w0Z>MB8{>|8Tcc z-Os=|jzu@B zaMuJ6; zdpqPt^)5Pk7cfEC4#M5~p1)WqDx0swcrEFeB?vY~AG8~HzZeGi&`Z=^5Rn;PsQ!O`%?1?Iid={%M zQt6V{L%BOQSFuA59I{{EAevpv^v8Qh)1o|b)-lWjsZ&Jl?}2HnRyLBs%d+X2+G=vc zdE!$jPm2eKRe;>~iYoi7LY59;+An0>LRBf;hnV`RO=HRAr_*jeUR&78!fT?hecH2UuZ9=SuQ5`*$ksAbfFQQ#y- zk~c{fTCogO$v5g{vGA4+krKy+8f}KS-Wcs9exSlk12Hiu(1EPt|9`rN&<5OBc=v5EcJ`@JjiVHo6vP96<%!5LZh~nH=Qre9Hz3tqw8BuOwTIA+gcNA?^kJjT+{S) 
z6+v8OSNbS_3uf*8J4F-GkR@8c^Kkt%L_0nCz9N_#V~Ue9<1@&7Av$119PCW!e+mZ# z8s}}SGj;V5rq`yTh=K=Y4>1EvxgIK-)hkH?oT2^nzMuoTTzb41(F)+{nvEX;fG2mgvpMU(ZSpt2lh{KP$2wH_qa*yq=Ac*_bBLrO1 zU`_dpahB~L#eCX)T(`Om@dg+S*L5gYTIsv>Qdev}kEzOPkRQraR>znLYM0Z5 zkvWAn*)4rbPrZ&7%mg@vzoni~-T}Rx3IAfMC3)*NDumohZ0vGjbMkIn5T-gU zfte86+B+t6hEfKQvAG$*cY)F)39u5qxUox8S8v;d|}T zD_s>~U&oOfeUGh|O?mdOJeCrIaa*CFY5^f@&c$8^L0HX+q)oMpTEkEo0NmbL5MsZg zD8wFJvLXxj=Keqq@*3(&)|^PpO-ORgdzhD#Wcj?t_-=cp%G3YcUD?FP{Q*su!q+r? z+dDiTtp9KqOIeC^J`@!k_8CJvXR%al*kD-dz7?sXO^dCF;*-7}yg7|QDRwk*32A7Z zRgsi{98QAoapU^ffz|6i)xf*5@~@%E!f^!I^`&yTtHg{y4U4LFS`MU1r8b0zGF|mo z^kp-AEJ`zqY%=Yd0Jy(h*n?TKKjc0wkSkvcASg6VIg^Oet${CX%UQQ;3VD z`Z^sM7Mrv2%01#(p6w7+jmKt%oN4uHbw_*djV_rx4PKwDD)NxVJ)f$A1A zKn3UMHIMKIfUztp9zb@5xE1-F^`sZ%{f&Bk|r};a?C#rPCh+mOC1=nIv zAp|=AScuEdTiNtSwn*2p0|$Xa1=IcQ2Bn}d0FagQT0Om$U*9tc0J3mEnyk(j-S%wG z_&RWOi)z{5i2U(@i{$uPazVXca`9AM$-NvnQ+AHnuD##0ATp|JPh3aV=|V{1e9*r^ z-&K9Dqa84;I`ZEjU&PT&!=hJB^DF<_1qX+(ckzQwuVk#3&8ZH3UUhp4mbCy_ubT_} zFPadLqj8#QPVZOM90urd&-VYp-g|~MxrO=Os9O;cPQ3vLSaxnM@i!SSbuTlLk zbN@VqfsTzy`uoB)*wMj_C~JE9jm3$*d@UmGD^Ks`~^AsIP4 zpHHxCpWz#;QcVywdq*{0$ZpX{ahx%{Tde;m-ePUO%`2o=;eJeNjEjg(gJncwu$^|# zuD$yS*P5}r(VY{KCvtCM%v7*4xP=o{SQ%sgF){H;nKb=t#OKyIEzA39GLT2loo(v8 zL3XpXX}N3DvVjlh@T4mIaz1Kd3NT`gKfzPEey}XVf#4*{H~?xzUZ^oJ6HuS51JQ{C zxHqg00l(wm50(=^$tfDh=vboYZ+@_B3^6Buu#}+a!0q^@gDJeiEZt)|=QBBeM!z2{ z%wixg0xvLIz^DZ9C%^;SA1sYgbhSx_H!GHarqJRCIqJ-HU;A5wu4qu@%<9Eve{l8)1`fPQtB@`8asRAh2Ha~|q5hYwkI&u0+Bx^`#l7%6M% z`LNBF{JIbL0mBO{3&|+Lv98_x+R)mK)fsroZsu-q zmC5f~MYFrODq_Q!PBReKJPg$q;$2r2`+2L^f@*QV=MBXF^&)5dZ77od9~m%Uj{c`i zaveab#vIAxrJAAPMJvLgJrUJ#@mt-X}7Yx^q=+~S&WG- z57x>*;Yv|;kM0EpVn1_=}&Ez zV2OFg0*9DQuw40`*h2eHJ;t$5pv!beS_q|(X#9>k8=Ya4;nLJtJEU5EO~sxn5RF^O zmbGbZOS(b9NBXb1c0L*WEHAL89lq3LJr;Pi&eyq4R!)Dj(k1PCdkl=XONWFIV%SQ_G_p`c}z1Rj8x|GRis*r*)ElcaBgudf$`69Yp;FT z3rwGIxVPdR6cQ8whk5Z%s`T*qwgmE7`C}ZEva*WzQ|CoJ);vNC`OGuxlathlF!`HL zY@fAVr1|v>&I{77$$v?hgNm#`xqv$nN5GXNbw1%}%k?YK_q5MmQ;*{>^|;sMTrMkc 
z#GoU$bUXqI>qSjlrAbda&c>P&o9fbE2kyN7q`lJPa6G@0gPoK!;>^X1i2E=jOP$E( zCaT!0Kb;$Zf5I0uODy$JwM1*iK~H3o;3=PdBguv_`ID=P2t9%c7RNmqnWWU zR>*5+=dRnNOS_J6%|BwRL~p|K>)hi3ZMXR-eGB|q&Rec!sqh)B_)xy(QE+YlMVLgg zY4(f}ThKlGn0uzkh%m1H6`M+jFKrW*#yK^<@veg!3U*!gQsq&hl?z4A+p=|z$ZahkPt>L^9SAS0ulEpcn+8mZquqwmgyGJ3^)N>wp*2-yE(Rb@W|{;sdLkCwvV#^yo3-= z0ydbg{1!P8SG%$a4JK>k9i@%?%_%;$$>(`M-3|A0JLF;xY70C+KDfGnc$5ARF!?dn z2?$%8dRnIIy!)nrPQZ(1y;7A%Ro0K0x*C}^eit2XQ=ycSP zm47hU5#{)<3Eu!^GxsZTH1UH44Lfo`thP>7+U8x(6Xs%Q=;*Cps_F|n%M5|REwrxm2EjB(dG$LTfWo0RcM&{luTu{l({GIOZDDJs6jggnhf zHpQ+9%@05!WB#*K{qt3&oUIf$qzO50@D$>~%KUb|Pyy{IP*PR{N9?(XjBKwRNAeQ# zBX!N{Z$Md1sj_-wxP2vz$=Mx@OiHP6zmSb-4_i!fx@aC6%Nl#o-hRk>yYz?W5>p5p4ibPdB__Acx$Tg*smQE;1n~|vZ8G{B_1c(rP#n@&SIaek zHG19c-KhVxieck3c>yzA!`rvvW(eVv_V>B*VMk-J`eqhcm6q8A?L;$g>A{z38Ucb` z7~W)?hLT~5WCC4_dSTC!QGjH}fY${4DiCqImFDLVA#4$8_my^>2bbE zIlv*LEs__zLXHdGaH`HS;SupAd|Ky*emlrRmwfgLBw#y%ZDIBYumC(vl)(I={evY5 zq~ejsPm%20hj!tQ)7>fMmUR>tEZT|YbxV(`^P!gt5M6JMEc4PmccC< zM_Q1r*zF8Q3b;Ei=`#UJ5au%8(%+oP$+1@BjbP ze)jlIE6;mv*4Q1d1U*LpL#Y@;aCsW+PU*Nq%^eGqp*^0HmYCpar+6{Bnf)Wy4t6ox zqL2uO$97Ks@aWx|611Fwumwgq*~iM={(2cF`WxD0<|_o-&mWdbmu?#lP>js`rIoS~ zK{uI8;D(?2j-SfoqNc}{(L%by1aOJnf+^Rj{7IGHtm-CIYa>4~sur3wY?X`v{b9i` zRH#!3?>T2-vopDxn}+GRxBApBR#HAG@1>@HTAN-83VE}!V)2hlu`vE5cn;Hcf>Dq6 z5G86QPP z0(yK}$GmO4y3&6@eZq@)xHMm1hika8Wv*I@JInhd&xD(gm*2OnD1OBT6NQ1a!n)IB zEc{9#KEv_%s)(p$hlWDax+uXuU%+k-h<6?92OGEXQ&GP%KaS#YYHTT%hX5-&wgZ0p zMdjYq{GmhFr58eC0vCQWyeM!-@M#vk4KH$!(F(t7tdN8&U3J1pNSfpxg53Ix8s!6q zzlXf&m)`8jJp59g)whz>#|X*4%p7!4q3IoR=u^c=QQb)_G%Ts7lYJw4Mq$jA)`Lch zQYYj6p@fk3Ck+Wm>uS0(RjnbR?Ht0e^W_Sj+drS3mOk6zqC`2-DuGSh9^|)Q2_o1e zmD9{^hILIUkjSxe-#!jw-STN)qtQTP;}Or{rY5gxTz!y9aZZ6=mP%ZFPJUZ~v`lHA z8CRB4P*z-BA>-?4?PC}o!#w?+DM{M~c4AB^x(9``oCIg3m3P|!iG(9VFb+z59{1>uw?+p@I-?VC zMm0ZbY`dE-VSX&W)W~7ya*pS)pEo`**KQ0`SnC^atI|reVZD(jmK9KXC7VG=UdVc8 zmptK9mY10~CM+i6h`|juhR8Jq4t|zcB~fQG@yPQXut_ z0w8Yj&qcSTfLBFz{$Qaup&8y}(E~x$LNP0lCJsLY1A<%-Y7+xx-UgHMm}gK!XdpiU 
z%vK;PvYfQj5RLi{oK@3bi83E>tFA+k!4s2bMn6`9tAdOK5o|w4= ze2OTh!*8hlSK$A<|KmX=8j%u2>Ukb^8t{6#N44lM^2uDMkxJCPXn)`ky%rkyca?9M z)07h=5c)cNkbxiLf;q!|4pxeqK(W zu$hImHs-V z%*eN9eY0rE^v=v)B|KDavLoEEOG~(P9AbFvs7Z`Pn;7WZdG;6Dmz6#kU6jX(!Y*r(+2st6JNhj6d{odm z`#dX63t%`87@IcvI_CtM5smDtO*!eDZgKCe;=hllJoCznFPfI_g1L)K>3Cp7jIW)q zL%G@`Z%}XxxtOUj$d_pc;+O1T>yK}Q$VRu7c_GEO;vR+;RV`?W|28?Lr_^qCCLd?< zE#9qPc1E`sWP5*ne07!3cyL~K_6Q_bJ8$Odx99G?;``_UJ_UPwHN?!Pf$~bBOe*pv zWu?uB5geiDi=n*k8+pHwu}N1;x;2u`}8in@M#gLv{e!jVHx?KDVP zb*cB>IZRw(4d_#$)#3sOe)q|j?KMHo&0Y{9cgJJo`L{vS(dRNHU55tKb|Gf=A(ahb zCWBe(#uHvMh9lYnYe8glA-X5+SO|$!Y==BTdrHBRb_Ns0$0{K@3<0uY5BMnkHZ5~b z6QC;9z^)`S&y0f3=#xT&jM8kpavkSIBWxD?v?|%B9``Fo=&H+`Z3q~*TAQRT$!I#% zd5qUt@3$7!oTFsiA}!mmRGB?X&@P=Gk6-Bb&XMjrlb#!#Yj*$8G7uCAcsN+Q^Hdnw zs~`nL;^L)B#AHXF5^(;F9yNb9pqhUik$q>f#gYQ(CHs5hTi25)2V3od4lNmf>;*Md zwlcaFCo>C@l6Z`BbzM6?gw$cp;&V2+8s8e zii#QM_A9;95+&_v_^O^m7Ee*p7QITZCTPq>#*1QFf3;WJ@dnC5b@W<596UU#N4pis z1+5`XidNeAaN}W9=RantYJW;7^_bX`_i3=PbQHTZp^5mx;#nasCUkv~o#Ujn-=oKf zBsV^bc8ijEFF(ybWzSt$YH4-Fp?HeR>=Z_pFf*FwXJ6{rn)QGcU=BZ&H+i3t&p#E$ ziRkGHJHp7VpkG2!!^u#(_zdTpJPxi-&~YG5vVnthARLt7p=3aR_=GjueWJy(NCF$N zy)(MKIdiUcJV1wt;=1m?xnK>%P=fSmd0YVhg(wNb;2RZ7QlFlb82CMlmk4gghZ;zK zeEb^Id-zA`_~M-?xx6w+tSKXvQ2S(g$4EBkTPGJyD!(1df^?t?bcFQkim63CWnKhu zPWcJ&jW$K3){t%poDAmECuM?&b3j>yP+u;P(Bem`=D^3*mFOCk!Ft`p+-s@47X*e| zEI<*eD$a7~v$kBP`U#Q%5ov6sVgS-I(Lc+TshsB>})$H7lF z*!gq8APt&Rng56wEq;fd-_ZgvPDqlon z&mjoXZq;R3vMgh@G+ve_T!RrD%hg7{tk&%2sbsT?%~d1#aA$?zKRmHyTy%d{&Q&@j z*}~{ucb~WT*POKZulr6FK>!6gqlfDZbLavODq4juHy(D5CTV#Se%yr?)!C$ut)gM% z1Fm`9noz_H`FngbWi^gnF^c?lqp@}TO6#P5r7_{aS>;whh(o_nWF$AX)_UHd2o+tx zW}vzu*e9E1vzS&imYjf=!VSpxmYYH{B(+mBI+J0BHz%rts+v5;s|E{Y<#4p<~ zBfqnx|H|_I$Y&PTUr!*8mS;K7&h!i@E}}v}b2QxP6#6x)umL9X-ss}Rz{FYGH+eP( z;ug)KccG87v2xAa8J_S3;jypGubBrAPy%vj!NRIBn90Ived%60ntw170I_`h3$1$# zxI<<+Rl(u;ruU3pXWO!3ER6t4hxG>b0|(&o8X8KE;| z2$u6^7P8g`)~Mj^rtpAh>gDd{x?-=ZfkCw^Gtc0{4|VjI?QjmF!{>)_tHQRs!K0U+ z^?x)%C#}|pxHfEwrSaVJF=Q#7`<4N&^VwYd=Dm})pB+~1$KJfr2))u=ZkQ 
z|9y7}n8Yuugz)gUjxC05WiqO0H4Cdnv-(=d;<#~nLiV(H9igWLNO8GSBx73S4{y*6 z0Gbo^p*DO?7Tr;ZSPn5^2uJklJJg^mnjC6PRII;=JeOJW z<~PW=E?XJ>SISO)%)=J70wS&7gaS`!(ZbTA?M%dnu7euMr2*7?8^)^3cfVVwY5QV~ zar)L{ST-%=UV{pl>4#5dIc1RSh=Y>Ruh*)Z&{8+dlpe<~3=9a?PgfKbt`Zk~<2;!b z4Vm`LP7ttgUZe%}WEpmWW3%pqxM@pd6oKbgN3eknJx54i_{irNF4zXG=R7qYWvz;WEm8T6|dpnm+}x zYR!-f#n%**WK@e7xl*Nl0wtIMkk>(Hj+4{H{n9cs7skG8f4|#cKYz~U?O4-F%Vzut zkX>bpd`F!O6GcEt4Utti$2InLhY$b|IMaA7_fw@VN5JDim)#Kd^U9x$ko_5VhU^@# zybtgH!mh#hWTewN7gkbgg&fdV<6TXUX$0juD&1GU!JVpCBqi>6H(_5-BO{$_mH=M7$LWz~ zX}3y(^g95<>#00$&8fMJs8t$X=^~a}Ug|+Txp7!ltM#y~cvJ3iU z@;fBnUFe39v9g(R+_+}1eQ{H9ZstKJN%%#oB%hR-f=Ybayi~7)g;yDD3cUd4MRH7f z!zD7>*EmbDw^oM;gMNHC#v9$}`ynYDb<5a=#JkEmM-Kl;O<3DRRK&JinGr~@R0p8q z62dPxeQe@0ZtkaqLZ;~oI+03uN~M1b_8Ly{xtcnE>J7B-%a^<2^$>{ZPVDq1&`Ig< zEbM@uzk#;R!T)ij?f-{a&;Ls_^De67??^|IDCY`Oj>?(B$R`1(evc-gu~7dHmWHK_ zzpBQAF`>zEF` zIr?&%i`oU#%qycEp1JuOB|i9dDYK{y9|Y1-iV4;KhD{IsD{mh4--{yu zUtn(cIFjH`2|MGf$~1KE$oA(<7cUQ=yG?CPN1k34Bec}-exEzDl{r0hcA@j@y4gns z{pg(0E{8PL4XIKCYe|47{^ef2y`OrMWYY$~^p*(#n2++Olwf^lbKGsPdzRs;7k@t8 z_R)F0t+JsZa%6_x(PKg+#N%GWGhV@41rhw3=<8D!eAb4)9S^Xd0X`zWUttWS6&KmjrK1ve9KvH&G4RPf%S$M1X99nBi!Zu4V9p4UVr9 zdoMr3UOI{Kh9EYwW;RVSFS}i~jU2BWBe~|;aMm!=?LWL)4G_Giq*m}0YawkJztmS5 ze7(H1FF9B3WUpEu#=<<1#OJQ4-R5STnsZ6Ox>CeEU1JsF9uj0covo?(7uAVF}1;$+fHSEsLJtweIKd;#Nli!5ENv9rD0rZ5deX z@-G4?pJrzVAbrji36RI3blKUcu%i?%`lVR{pw0`YDYnm?`KcCs9eqHS*MS}q!eH@jpH zomVSSq}=Mi$Otiimm8x0N0s>Rs1pDGPxDVdSb%(19X3MOvIOWG)t&#Ci>-ePUsSF(nG^Zhlr@Lt)Bnfr%VbPgf%)t$SE7*j2)cEXrN*!w?N>;-n~3*3uZYI2?>8=l{4@s*wyiFgTe<6EOF3g;>6^nbTDwwkuL zPIVbMXq9;t!QWJ>ZKMj==(pX~m3;uqVBzY%mF3!?1Rx~&IxDu>hxY|J{dqXnohK`H z;iK@UbEOtvvLr(GNz~M)$*Qy?>$%EFV6zdKTz2anv2!-F$ z#mlT=6VFPGALmGf7%MC<)lH_Ds*b)jRUP>)AkFnJ+v#sB`2_Ub@r?L&`jHZ((WvSm z_h&>I_}FxDc`+^AWcVUAbpw9BA%fV|6#3GdBvv&&7zFcnD^{qV3~C6n*-fu8_0xT< zA=`7;+R)eQUpF9x00<)Yd%WeW*pqwX@#M$zY5dL2}1iX+_h` zp( z(xn^_9r@%R^JYNGk39TPF#fDuSb@HUg;ZGAeZ|T=PH)|?;dc&L6{{&1{0CUVk!^#_ zLjy#Q*{DAQJgCBW);-eyDNE`~7QB|&!9i;O^tSLx!Df7s5^+i;%a 
zmHlg;W2OB9Qr)^+z6BQ3l!%30qOH2AcC!OeQaWs?7#lg(?e_6Ey9hiBE2ao z-D?4dHxME&lut=iF~i|7B_s#UkF{$>l*0j0W&Vqk7l;i=$8G{ZD~k!mZAB3ylCy{f zZc?7kalSsTgmz;FOf#ut&o$s}-Kr^-sgr>U7~e_o=s6o%SaS*hL8fWfwWh%|$G*lv ztSSY}X)oYbgD;Vf-KcxrPi))e;pBkSCiC`YZ^zy6C}$|m4WDp9rk>hXtr63r=tl`B zyLTy0&62n}tJ#tIl(tHML~2jlvj9>B{lS>;d9p205Gq%AixwGr7Ga~vP8qW+ds!gU zpZ+XeF`p54R?WtVf_O6iCRO%E-MQCgeOoQA>KN_EIix>uOdLIt+~1*{9_K|_oZXGZ zFZC=hrp^k?3n>QDk@dTI#r)ZK6)WJku(Rm^?N+AEVzhYFF zA}wKFddHhTl&4D|10+3!bS(2y^eYRGEv)h4i!ORI+xQh7y&Kz5SF$j8%C{7fYpp*V zA)Hp`qM(ALlq>}8t&K7-ESOIv^#C3<6bU>v*qx_7#JozYzC{gMb)OUE`rJ>#pF+%& zi?X6R86us8M0BYK^CF&-ms$1sd)uXPe+Jy_5FA6dSC1?3yhB)4>96X770q6&n=AX2 zrYm7w(`jn1i2a3hDvekvR6j3irfiW7vB}wYmDL#VXjZ^W?B$6C&nYI@f{xPoU{rOu zsE2wz^bOGGW2*r1s}plDzs|gSP=E%V_r^v9c)??9?K>!OfQzB2dt2K9k!w@xWi-f-aL?Oa#AGN z{S`bZrcHTmD?bdNr0!`))HSGQgz~)^moUFtHXclt zCINylml5S%O8abH{G}_cMP)@r=#NC>Y;R|cS<2zd>E$Npm$csbHN$z)XJ8L59;E&l zNE|8GOgmmd*BEAs_oHE8&ZH>N1vx_4Q$UKy85%{J?Z`F(i|SGN+LS&c_Buf#jg$Ut z*-UOW9GHFO0&ktS8!wg5<#L`(fU_qTJAhp87j{bZK|}a4`Z8tJ6MXhveOxoX&tHdc z=r+FxvC=ve*Vw&E5dEEP@`DQpfc;${ZSJ~i{u$VxWg%2Zf%#--)J>{HROg^T zZ$(1_j!VcHk%;1%*25wiI?)~J8h3#5h+~3=>8NVRtINawD~=&LnyTNPzK%%0F1t4; zRpxKCAXGN4)rv0px;T=v&tn;65$|AOB;oZWLG&nMwj=-SMgW)~@V?2CIUJ=%|zzsj|RG;rc+tX+t&@ra~8PT1fAq6O8+)8nk){8snourt1E2aa@%KVLJALU-&j1C=-V%+W1M+&qXbqc3reth|f+F)eeb1+ZK zxqKcH#d#xvy&_X`U|ivw6NW>5s3&Se_MxR33g*Gv=^N zbrL&V4x6?NxN9^}((Od8O@`9};}!-V72vDd8-PDX_jt!hzyQL$$0Z)jNhr>BD^f6m z4VZIp?!}eNo1`~qVRk%`iT=sJklG&fRZ5MfLgklwyDTj%pf6^HkziU^^)yl$H5CVF z)6OFK3aVER4{cyNDZ>@qDE^Iaevby|%} zXxflm9d+Sovh!VC#S?gv;%cjeH6}WxU`NUt%NNmOKuhuxj#ciwixIXcy|Q2skShFo z8T3T8bzNTuS>wKB-k!ZC0HML=OlCQ0(9X7#!&fL(ag0o$a3cgHJEB@_Ng;NtcVkpV z&w3op1eln*nn*p|Agnv01N?Y19z#9PnDg!Q)LicxE+VmwCW!~b#^TYCQ?sY%6@!f) zu20W21T}LqasRXems7%-ryS?tM6cba3_#3s7vR|dL@9t*0X~MMrYBKFx(NiN>KKz3 z;bl(|jp=@g{ZN9)|CJVA;u+>1LpLfma(zvhLOeNO>PM2vn%iK1nKx*&%IB1|HN zU5^5PUiU{GP^yEQHH`S1%CPyk$99<2nWpd73W-TH*{w0mMLJCOr}U<9<&uB-`m%d9o*OO!tmC*FD-kChO_N*hf{G(SAd 
zH(|W;=UN@mIkdc!sRn*y4+vi>)G58EW<&G9xC)5Skz}REi@*3D2{;lhRNcQqtyd&Y z?D7_VKXl0J>VrW18)x4xIyIRjPdZCmO&u`8=XWpMhZXYcWT+(c;IAf|K$)9}yo%@tTMkSe370F3} zT5h?GpMr{Qar1^#L`ZuHx?ng7Xqa%GX8#%izcqpC-7q(MvX>+ewi!jLkTa1cq&1xe_UuKWdsY<9zSOj8h;(L52N)L}UN4GPf>A`pgmmDim=`HM9ipN?)40sZ zNgrlMg-09TI8I)9Yqf!Z8AuzPs`h@Jy}Fk^$^Z7@=K96-D+oeCSIe&pyjvXSl2NPg zFvQ`lBod4A0E|hma_3^tt-0OU0?7AgpV8kpD})Z6S%5X; zAM+qDdh#@yz8$+#TkW7KHC73Z%sH2}yjJVt8)!1{>fYls?WC1^GDzOL(sd1i4*J1; zHnCft-`Z!Ssa36H;#5_DNN@U7X4^nYyYO;V0ey})`6$t1H(4z2 zVr}Om>-u$@)%N)sL8DU3oE#pfu_7W?Vyfrcw>~ANvXGWy zn*d1Enuas;1$sp}TlAk93g4Pf_vK z7)FIiF!XfTgMZmFu#$V;qXA2G|5A0mhFZJ0q$?~i&OPKjsF766c4w?;zC&{IJiEk4M!lWHb?jzP9Y6?^ix({TeD(5H=YmPR;MG{#~Q(g0NfEd(mi7cBZHkjcjT> zE`}ctyG-|^D0U9UQ2_|O;+58hPVDg&2ewP|8%bm9SRaEo6HpE;6Jk{1g{uaG{Pk1r`sWmSe(r~FG-TVD0g zd=zgB;4DEJRkd`qTUS@08R)b#=(uJ5EMB%(XHBLLols{1 zldp1vC?(kbQWqC5QMDOs3?ClMnwYX92js^ctfJnWD4-s8YELv{Xvthtr{M8TyUlZs zC{71j;-p})b?COJCWuD$%yi`J|7PDoFH`%bI`xT)1DpLdN)+AV9p zp_F>zw>Lu(wi>Jg(YqGb=Oly^)f-ge(O0S;WEy834AvT{tueA7t(9w9wrW{v%~Qp{ zAqIEb$6DBesooiL-{O(BbD~p(!FGWkEP_89bhVq#t+34v(e>t_rx5FhHk8Q-H2>ZV z(B?+tH6k+9t4sTa0|IL93^tc1^-tScwl+|<1NvTRn=C>jh~Re0xgtc=n@px zuIjT$jh2*l6+46|nKdFzh&I7I1pJJ-$9n3SVpJ;^p_H{YF50zn&l&`=2P8}((wdFK zJF|*&(t{R56wh8Kg^G0Xe7Dq78|hQxdqk;dEewEJRhr6OgN;1G4q|TKytf3&VdON5 z$(Czs^_{x)xc$=|%e`u;zQ;JNcLa?QYwm?OWL3X)S#!eRbuex{vzR&L!b{^K<4+K1 z4tr$QcqHKHIyT@(m3=RaPYf_MX}=&uwT&B$QA>)@<}59t<#(XCT1;Nkz?LnP!PpIK zx{U@?xSpzzK+49SwR6ltyjE-Q>+3-sY9Sv{xz>)5G-%=A_5oYc;IjZ# z1B&%VG|~j^`D7y&GHE$cmvHaEnbjcCf5^Y2s_vDKzijq2#A`apc&xG-GpZV8i>s)4 z<#AUgM8ZX_%#ap)Ce_u98eiCm@qsy*8c7k9P|1Q&rnP#jaN0{z=Z`Zlv?(~8?X3rLR+NaBd z?y-8T?S!x`zzg+7YIgbdgqnQ&Yor5fhSK36Qx?N6Ac;oUt>SRFNFj`d=m zVPxydleem9L3&L*9vOtB=l~J>01*#K0M6KYq!IUS`@+4pn$uQSHMJ4Z_wL5-zClbZ zK=`a5)FuHBT`x4Ey5GiG^D=ALk=;YU6?P%i2T>;g2 zR~rPBD){_zql-LR&@M=LQ~JCFZJvExQroE1!E_=GHRAxo4FWPv(ZK%DTh>JwVy=o|QbA3@E?CHrlNC zLWI2~R>rE_%G%lZQA%n>s%LlVmvdj2`Y_V(pk)alJ;Gs`@^kTd0l`mBUxwqn34O*2 z18ufa_0VYZQiU08!DwTmKdNV@Y>Zk 
z?PlnJeMXvu$&_yo@le=@8*OYpH4>Xs@Fdo0bP-=+)Ya9_m6cVrurZsi+fwgDuYt8I z-YO!D>ohYIfg80giV{WIi(rZ%JXfsShFT=B_1VSKD=AZ7mPg-D+6uUBxWB0xmHV3U zu`z(-ctq{Ple(?%-#)gjl+(AL$cjxf{5WQ5Vb<%Bow79kR%l=>r+76dce;Mx8UJzn zROgF-qNyGIU0X_5y6TG_N2fZEu4UJ`VNJtJ^J&M>N6$PHvOAmPu$0JiR;t?qd!;Bf z^_(BXnTHY#clvw8eoo}E;AaIM|d^3gU)FCc#=8pn&y3mUr zIUnq^s(9>& zK5JJwD+WmYP-I00JAU702*2;{g!-#O2vF~i(`T7a!JG3y#k4=1$qTf%fF1~-HY*EI zo1cl*{_d!Wkq@rGQ0l)*}u28apxZJ;)| z;CKW0mMi+#s|^0@HI9dJBLG?0S;{(|Ja}daJjE2HbOZL_D5syYu%O-ZqWU%Ykg*L= zziaTnk4y8q@-f@!F3zWaZ%hNV$7_CZV8ojwm+Z|1v@Nycz%b5HW&v>$;Doga1o*#Q3iP3<67VdFUa1GzH=_>l*+9unlIe68wg0~)Q3R9B=>Sl$An0cY z7+{vkeP{N3M12SFRI+z~DOT|JYa9U3%FkCj1MM1Ob#oO>W6~DuBNs)=S6&P99KBZF z5BEtt!ljivedL0Znc3IX_OEs^1G?6wmS&r=scsO|v>8XY^iGC~S7AsMo$E9iYopxb zJA~%XDG=%o@I(b@YTaP_`Sfbn%*rw`1mP_|hpSJx+fW}Vi@0?Cd90v?Ypm+|w6oS$ zfh@KY21?Ie1u=qMQeE&G@wzep?Zpa?24) z?6HUx<8cra4&YAH{FGBRnJV0r19WS@6QAN#X3e|jbJoUpt9;R_)TsSzfWvUzzH3;Y zO;Ce><<#3}|BCEoy$l4se`b3v4gU86nBD!~u87OA?MxylGWi8M@E2&T%rhISNi;KE z0doFH0GrmX2h_?|@iZfUoi%|&!&TAGfl%+Cw6f7%^LmF+Y~NB31(wAW6k(#-XZFte z27q2GI5(NDl=KEK2f^cJ`X-q|K&bm0Fb?`xtQ`11lRfvpf9U?Bd3_snu0vy>$&z+L zTS^6N?*e~o%gal3S&Qo;YcfVTckiat^&jkx=#VbMD*0K(S+lOlRDNAV3MIWC&J*LZ z5nQ$!O}o{eQ)+AB*r(c9CS8O}PB2s(k$0|&6-tw-y_4fCeT~Q4SYvg;4K7SjZxMVMp(h9{j2|@cx`1Z=7S2l-x(a-~I`3xpC)n*D8u#oUeA_Ld9dVo2 z!2`}oZvO$5&iV^7N$>YyWlL=rN49V2y6F&myh(vnpMoMs#I2PEziEP_JuI#gH@!AJ z-ioQPbQ(No+DH9tZ84JO(e3G(y=WO9ipCYJowEi#AQ7MHS}yTpdV2ht(yj$Z+-m1@>@+^?N)ItTXFVkJ@vz4zueQJbR%_mu(-r@%maxS zwM{-6Sh3Op=jD6ofRpj<0{STFera^&yI)v+Ke^UHp6iBIf?c=LIG}%RcAf&NqbkdT z1RkalYaD+42q5+Z#X;HHp__LW07@mW4=K@18UF~LmPLI5Gjxt^Dd0Up9)C(6LGp}J zdH+LE5u`Y^**R<|@5E$_3YvavcpDlIJr;KE1NBKyhfSVr$_D}A!DnF?k-FjUXL$qS z80_662wQ_O?|^>PF+=adWzd<1BqsOT(Qg(JipW(@cO=@6QL2mu3vAOQoh(4;P@N{dK|0YXGNQltcy zAkslVN`in?6TuK6#5>nM`+fJ`=Zrh<9rwOtpZDIe#$do7j4Ddd$dDQ-ZJ&6Yv;MY)*ZF4IkbL_skPkxPixPqFDnsbFGUsItq29``4_}Z zC4ImZUv7uDw+{6*E%nbyN}h)(>AM~51Dv?{CR>I#{U=AF5r_$A+@a%BYN5$S$|DfB zeK96y7ej*?)LE#00 
ze_O9cAaw^(dP+5=a4wt5U_smd)egN&rFYj&QH}3LlQVCQW#NsS`UbWUYW<{lQ7Z{^ zXzoS(!TW7T5E#q6-f4jTvjN?Qd~rW10nkxB z!e5&8Vh(Vv_8#!N!ApcYtd^e~r1qWjZ>YWDw$4b|jHBf)%jk1>WL3_MXbtqf zHM4giZTfKcJbxJ9V-U9*`%nPjhQL17_%aReDy9s^C+Xmxqk)Y%Pi8o6d1SD6h-DI# zd{_EBoiA=HZ1$I%K(_rM3f{Tb8>=eeYyvTHOu#1=I3gZ(_9X{|^yJ7&yc~-qSi{OI zzQ;NQhdkQNFH-nze^8Ca_kew_fB(|-nzRNToD>RutOM{A)3l>3tVglF;MC?4gWZlS zIyeywQKaON(nc^h;}^JD3S(@MS#*%aXss~K)<0U5ZZHLqOLV)5d^VGxd$Lr_CNj&d zN&-}ToOLDTE&a!ZUz%`A3pHt zFZiAI`5kt^Jg!tAtZ(XHXBFhPrmtU%H@ZAFJLY{V)5aVzS8VpSP?xsSeQy@dh^47z z0ugh!57kugbR8s^^7Rov`$L;7_CdQ)EjNfCqc^FFpKLonK&!DDWkD$iZadsLV|8wZ zi&e40B?it9YG+LilTt(+zpY)W$yUpZu3v(L5Kb6$!q>(ZN=+^Dgg1mdxkB2IDpyhc z0fvp{Sln7ZHi>?xKA9oI>;8tsTI zOu0iLp62WR$|>K?#E9HmY(Ie(*zRslX-g^fS4ZcVqBJJ$N49qTUB>|_(kysW5h!05 zs)*j^v3X>-Byx`TB&P#BTH4wgeU|~rr>OA`#Ae`1x#y6q(3qw*lx_u@w09Zj*qP# zullB;c+k1mY2X<|escHkUeq?O03w0VLkxP&%& zWaN=0To;dVPq;O+g3^1xB*zKP-9chn?1RnikzQxNM8SlSWY6H?Hks+37s=J4NYhMz z^Cs&8pQA4g(h5_+_nyFTD$aYhw6cAZu{LqEz-||?od|xYW!|C;48w1tp*;w-3d&bu zru&4UsFx*YH^Qi)z-H8Gq))<-@h1DrtSedeljC%ZAUP^{<63YiQtwje;yY{eGh(e&?cVE;cUP7kx05{&7C=U8BO^CP?y$HCTTI#S_pS6M#A zEK_=YJ&s2k8Y?Y6f#IYEmmq2W((*$_zpaVmo>8#^$aJSKI`!_-ZS7Ay8yj0{YahWU z&H`n!Jbl3A(8v3ge&&*xP3+|*(d~O?uu560iey)tK&>_NlIb_Eo11-ytZIV;_+ygg z@WN-8WTj0M%tK}gx|P|v9wamWy|{v?l6f~|b3qA1mtlLpb-eUFN@(b6{E5_*KQ`== zD+s)?Sr4EM)ZYS}X6k4Frdsxr|dlu4Egt ztSl`JZkHY##Y;O~ROnIKlYKq$^{bbGQqkYRa{tPF5eeNi+IMKH8`YX|CIx-Pr%sPd z%yJJvM(W2dTdzf^T(P_6+*h8iNkn8zXTiSX0rQ>7hf?!5K7VN+&5E{+Rq*}NzF$MC0+fNT?^~Fp z{qyKS3g3pdNOxKsNE}eP;Nn3i!EG|=H`Zy3(kV1-ueqZn5e%5>g5^Q?z7^leE;Jm4IQtUkkq9Fa<-E&fx>_}T+lJV$psnzY6!uQu za!OIzyux&Cm2`JvYE~^bB$n6jgPKN2^j=I!IbKoE$d5eduQrmjn_=~iT*l5}Y8Eqb zi=%*orkGy*)_@e-0-}<{TwGX3nNf>yz8R(kRX#rYl%KT3h!%+ng9IPRD$7~~7$6`4 z3odBstm&ane8~}q`nEH%j_&}Uyb|b>`nj%XY7MFj=$0&BK&$6nZ_nA3w!Dw?ZwnaJe;N4hb8VEpSjQL|&ZFu--Z|6LcU z%NVr!ukbqDzmV?0e3T-m}R_IJla`)u3#+J5kkNYn-Om4fm zl>}bUFs)I#Vs8;Q-MC#n)#Lk2Rx(h7=qLZ4u%EelCp4oJrnTCnmL809o2sY*BFI8v zcq{dL9*)X)motLshW^`@$sYUx+0o)db#Bot-0M{bTem;TZ-1)|*snl;I41Dvy>~;B 
zOwhA4Tff~HiWGBj^U!^N+xe#W+>As5{u0l+s3J7FviX}$S*gR_$!y+fTrObrjHd=k zu4&HsPses54BMx)66Bs}&4_Xf{N5W_YUBLNf<>4{UH9i-zSf?RffDtwv9D7lJ4cHi zc8+GDdyM9#9P)jVy6w#GSlJ^&VqD(Dilc1G3W_C~V6NJlU;YVi^AE7jFMks5|6?IU z;czp}x7$Mq4Z8Ml-Fz%idO(V2BJB?3H8SVb%W1FlPt{lEJne;~JquqG+Iv+fR{22o=?KyHiK67}a%+V*Tfy9Lmzjb+4>-jt^@QEiO9uh5k zr^fBdVYwCXXM_;DrR6jYn@~9z9Bx=#JF;6L6Ng@H@IliY;-L zIVs!R&PsM~ov=?uvT~a@20thj@74bOa+$r1F6NsmmU0dQQ$5GX)#r`Hbc5>s?F=5r_j1xxwmql-sw)8Rro9Kqdmi>lZ9GK z7e6c>QRvxp+wL^1>a)bjk|T(>sy|=DKQmJ-dU|vG_S2R-T_fGicAgo_jUP>g)-RO^ z5&$3bYEy_#v7&6iCkh0-Z-==E3~jq4<^_Sx^`VxCGBTa7CzA6KTa}rI7=ro&g)f7Y z-Z(81pWEu3&iGc_Z|)EWgNfG4wdvlt7HCir(y|CWfB^#NqcC;{EpuYj-oQHKbs#uf zwL{*J*-zrdpT9@__xf|@+m?LGZRwyrMnyQoAOa0=hdDR3Vc9dHrheDt%!NnY$ zR_l(BuZ>s47l~^fq=MJRgF{>6gvohrya(m12s)IS)**q7175Kkb!jmm(da|i&d>2q z6A;@nq_pBF3Qq@j4d^VdSvWy!H)K7yr_l9l)B9Iy8(Tb1&bJnD?kSPldGIF-AED{-b*}Qmu6z`6?({#?*>(9cV$ETR{SJV0U(P#X&wD%vQE}ujLA%@7m;$xt1>(;waw<|Ahr(qDV}rGc%qBWuFRq)M86urgiPH3e~D< zF;L!}d6D+Vp|fJs*kkB)3tU2>#00ALozM8Kt;(RU{B;&lwpMROWt40!3atTixjv%C zp>$%nH4#!O9&s#iB5>uF>#=;+XES?rT-_Yn_!xAhzAMB|ly@#;Zh2 zS0Wo61Y(v8u8iT80ziw<-ZUwe=0SP%5wwmnQ41qUzxfD=hN9JOsSYsmi0FRuo=4Yw z+}v+9{0!r+b z2wERl=p&lcjUZ+}C_;m5AGsiNtWwRgXSvI*6ERy5YM%bZy zLe(WLQhGn+zs^7VI^px!Xcr}aPE%tb#69w9^LNEzJN2iZnt%Wg$w5eAVNGe0fo%Bn zWxioPw#9_9pF?YZgo0of@4*wx{v!j#?Q-DpeXLW2HK9k-JfA*=~~w~&+#NiSFuGnrPE->SQ}URs3WknQ;jicHjJs zOn28PaEvR?w#Fk0k(oCdbDt)M=~o(g=SGyENuydK{LoVyWNb6E7tqk%=X$-?DGArV= zsc=SSxnl~JpFNxH001Jx=8v&78ppLqX^uVf5-rxi?ur<5zZ1)cVyPFgpc7&66z)R838W3_%dAZ_lU5_kNHRW1QQt1GyHMI?k`G@ zXRhKM^8FF#DrcQaO0vrDt>QxqlNw9z{(TB9$o+VXLoSnpgX78qhaLyVM-JYH52ch) z^{X8X6@mPyhVd`ashB$tUD9HsRZ@l3)_HUE4?|1>s#e1_8^kE50%k9zFm@3ujFF>9SAe&U0% zs1_c=*AZ6WoOlrb)TdSKNsJgkN>zTB7PE76gk_9J)*GE2V!inoI^e*4-qF6c_4Z3@ zskBp{M&0Q0zHWVWriWwLJ);?y46S#HH_`b=HP_LVrMborJ*|Y^ zkkd%a+arQ}-3#=*6sG+PoH~h4gM$#U{WGgrKF0+d>ojo38iA3Tz)JL7D=@OKXnxWr zy4E0lj_Fy2&go>!86Bcm<_>nb8E@Xr7GUaL7$rjBMLdUl1(2)d+PjYfzw;odKm6_U zo4y`1x?955?mwHmoKjYY88kz0mOkl-q4=ZuHn8P4r-+I-n2}8J5bV^rPf~w{*X@tE 
zmy1%j+Azb9ISQczLdnufb+m~mVcKaAwZ1-u4kdR>KUR(HcQtaiiiI!3-DoBrg8`rbtwS2?d68|RzbQZMRi#mAMke@pA!Qv=al zn->&L3of|e<5cO){Bxn(CrmX8#k3YVl|oD5qqU?*?xMpiV_I={{p8;EJWHC1qyl%d zSmHh6c_O&wJavM3YcFg0jbx;&t?T+ZQ0yFNU$_Tn^vqkdWdd?wu(El|3?8pDHc`mR zoG)LltDBjR+6$N;F3qG6yfofl_H`Rn>aW%hl!Dp)kT(|kwrdlXUY1`0-k6q5k0Es2 z8DGY$`Q3v+6QMQp58G7zrB9DT%n0ZgJ-gkY<6(m_JyD$3MmJ=KYiGT?#o>vl7W*R0 z;1UnG&!Pt}`jPR-cMo3w^!__|r#Xa)YEN3^A0>KYnF6qBOSsG0i2tBRY}u&GkEhGd zr}(rABA+HceQMR_?4q(?ku>iUseRwEvC&lGz{59J3gWND6l7+6Im?dE}gM(kVou=Q+qJjhm z2y1uH4^^=jCP7z6iD4kVetduBwEy?b($IP3-E#BU^HJs2xse+quj_K(Oa_D)3HJki z9?)BxKzPj`78%$h%b&u(N4CibW_!K*mL&!z+K?WHDH*J_9{*6FH}-tBd8neTz2*}l z-$)!Qjd5h+=^#gdn;684aUJ=dCg6$|fuCD9IyDj$8l~LDmt?7X zEG$@{!*;16uGGNprD>|jk@&9Bu8wIqS{LJe9pao>6s@d)kFl|yq!+ftUR zeJHvUq!$Lf6-6KY#fk4Flrm=(I^kEyc+i}pzS}#k7vu63b1(*C-uUzS=ZNnRpa}KZ z^_SnM!Okh-JL>6iCN5z7L%Va0$kA>GGDV3ltJtE=)q{3D;^!GvJNb%WDo9W`ct+c0 z+@?Xh-0pR>of)e05T2c5g029mT9lkBV*g6v1a*>;!F)pjrAQlOSX-Ru?Q{;*A_+0U z7O!Y|E^wq&>VsfxK&465n#RmpePdrqNlC8zcG)ZtT~S-0NCb?V&kqKu@)6(OX7hnb zF(lI}Iol4KLBA1uUzl|!`jJ-WGIP@UQnEyrij_eAxhgdvjjK0qU#>Tv)&l~nd~V=G zSLY0AhiGc?iy#puXNM8@q$)=CrQbw*jeH`@`8jjD2Py=yT>L6J$IQ5TP=Y!A676d~ z^bFjhDiRm=HTG1lZ*qawtS_9NzD}D$Us{Au3uh!Z<=d6Y-rKj!w-d6l%6oHf)`~uk znDl*jGasB3r+ehPfnJLPC+8!gq{^yUe6U`*-Y0kOgkGef{*gw2PuezzyXfkXBPfXKrSa|j1y)UfHNY7tc z%qGUwZ%}|hcFa+;A#A zO?`Ii$|ruYUp3DC_V6O-qbqcOItUuxoK)UZZ4Fzm?&IMsj%t3Mo_Z# zA&FLh;6=D(?nAy@OWE4OUb-Qz;-i8EwTmDgM|`6e2^?+l)*mZcmzAN{*yy}s^KTcIZl>BP!ecI z3A=Qs3(IPJ0Sw~!l^?c$0lKQE8#BPS!)zg4a@w9S^jj09o$YFU8wkB-b$IL;0PDkd z0o~a9LqKj)g4%m<9QVVOHV@p=3XRxq*aB%!3cGBm0~j)ZoK`Fk-J2Z?XH}ttfK_H8 zS148Xw&MYB`?gdbcAEp+8OH>?9GCEHM+lH@=ucDw(FGcn+3_AfQU}_9+=u?x>)>NP zb`=JU9VAxo&3V>a5Ur?91v>;pzp@5-fOsY~DV}w?>Ns%ilWgyrAog`W=C%1#&@XmC zkaq=`lK%)c>H1F%0qBgG@I@dy#tioW7DRzc`9BR!-X`N1<0=n;_Wh;dpYf0eDFd5> zQp;mQ;dVR3h()d}|)Bajhf@Y)<`Vy>q8en+6C4^i4<*id?S zd?+7KRD6Lw1PnMH@3<*<8vwpxMpe?kz#(7GxX(nD^)3I+ZxIys=W;9cSgB)*Sut{} 
z09Ss5>hWuB?uV*eOKfK#s%9Mf{t(b{{_DR%(G~meZ^i#F+)i5obG-@rc^z8x)HvQvs~hmC}B2gDMKOfkIUh&sf~E&^ujM|4==F z($J9N6?axDU2oXh%;%aSXwbgGzT2T9fuwIwbSJ{b%Pr^ZmBup)%-?;_T#=XCuZLfJ z@>f*$zuoog`Bzl-uc+*`|0cEde^XRetu7oG2mCE6d&4gq(#5j zABty#=M8($6bv^y3ocu`&+~c`sM{@Vv38}F<5|uoo8XF(KeMvs86_ET(>%E89dEf+ zN(D@9Fw4Z~)3&Gz{!1YImq7S0f$(1f;lBjJe+h*D5(xh#5dKRb z{FgxZFM;s?6Xo$=0%2f@`7izSzd@M&?-2-VI*+4jEtLST-kZUx4b{@dFiEXgk=@%x zpCdA^*eGoG?7A-87_%dY+pXBRTvBZ}>K|_v$ukW3bmZ0`D5jsrU^F(J9k!|b!RLA;4FF{{#9*89puZkY%7xBnojl>9PK zt3(=b=pHA@t;h%dZZ({^8Q6we+ry1I%k%m~@QN2m%1P%}D3DOegk$}J?xn4(eZ^MS zlEQ;aj}k_$h8;sPn-Fhz|F0vp6|~9Zx_NFlaxi}GDxG>+nTN!NkouiqZ9j88^KaiDp?WN8+#tE*(nrI)A{_3;6D&mH@Zuv$45w*3iljYP zt6j0pr;$JNwF8Zofg~iDSjmc_h8gP^I#W(kiNwo+~cCgnp*|CTO3{LOe#chj71+ldqT$$^YI+<_BQS*G`Prirtl!$!Q({YkW{ ze)1ibXPPTOJ4@@?d~QzVk?sSFB1HjIH?hyrs5@;zTZ?|%0>qd;dP)1qUht}P9DyS6szzAH`H_tLtS^=DOdd_)ra z>3T5dK$GvssM22LOHI+rQwC$yYOSS&Zm$3v`vX!<*V*#17=mps)(-uM(bz!4_o@Op zwZ{zyR%Jg2+1WYcfucCaj~E4>95d#7ukuPJZq`Q=)g2Euw< zT@oBXTKMdVPScETFWK16KBo$) zSW*jvak*s;)OO+dIX7~}NAw-HiBKs4i~Uyl&PjvujuHC^MTgzf@ulvyc&_Lzfnrn4 zwA;H6VRoKEIm(&dif8k4q&X9+x8IgCKfQ)Jci(K>r~wzplWnkw?rZNgj~~y>A4xez zdG%2#D_Hb#AFM=SoB+$nCk$ob9z&(t43KTQ2hS=ujChzNQ@U>=5WV)Gc_g%*0*Rw7 z*!+V+U6ik}h}{IOAfbjdY6KBV@wm(w3b-3t3ZO5jS?vw%-kgp{PU^^QC8tQhSot8tY^8YQr-EiXEs%ls=RndQu`L?#BFv@bMgHO zL_v6xW7^ngB0)SI$WaP3Q-fi<(%f6Ej$@2A4h=mCw6+B`!37Xtd}o+rcAs= z7>4SsJYTA45GE5C8K@B)lNS?x`XCqZtC%y%i`s79i(koC>bRkg=S8&kfl6#V!)9Eo zJ7qw0m7&!=@bc0gQJyFl+Nwm z-l<%*_Qy|*Rt(r=p8ZT)tH`Y_*$Ew~%Nw;_*0|a$@sx_P)$JwpN48bjN5o@#ayYdBaV*Ww4lp4&_y=B1AF3TT|q+l z6c8g3s10|dI(Gblfm6QRB5BWaW5j?cPvA5z)y>5$8bMPd&YSGjnb>Bri0u=Fa<%p?(8Wa%Q-SXRgs*tj0Gn8Q zUgB-LFe&;hkc4z6EqEm2gfQH+!Ol5xy2yPkK3--+cWzZN-J)B>_d+|ZZax;{yWn=O z(@~tapaPBSc5v6HE#!0}t`0HFpeL?stYw}3bau_guE;@KVT9lil41_yhh;iyq~|M{ zVm_Qp!h@scB=@N3H(arJE8$h!&c&sa?gb^!VCII}(b(f3VRpg7B zXGQHu2+?xgu2KKyqK1XRnE)$hYYF-Y`&X9sn2{nAN8aP><`WEvRK=%a_)GnaPL2Q+ z{fjvi-^V3bDY@bb^gXh`y1O0_1fGu%mp+BEFs4W2is#mM5i9Yv4s|V!&lcRo?Doj^ zuya{@pk`AW| 
zRJ0+y>I=#ZiM9mOHx-E~SFX?UMmjyU%|hB2TV&g82Te6F2+Su`Yz&lh$ITF6Beb2v zc+nkWqGecD>23fK+T#RTh3U}|2_+8-qs`1-PuBJSAXUoKQ&^1Rt~PNR8z;2q)5Q#T z!a37*V)4!wUwR-DI&D(?N5VW!ljW^4CB5~x2S$=-l5OBmFg6)AuER?)d9N4W-Pu54 z-l&zHkd{$@A1r$7rXjz0-Am7?IPpSH4?Zhx|92nw1a(2)my!kXU~lVj5{%y#2cqE^ zuO~42v|>{xP$A;T#8G{Ku*C+Jj$0%pKzM>hVu~BaifFoIq^G8v=nkR9wEZNiHkdQ6 zaz$!XgS>loLzu7ro;V=^x0tuYSGU~3Tfb8N zay(W~TDPZ~d!9~W5B1SS5o71j7?ij&e;jdh)vMZq(<*oN}O6pgh*|h7&uOm*2QE$_Vj|;@CKyTp<`=mb%Cw~(7 z9G(S?8k+JENL6o{3i*Lx=Tv3Q;)E`s9;2;HJYH7PY~s_}L|3n{;=#@1Liy(l{BPSu zWhw(w5T%@0gxR=u-#yBoPL4yrWnbeExWXaIVaUPpg<}w=w%^_oZ^h?$Oy@g_9KZzm zvTPqO(?k*kv{gC{xvqddJ+iL`ByaaIryK`xBmZ&8mAR8lR1p(Qh6-ccN@v$5#dFAi20!O?=_=we zjF68X*P20HxWg><+J>RUhU&V)8F;0CB%CTz40A3U5>p}#lGS~@5iNyRb|vg@yu6f*Aw=# zq9#4O*#h|`c*Xu{7|PtFhyO<^{9|}$|Kd!$B(h!9=$v1$1-;p0`gnimv=>I!;CxQE z%X;3bcEr~AC<}^J@nVp`7stDZo;M}PHV{21Nqbhab1$$?%Uv|yJm(6nb`G?*w{wtN z_GLI{%cyGkpl((>0HIWl=aLs~P`IQPX!MgqSL;WMZs*u3HK&(1^;6Dno8|SyobuRM zOwKP^)3+=91zo;VdOQo+3FP%1gihO+AJ4;I9G*#H7!0XaYHun{Qdh_`mH<7hs)kOY zgp~-g^yX=rRI|qzH|kFg33P@VGb#$)^{kG^pC{aj{xw;JwBwPOiF+_ zO@+TGE|9Xk1E}hv<>R;Spydt9iC5j_G)vR6H8P4E?DX}e(k{@xZv5~r3vlP4dpu^# zvaFeDfT0A9?{Np$(LpKNSYAW_W?V{(B!)Z6TPGISHb4HwNR;+9UW@qaWi2QlTEv7_ zSa6CUxyPu?{m@#$jJKW8%s$;X76S-djz5W!^c3?W3>Fw6t!y&$+Z63GXNkgoFH5J) z6k-h;ubN;24J96TI(AcKTm)^B7#U>GwhBE1J3pHu(-Z>-hTZL28qL;3l)=&^nI4#h$=ycd4xjP?kC`GlB|Jb)fPn zaTPbVJS3k^+}hitxG!H_8_#kS0U-Pap*gCPZ(zsy12@-3CVisPKdyINCtUYL6o}cS zlak4cH`ny)%NK)%)wR6=tbkH*fw>YAFZ*jIUN-{DLlfC}Lj}b(XO%N87%vZsHd^=s z76(tFSJ=WnJ!$*kd9+S2wOw+p4N8}fl^fpz00B+Wa%li5xvi(&-!Ys7c z2n+6g??@?2VvKBBj$vNae7V|#tFs|1?q`ncedrh5Xi87Sc6~+#oT^F#b3JkBu!1@c z)+A88lZc6XkMK_{POz&J2}#8GFxhlS_$z`9@k)JlVhgfwh{UBZaMwV7V|D?>d6}vI zd}^&tIel;;jSLb7sufCdbf=9!tx3z!qtoavm&YnwF46t2k;7kH44^;ie(f}ae6^}e zcXj##4<@+wqOhm%0^MVo74KfUw<-)5WL6e)*`x>W)@FU_*_?ImHwg>Obyho-rBa{i z?9rH79;{RagFfP;XHW8=Lcys0h zx><+2%7~E;nzXnaPX%-PN1xnx$XHAhW$EWo?vniKfjs8Jx=oM`tp<&tu4xT@C+ga4 z2Ue|Qf7@E)KhmNrd1Fz8(<+mqbh4{Y)3zo?>7LV@p5SzxhWid;*zd`pNBpaC|DINe 
zyCF{k{ux!%-~J_>ZvZa*xC8Jbd~@isFExrL*AmF0o@dkp zT{4yj;7=(=4-lP7Cu7TKyN@6Sb0<=rYuit#DCGFg_C(-)!WQ!fGVu^YD(;90b(7DQCZ7N? zqs|{jH;^aY^;&7Y$$(Q1%EVF+_?*Py2_!WzOOq;EYXKGhlJ4rUF{vivJn3jh@b7r* zMK~9tUWPlN+d^1L6i1|}3zgq97`apAY?FE>Bc#r8Q)wT@fWAuW73CR5tb{JzzXCqMerR|L4Bocsgp67dqk$xR}utE_258zP+~vCsu5ox zr>*d`yl#j1xi-&*DHLj>lJ2+%U2yP7n+- zi>%Oki(ja8^@kO((4_<1xLAzI^XNggG<~ap^bBGl-)(r{ZF%sN1qS%yjA*g3#heZT1${chg?>4w-W`CI z2i@A-sx2@Ol(0#0w7P~FkNO&**UzsMX!Nzl8cpb{N#FhP)z6u1VyL4g*|F@TKqYux z|8iaR;==9kFPDUZZxbWGiW&U=$CqpUi91Wf9rd1e>g@ z_IhKb`lI=-#pn+L!RgVAHS^-F^(1uQ^W2H$v$=Cr#or3WIQ*94>=e)bPpo#71B6v$Sw<`ZBEPizJ*<#d@! zS-_t>E2__3Rt{Ld9>H(;NtB4UhbqD39Irzo1fDdtz@|`6@0xY;rH4_9VC6URpSN0u z_p{BZ?#{q;qA;}ldiCwZ6xhy}F=A{bgYhIq*rrYqnrJtxZxZ#mpW;elDPa>pJ3yqp zuPO?;>)BTey`{&97tw$d9`YuoUo~1a>LT7YOEG z1jZsbD(3+Q^jighVfxmW_R{Mo$I2wzuJk9zyLjMCIz4uPfPVjpK_KV8w`g9e}dK9}}=47{6{-@GBu`7Che z*WWV`?LQv!@i1k`(rUD=2h>46ByL(7L^nz*{nG=sABg*x#C|yH9w6sZek!+reyOf? z43KjroXK+6mi9=t#GBax#RL%+VLv&79?}oTAS`cATFSM5)I48U!M>hC`Y-L%_Rp;e z{okpIA^ml&_91`a}?4G{r45KL5fMD?YW<@Idkx={uSRzd#zF z7Q7oaR;^H!M+`k2C!6bA^v^cC2kve(lQ<3OVCAAX!ik(|@8FE26oYYvvEsPr(*e_c z+Kb2Qd_;W3Vfo(R1{=kY{xu2o>AHs!0q6;dd+6ea$A0%b6DPz^7U%6tC@(kUP@XY2 zQ7FBUZhqI|a@!S_4iS;HHcmo>5FF~v$17~Cz7@dI@8o~p>$3^8+`Uoz{=H%A-MKl_ z#Ipf$?xGdHCZB`6#Hf4ezvHgGeMnx90}niy@(23Z&%hJ!_xvzh0KOLj^aVKf@C8oR zS=`PN@T@}*UfkznnnPFnfkzp;Z^8khrDI2P*jIrWeIf^xa`q<&9?SxItxX}IatFXQ zTY3BddZuXwpi_*=VCT1g13I7DYrxa~Plw_KQ%&$J6+q~Gp2qj=;0f@{-iLrJoo^F` z3t|IDipeo<8^+c~cQp5JwtMXd{SDX*ul?Z;pT0i7T%5F&es2mNWEOIxsaRp|)3EPa#%-KlqQcF1hx9`}+Q0H|3$bqizk2 zVTqRm!o6cvI*R3o+JqiPo~Ytl{B7|!iO@sFc3DNQnx61Vcs)Pg`*dAQX6XJ3XZ7-Z z$lgC8YB-)B&)J>GbxYYQ#cIraT%E|#ooqN!)qkY41RNZ!sp9`lgM1=z$y)3WSEcH= za(m%CbsIOCF5T07DHu)4pcvZGwqc@0ojS{Xg{~W~od-XMSexc;#m}rY8{u!sT`Ao^ z&&GuZh4JM{hD3x^?U?q(jV2*0w59`U5L#!)(lg!0(viECZqQs^M=-5+(|pW9?m$O%$W8~(1!lP?CCOp^W>EB z4C%!Bwa)hp!I-;ut)_KGq9hzpn5L|QYgJW?VeivXVKB>*1Bqnfmw`Rwe=vIfMD^$t zPi$H98*G@k7Mf3UypvD2(y-(RMNG0D1!6bT*qJ65B&9h1;ZHksK@y*)S_OviEE-x| 
zk!ya_Ov5J-i73>Ih%L21`Qfi;;*Onuj4&_DHFcsUYWqH!vKFfTB|lnWkM!P#=>xdI zWat@hJ!+(z=)X)W^8q7v-ZtfTB}^GMm+HO;^!jRDSVgUV0}1*yvKgc%q6| zbM2Q?0z`U#avZsU;SSQA>49Ju62e+fpD}P`Y!aH*vrJC?@R16(&x>Af0oBxf#Rjh* zO!|JL#LAy-T7-skUNpJ)PAO@6`)pQw1SouexOsX-@PL2^-n;_e`I!FTU!up2jOcYZ zmS6@$VPXd*L79wY8?HB#Vxx`F$?V~vMed40UY2~)z6+(jXVcepI5ZE-%eqMSNxfev zns;e(W0X86{QKGy{~(|JCfnrbYG-#VE*+7M-pF7 zzdXQek7*~*z($aW0k;6`CAxef)Q=PgJ>Kq@#*Mj2^X&yDt9O|F`-d>X!;sVAv?L0t z8~QjGl+f-s0A9%Hp66oO4~Kf3WEqUq0W8kL-SzGKL7}Y`m z>xVrDJMUlunUa!1rxB=an7Tg>;_6~k6OdymIBDTlF&cLIczG~EDWhxD`g2TK0o+$% zKwfCfL#Mg1LThw$15klGF2E17=SI-GJ-B0Sr`r>ZBhg_!^b&xT-NV|AtA?ae<%iaw@i7T@BPwu=zP4^p_SAW2@QkHu(KMkI(`lNRh$F$ zFE{fMj*x6=PFmHoMYtfw_|-6_fsRWK<>NwobT_XJ+}#zgdI0FPPx(s=jQTvZh_8v0 zI#|v8<#ZiJNu)eeI_L9f#Oq+~!_nmahUF+7I{$~d( zj#oVYhY0!a)C=Cr5mMeM7KXo!`BUio+OH!i_Iw5s2|gy;|RUkTK_PpX3oVGF;LUr_|WOuqW?oHYq;-O zT(RG?`XZfLe~o}~xgm@|V;*__Xf!{wrW^dvh`oafVD#vXpjBlM{KIiR)h+?bLA7(A z26d-4Hi?^_^Gh=wLa-+Ut;~|_K1=ZBj~5$`=~e_MMHA%)t^TDs(jWadG#0l5!y&um z>2Fs)CI>Fzr1L)2T;CO2N!}4U_XM>vxUsAu<}1hJn_3kxz=*!t186oD=k$2|R$qojCRiTgi4 KD8R5|>i+<|_U{J( diff --git a/PyTorch/build-in/Classification/Sequencer2D/sequencer/img/Sequencer2D.jpg b/PyTorch/build-in/Classification/Sequencer2D/sequencer/img/Sequencer2D.jpg deleted file mode 100644 index 3af4a7a3e161e8589fde42091657db578ee829a0..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 28229 zcmdqI2Ut_8c`rFs3pH81Sdye5eBNH?50PYg#r&Fg-|8(Z`*|TTP08fVk_d#da&az*X zyLs-isT0FB9}fB7-sGJZx>ec6X*Ns}R&e%x!N_!li<^g6L{v;%LQ+vlSw&S%{q`Ll zUA?>d2Idx)R@OGQb}o-xAG^83JpB9vo(IB%g2N*sqoQBFib+aNNlips>uw0CrNb@%+<+cz>gHa;==bqY@)E-WrBuY6x!BX95Q?(H8?4v+pg z*D28HzgYGk2m4Rw0+?{>r!!|xpJDjpT&I2t1inwRojH3|?i~9~QwArW%h%+8JI`_J zOg*?;d~FaD2C_SeDwaV|XQ!s%1M%R9{mf`Di= zanSppeq{pvkA7+Uo)pnRp0dBTR?Z+iec`tw-sl!BQKI z)#^CRaH5jFZ_!4LSKVTg*ObRbja~IOO<7_h3uU?^m^BU|fQ1Q)lh84W@en9!?v*0T ztx3yarJ2i?xR`@`<2BjFeYWMYyT`}+`Smr8wP|u+=xURa$D3CpK^&m_r+zv6fB7pc6xnvGMF4&c`XWq@QVxl#=}&xFn@qhx 
z5{QniaQ4iG8j>Q1Hl;~*4y|AO-?;?|jXc45*=ZQg_#G=t+2W=Y%UJ?mc^7=~fAj$# zm#ZFsG8a0HI>}0%3z2U0^qLxa85bm?A>-|fY*`!abCDCHU!gXsWHKAoQl<3Ytb;pbyeOSTtTE@)DK1TFiVTUW~ zc2H1&Qh5rCW@B{LX0F@S5EQn*488)L@Uuj7paPq+HxLv-O}e?-UNyh;T5`$tP=0FIis?sE*mZS zs~&csD7IeFJP?&_AZ1zx)7?)I9`TSyi*QWtXr-5~eoI%?saZ@m^@GttlCzGmfZ9px z8E^`Oi)u;M*iWN?L&vlChDWjJ2A+zUWNubu-+?v^I(oL@rneY@3VK zW}@gE>rrw$<~W)d$%)pP?YZZ))!k<~s!8BiHFGc1w84{__rkcTo}|Hd#U=!eQab30 zyP&{?68IEVkEESVFk-fxs9DGUw61;bOF-rjdV{iu=Zx@uj(4teE6@_`}7X;{9HSPgBmuj z4WaaOL2Iy^HwjZUP<;3T_SYH+D}|Yu4|i$Di$ykZE%5Ayp1wD#=>J7lGXhr`Z#q*U z90Zl=5p56=xp)UN8i+nO9Y5{mu_2k9dt%3@kFbf4ho0NGf?Hy zPaFa_3+SNeQ97v2;~y)ry3j!;p2+zj3hHh(m7*=vO$AP0@DFw%dtHTt9 zb}&Cpb;g~>PVpq0ciQR2(L@n0iJkb?mtjKi>QCCzq>GUx&pX4;Uuud-0CzN9yh{#| z{7pKgYvry)*;BK6^8yODTGAcHoAl;qiitud*5@7iB#OHy0I!!E80axeG}POI-pN%$ z2(KNN6mAqKqaz|TdY!rF;CPKK zhVMs-9RU;!5gRSYPCAHfk|o=J!cTUwo~1q|X~%Q!`cZW$?m6JeC;t5oo<<)Vzfdim zie^`b_F?5pJn9ntQzjM}a~Zw08P9z}Q318z+sm{oO=eQ^ca3F?C6t7hcu@NVkIVMm z55OGo%lAeAGck30UyEcCOA|tvk#lm%bDzL93^AJeWWMeObQkN(urm~Wl6TKG!fssC zdw~w(TCXcFxBZ?YL3VtVPh}<9HfM{C+jmv1@@f^K^C+HO66h6)5S*l9eilEwVXB(G zFJ8)vbMZu@#*_Pa7o*c>WUM4Sy`190FchRcfI2&SF?s6ye`)f;Twz_{?b9cP z$crz0IVz z3k6LyVSpz)kcX%;($J2i{(Sdb4jd~#Wx0P1z`8&TI-SN ze*B{+N;5*bNxBwytO5fc7QcL-_SpFs%lnLvg+Zr;i5bR5x)Ob6FkK1d>8HKIIIm`Q zyV`pwZe8=N0&ZOyvy_ZD7w*zNv*apMedX!rhE_p)b6wui6XuGS;b~LZj8@9E4e`6S zXTobk_%vSf`-}@aZT*hTFKB>EEd+lA@{b};dyCf#{^1bg$K86Ne4xCkrrrCveZJ)R%&TEa%@n94$AYUIX;9O z7I)CD$)2Nwz5qflcZj8fZbVaMWTU~ma>yki2RdlU0lenM4&1M!gJf@jsbgR=8(f7J z4L(d9KpsXw2tEi^V6j*_NP>3@aG2leAam7pT4d{C5Wvjk_jJ(9*tXVP;F-^1fL;82 zl@4N#CG?zJfF9nRL>^k}9lh&p-Ib<;`ad(!K}rnNJB}>CdOb9k9VpF-mGS_35%S%c z4(iiw1b#F?{)Ki3@fykd0HFCN|e5}cYlx|uz zsu`OtJ6KH`>SjF!M?c8iZ1}AHJRAJAMu5w_WqYYH!9KS-G;LT$R~ znW~!BWrb4{($bXosUDBAu@+{*`EJtiB}VK3FX7Dnl0?X3UA-7-4pUr3z%{|$T3 zBio4-A3EqZ7tKs*j%GPbEAo*&VMcE606uWwCLP4a6^{H4Jn)t}3H&fRVeQGeMCK<> z>P>pdRJ>(Rs}QTMPxnak7M(rfgB3}}3=)NXi&2?Oj97@kYd0P*6>FYf%MFt1lx!LT zFt;{EhRhVtk()tT;HT?fz2~jc_>TK>p6U|Ar2#9zH3C>^VD3-~yvhg3)wP;oC2G>`544 
zP*Gs_3EiFnuprek#9K6%V5pYC?3pQt@pvrXHz^^!V~?NKxDZCLn+ngiLoNVtlT2|WWVIrrL+V_41~wf*oV+eDN!v@GExBss=rN_Yr;J zI5wd>XR~L%Mp{^&efRd)_w09nipPbVJCe4ax`cPY&*7(`Hs7Xazs^o~1iA&m6a$s( zp4Z8hO&lmsUaIcUY9cilUKrNsx&E{wuJ6+u#SvIhhLPfrjNqb-8SOYgka@&-bbzEp zakSb<%iKga2a_g#3GgbMPq71cPSV}ve^b@uE5zaNfA!7qSd^93snnDgg+HCYmQui@)2(kHZ!8~|~Ex9?Zy;^0Tlkw%;+= zXj6RMqk(Z0$>2z^t&Eg!Og(k?^!A16aH+B$&b_n!5ve-rHx^UsIq&NFqOat3F!tS# z+5~L(%+$&0*OchUAYF!I)B1%4{n1VfGedk|s{UxCtqe4wT=f>p(0$FoNVqJ$+4>W> zfHO}@XQhLj<78wXE@Izf}=Sb-W!PVdl51A{5n1cpv&WV?#hK4Zy@&3c1LQ9lnQ%T86fUpg^ns$_mX*x=JkaYPwlAU zS8Zns%l0+4ODv?8;Y_NTnpzJY3uZrB>O(P($GP(8D=zRr)69%ii|`(kj*LZA;gh#C zp4FBMWMl^&B;XNC^nRIhiu@y&!1CBTHcW`@I=9JkcBP}-q${`gU%!7Xh;Kq^_r!J3?uvkuMZZwT8 zK{q)LNcgrKI`3%STQ}{zA6?x+6gQ~3!k#UXj(Zg^ek)!0>YjU_pM`@~QBI|AqC9Eq zH{|6oc?26p?^qt2&?1h2OduE+pkEt1!!#0)p`WZ9&Lxr(b_n2hY;=z37ult;8ZEc` zlT>qpI6|!yI!&3y9|={gab+OvhN5M5?}{KVPRg*ih_OU}I=0(I8C2M+x9o|XC(H^) z^0e*TG7gZ~)<3p5IaC^8;QS%*bwinzFLppH>PnN;mqFN<1F4eat};hiHiJS7vGb!S z%dt2I2CiOC8G{(_gDAC=3W&SO#qY?PuvF=V_289oh+ATGD_;vYynv|JAgp~f zZgEp`+b5qnP`El~<;Yj6!5z~}BmRv;tZfU-&j$8PD^)A$acJZ}VL_ zL>n^bao!3jz7^|>F)QTt&olG2fL?#VslzmUkWydcK?haIA0_vr`|^xyU0!}7!kj7< zI#Em(uyG@6j6s&*HO=605^qNs=srQOlLA(M^`blotv_P$l=b$81FRdcDhLm-<%>&{YxqVa+k+ zkx409<1Is!7|MTl}Y{2R1r`3B_%mM z%>z;cjjQ7&zr|ccNm`#wB{C$^l;L#`D3*k+xE9{Y-87Q-W;`iMmT_HMKAfN&yHM_O zZ?fewRd)=>EPI7w4%mU*SK0z2-io0*f?l%bDvpBc%fFSuZYqLnV0HLfjAX57A&J!4 z?z3;3AYh(q869t{3-{Pg!kDny)m&SUc9HeVU{9=v*BhFY&SP$KnlqSXozxn|1?1@v z(YM&qF&*9VC~L0hj?dqn~C4MyUGeUd~SIjC%)iZ$(05 z?U%{r;`1)I%;KRHeg0#Qh-kAYXd>1)YqMEx9Cd!c6JD3s)3MXh9+XACAMqXOb}Q9d z6%(RvlQk{XK2gfNX_d{pCt|E$RFvGHsn-T1L;7DvC{@JFaA<^6rQc1`oL)e{76=}^o zl|hl$EyR?f)4*)H5i(=08>^?_nyjx+iA=-)Lv2L&%OK`#yGR9<8oxD zvxjij{2I}d@coOK2Q{bXVkO(KpWcVDAIR8yNKuV4=wkiy-sAJ`yfqn} z>FKg1y(O(1CouR?<{Hb@Mz5O{y$zn-Z@=Ms72aJy2Rt`RY5lM|^lk@8`ffd#M0}ew zH_6NlnX{ig7~3grmwHutJ54ip8h5qVwZH%5{iLjPdB2ymr@f&K0r?0LnWH*_um@89 zo+C*678`<*@@(!5)_;iKy+xE#o1@i8Xrur z&4lgS`V~Q;k5KM-)?-0pw_$0v%Jr5=o3R2bMX`v2JvZJDIYQH#I=07L#vWN7mJwPR 
z@zBs-qhn)7tBOfopO`##n>bCf1^H`gycuwOFs(HcJPMEQZBQ?!Zo7{2e|# zMGo9BrGtKk;Pq%jdi#LxkG~4E8=nR5t>w~^TyA@`?j2!iDWL_nd|(mU7j>`Cj#r2G zoFk@YRAGZgb^(;d5vSkQXzOR*X`XU9FkRXZ z#^nvdelh8N@vEj^TWfXQC)w!WSsW1xqH<8o=UXqqV+N0nCj+p|2neY*9JAICh6&L| zmB0?_G9IUUY3{4%6!>Z9VQi~2J&#u%fBcZB;O>2}OM)B7r>r^^n`Mofr>a{lNJywi zh%Rv8Q!;(+RZ7P;%HJ-KFGWgHzFB(sFY(;s^x<^l?vpc(OG?Qwi!+OJZE`NZk`@i1 zS*(cDB5Bb@t-nzPj{>6*kWd1IhOU z<9U8_cfDD8aH2Z|ChbLdrMRzH&V4M2_z;dh+sk(=D=99I(^L1Ngie>8v~Wh1F2tr& z!q}kssRvHTs;s%}u*5PhYr)DpV+X_2XCAi{B%vbwV_I|T=E+;g<2h!=$qpna?80}$ zK-Ap#z*rPQf#el4`yG$!P7gp9&X%{tb2p7vxP94mOX^!Re0nYwov&Lt^$8PqLsc?x zvE3;T{!!bGV|+pE3UO1w`Ou|cbjIP$u(`tLl4-mPIk{%R)Q{+CF!~}XZhWOMDeGj( zZA-uI`H=pECgXhKr5PyHlg2`-s1L!c9w{ed96bdEN0clwP_<9W1T9x z@X@!_9JHd;WKjWcK*^|S$_gh_a!lOXO>>NRaZL)^NZi=a_NI96$5OU+2rKn0Q4OI7 zd5K^n2n2g`Qy%xXIm%?(l4j#e42iktR_l0|X0r*hY`vkb8tXYc9~#LjuVl(<1n$ki ztm*`>PKv%5QB(5V|7ccVfcW8$85nzkGS6U3uR(K(Vg^@tZmJCw8<35CVIhM*xZ`w` zjLfli83mS;`FU2)EK2on8sM~TWeln;R?%k4nHKYzH9}9X$WC?ahlaOt(LpTNC_nl70u4S(mr!=$d$ykkPpM ziuXUZ&i-g@L_ZJCDadmVZk~-S_P{HrmOv@?mYEKufgc5EJI5J{--C}|_Hr+L^Hk>? 
z_$)tA=ur)Lfv~HjU3@~${(zUU#?Sho`e)4 zlqdlNiobm*!zXQ~;h#x!RN+=qLR%r+`qrq^7bHQkCDf!F^0m_T>#YnZRU{*&%0p1( zn5pfgF;r+~E!QI(f152&mr0TXrgJ;@jc(SpyiKT*vAszDrpT7@#7c1A#t1yiNKeI_ z&G9L=YCj>sLw{^vZmD2!No;5%KLzFoo$9hv*2{wX?VFvXYYGIN6cE5u)$?4DxKl+Q z!Qw5XmA1L)43YWay>j`LI}`r7I9QGVF85f$Y8dOS7;oHxobul7Z_H#aM&WX7a=9Pg zP@Gwv|L_`76;FsR;OZCu*ryW5)hJ%aBb{Y$qU&!l;HTc7R^gIZD(F6xR;0kDG{h*; z{ybH;K2H_XZQrf0uQ=k?U}tCd)o$A6>$Lrp&6Iry%*_pU;rT>uTJ3O|^8UmGspR(A zrc3!c7v!$#akv-9l)S%Kn4~ZK$KNXbCrRJa?>wtr{8so4CHk#!+N<`*w71ptcwMP$ z%o&bTKXR^6EX&vJ&SjRA^!xz246N_4u=$j8v1^K)|h*Us0y{4oX}#0UzG z(rpj@E*P`zSfn!QvZpGd1(9wHsx#18wDRhJH&3_eb6CNiX7sN+{Qcr>F6q+Z#60y6 zk=DJC$LkgrD}5V>9;2qE#`eZp3rYdPL)dk4_0grj7d1~4x4st8enHkGp5!EM0^mp~ z`wv`c6J&iHaw6OvHI7{0gPw>V>sBet;WkejkYzHJlgsSN#|t}Ec_gkY{CrI#HprUw z0{Mqm1v=F&;TN*Lu-A0@GIta9@&!kxQj{HI_r6&iLCVh@!|-D;s|?FkZD8t?u}vL;JE z@26pl)^_)-lf~&E4|o7G2eN%eJN00!Nw4gEDxxUcAlJj9GO2)J)fgVU4OLHqNMYw` zf|h(8Zn)x+;~UuY4mQ)^add2ZH&jwiiVo^fM~(q_V%h;6gp`6(UV-;S0q8H04uZd> zgE|H2ppsYsZ3NRUvr^nlj;%k(7H7LN;U#fS>w9=%CgVWanKv2%|!K zggowq5LuDqIshgGP{0^XRY0E@Lk_)>2iuANOjkb=G$5EJXoem^2yh6He zv2-!C5B=l`6{Lx2I&x3SQWZa7f2Cd~n4E)$_t@$E>ZLTtgU}yy8&`}MGbOXW+_NAZ z{6>{?gJITbSI3>laQaW4AcP3HobYJ9iQJcAqlhBKi9Qp-f3*6;+fozu-gevOo4hHQ?t8dES%FPxW?q+E|_w;D+DvauKp(QPG9i%Uphl3uS0GN`2 z_HbGJ-@v#(n=t@`MG;q_W~3)THvB29bWr&zQUk7ls{2?L(L>JTYN$ z`Y`p>TEs;0GiYv49Za0C6KxW$Mk%Qe2sG0pXf{24(N{JokZpErCRZFzNt_e8Rikm|IDokTJm zUxkh9B+6M9eM4JcwnKR&d0Q>1C#7VL{}>ux9YN_za$wrC%&Ov5b=eGo+Rdd&x%dBE zdHWk9V6dawJXBxRl!GCgd9on!%UzAK4@oWoLR!|6VUHv~-8hhU6dL@9WV>K&nTU<@ zf%PS7^fN5KlwC$!y%7#e? 
zX=oJ!?sj^2=#tIcS9^!v>(BHRgOaX@9ry|<`iiSA1r~6KB}MADsbbeP!{e&T7Q{Y1 zw)7lno9G;V+QRFb`h60uHjF*rs3I9Qku@Ws<*MIqreB-BMA<0djOd!|XwO<}&)Q^q zB^$qr3P z6ub`8u{MkuTB$R=9 zo}@}$XNms2I#K$og#y2>XpUfhf#b6_*N>l_<0a#j80(#g$*_t<-60KS?8}aM$?^8S zOPwM2l+?{i1%rK35th7MS>{HCB^pz6wdlduwAnPP3~acj^@6=x)8+Xsrm)Z8T4ZM; z-tdz&ng!bJc#6_^EKOP8w$m%^U z8U5zEw(V3+D=@QCY$s8H#`S5@W)SHb+3nhNGlIywOIB#`iB_zlN@71xw^^cMJH`%H z#|(#T@n!{9RsJxu{%VA-)9v&O+yyIp`|D^))2Y7x%l1>#2Bd7>HwUhK`?C@!b>1yp zFhRB41?VReeyTE=D;g-n&h1B#a-;IeVi5>8v4j>dRc#C*PzayauN`Yr3DIgv1XvxM z-pP8-e!Yb=M`}iTw6nprC#uPu=w2|MklGtfxciv%My%xEjkjx@I}bz+UsZf}epbpD z-va@X_pVZsf@|eRP@w*%v38_cq5>43{#R)sy-%e zkRo-{Ak#xN#tf@toNNqfibb2{FE4$YX!U@`szYCC^CHyiPSP~ZL&^5Uz~5-1RQ)QF zIl_pGCJWwIG)KOtsZU^3I?Gv2YKNP&5%((LFHe4*8+c!S5L2^( zc}8jxP$LQAIOMRVI z{61HjVxl|BYD+tb?nF7J#0hZ`gET=nyw0xtP)JSl&b2&Nm`8@6s-IKVLtU_zUFybNK67HKFH?;h+|2MzD!Y$BgaQegPz*8^5@cnbmR8Eh|4zoCq zu{dk36gT12HKRu1&6XClfKAs}vQKQV$B6f%Pyy)l%EHaRSJ?lZMuy6T+)JiW)lU}% z649NUjqJ(aet<2%6(;Hm;;3y4Mv`xcaV*wXFypsg}fbzv0d=Xe|D;~FT; zReLh^T)Wmb#-@P{r5+Qdz}jzXaDTCTeto{A@s|J60I$ufzRcyS;`}9^E>*)^&ZsDv z*aFi*xd5OV&1<%tXO|6^0t#1tXijMvBHl$UPWGLlFplf$96T;MQ92Y1MP9z62qE0n6+w({mnQ&PkS}#l?i@pD2qAs z^+u__Dm)x9nBZ;B8kwN&?*re^lrrluXq~8}#0(cipapdoGXc z*WSvr+&ID2C3uPAv#=G#>lI;r#qhn!dxS1or31>j9vC=*MYj~lRC*RurAbtE%dQrT zm+RTo8!#1&r<5I zMpLp1^t+##X1UE>R}_gs#Xb!iNfT`0l{4~Hb(VBm8|d}j@xwU1&mT+DA0#KkdOmB) z9bZRilTgtcb7&a8C$1=b-*JL!iUnL=01LZ*K*-P)IBO4tRf(JW@C!V6N{QLsANrDa zU_8kkLpXF9m#*nu$ygrYE0vl(qTY<}G||jjbuu4Xy2c}HQrV294x+{L@85d$Jr@`VucAt zee#P~v#hd-{Vd_!47cQ0Jx;}w+3Kl6JUwZvy`tFjy|C}2+P`8q3budwdsiWAY^!tP z;VIe(WIq^!W1y88HcV8I=pZbRj_fA$@&WxW*m2};!6xtTUstHN-`~XAl|7*ueLkBr zvG8<0^4B^3*w=Ov$`u=z`uZyB@uoC|c(@8Up8&MKEH>%>wrJfSJT0-nQr$Y zE)&JSmDu*ECnV_vKDl&qX1hadg&VulWD?2meX9&J(l74vb1gRN^D{OSgjD7z#O#2a z9&-+z(H84n;goO|({HUbXLaZeZm?Lp{UZ~I5wpa{0`ioNwI` zO9t9rKDF*81N}Vj=csjEbkH`?b@MMz7tpQ-3IP2(K*lW}1fhd8PoF&MMSfQSQaCb_ z`W`~KK$bl@jokL_04mMJK%(T(gWSCar0Y3A!Y~A)gKXuYN5Izo2q+yS&PuthX-EhC 
zGDpq&KnE?B{3lW%6Zg_-;mGYdz?Al#rd=BPqjLz@O6)r1P%@Dc`7MwRx+P2PU8IAk zB-VeSJA4I#cUw4U&z(B`DGkXM2BGAG&%p!DCN>aSg*x6|0YsBap24V%V&Ul9fSJo5 z2f7n`P`2-Iq0bjgMxXvX>~C*aZep6mY-5=1PBHDlp?mq`rTKi*+){i?MaRGSYzHPk zs2nAF>|1+EezSc2B~g<+I8d0>;ruQ8plY9YDrBhY-agQsga`A%y>F5e!?B8q9B}h4 zUfo)810>8%n)NN?L|mKm7*%i!Ay~)init&@%!ZHH+Pt|Fl(v5Bef%@a z;D+JvGY(U4w~dg7OZCq$OMG&Y|HdMs#~5C~tN5@y%w}C;ps;>zJTuj9oOc;x<%f~# zGs?|zmgDrXTq8;u=9Jsk4;(fp+2Q4^;)H6PQ>Sn+zF z<~XTtA@f_D$PS0FVf7=Jbqgc;WWgwZeOB#b>oCsHvstQra-Yk}E;X~ft8D+ktnkeT zHO^smzg(f1hW%&OO@W4vXc6_A)rQKfdT4F&X1{di@b`w?+sKX%h|AWI)ZF1i#C@Q* zPO{+ZI1Nh8fGm6N#XyDt_3a1JC{_yGZ6lNp(g9$w>#Q+gYM1E-P}rBH8G(0L%S>rV zY8sUK@_$<^(Z%`@x$O>sWTDQ9KrwN#3i!TF6XZDtBZnOU^*>FF1~5B|5Z+c`mt;L^ z$2tcxmktt1r-QD6Nu;5#$F!IOnhlr(av1X$VPKmADjjrqz4h=C&_o>-)-$(j-MamW zk_XrqOTljJHn0hxA2*J=z$<_XxY9wtM^iNw_Ofe;9}l?NxBThtHQ;>aCNy~!Bj@vv#H4Az%h=k=tUOt+nZ!SC6?6 zMigY}I*pwy^Nt9PZ>Y@!`cPY=lp1r-Aa2!-Z&DRhHo>H!&ZER=|7^ntq^^q8_fsBq z`>)s37p4c$%FUv0@_I^#5Q;)B-oRQ#=pYyLC*+AcYvg!2m?*0c zG!?VUbt2C>0@i)>&3^`o{%LSWv%rQzCTVowQTwm3)W;OlymdR*&a{lQH)V)Bq^Ng5 zmC}db&@=fHb4#fj-vZ zUdUoD?I%Ebv`~=4LssfV$TFBT0;z62;c9(B2T}Z+{_tRUH8CpSF+V{M8EVjgAOZ04 zZ#~ow8n50y;Ft0bNPvgCm=4(E5ItbyBqW6ys2uO70LFL`LIOHi-y%Ei(guMj!;uEu zA&ihz0KIZw%-se&4SzBXIM2K+H5_kF2j#v7$XK8OW}ijq zSc4844xz{bf@1h-j@n$1gzSY-&jKJ`hyikE8%*Ixem+#$2O3sh5zS60r5s@$?rQ_A=meQd?_=aay-+Npo>cKKZb#Exc>I zQQ0!#3)0UV;ktv{;48?7-+|9?2d63 zplq|mF(*{rkdk!ZUz5E+wuKDk;sKko?@!?*#mp_VQaLhK2$P#!$G0d&vJ7k0R9;Hn z0`&YG9MZ8vDeV^ICDmdH0D?0=7awqrjz$uZeDI7N z(=mbahShGPZBye*)(1&}&+P{d?Hj#9#Hv(F`s5O8I+e_x4ka1+me$n6+E(4?te-wk zoo>|5q5xwGXTpXAuT#{>eES}hRL_OEsnZLSoqv?EImn4G6u~cj5vCLe(s36}re%x@ zCan-8NrlAw5IET zpxx`(?9F}&k+PK9mYi*G2kGY-wPQt?zKiV3C^G{VKR;dhVBPzHh?czqacrP{VMgFd zM1|0oztGvgmxumPB#0XF(Ofu$feK^Tl)}P$2qqogS3HwxQ@J&tFMDc>kYpqGW~|($ zTYjF=@xIWIfI-bT>T@qTW1X;0@egfg@dmV6M-SLI2T1{x;>Nx|pv~VPr1OBM+)m%*1bS}z zPBMY6f3fBdWVc9731F4;9i+ML94=B%gr)%LxUGf%!*JL&%8NT%vlA-XpS+fTwAt%0|>d}o9jT2a$Pqhf(u!lOXcviBUx|M 
zlhbvO=j=RlR4MTQaoF%CDPlJXnt;$C<}Juh9WStUHbZEa64@wH#DJmj&h#+8(Y^X6 zFG9SO0;5^V(@h$L*)Ch7Qt1ZB{iSaux>Z^lIj%X z`QFtGXxLs3#HfBOp4}8H%sPe@nos;VVZrF(`}p7B#PyB(CgL(0jq|aQg)lo zkO%88cmFI`{F5j8Q@Q?~r0`#qP!cf8B>>ZBUy(EC9ZSopx@6gxIBc{d=U5AyEv)t2 z#*gJMRB#EUqZ28BV8~l@)F4MLzwz@dhZh|ik}A;JvGW5=KYp;XQXFZ=mSm5wz~~&5 zd2N>cd&fZg^B4n@%-Na3_cN)xc5Aa+UKP2bG8x&*i_`F_r7#VhC%EUn z)zDA`JH;DI6{ds~A+#sMMBr#*Qy72axU{AUaftu=Wy-K7ulI6tdy631xS9E#rb`(d zsvF{P&YO45^kxO6i0{0TH(O}6_uMZ{nvs!PdnsOvtY7PFNp}Xm^&w5u!WH10;v?hjk?(2Nn- zcr3p*Zzjgf;Yy60Rnv-Cp%Y(e@1e-^5u_Y6Y(gF3*Ra~c)YMda;f|l<`s>vl^+iI) zEQ^Bq-Ck}Do&E*kr>eb2YX~mOF7^6IRbza2j-aJ;ZpxBfY*^v(_~?AOc06?89h9$gzGPnx=z}ze5P+ z$hBX9U;=og4H!qk9=!t$D-Z~WM*neTE=#Qj3XbmqyM_Y_rKjm287v)i?)yKl2MpAl z+-uz)qJvt(|7p*H69eEAxinik=-?|7le;Gm>;j=3u~IytN3S889nz$zgP;&@S zHs|!uWRbn`ti(F1EHYAkr31VHrsyDR1F)yo!PB|;-09rM04N{^=wVa9K5}DjV-q|` z1I&Mf=dtV{?Mv(i)ay7fHE`d%a_HIopuPkW=Ga>rYCPSAJz@&|;b#6Kw2&WCvl+;R z#6V#ZUVFIa^7>5(X`*%SXXMg2pg=QvN2$QsA%C8|+GJ0GRs{W%M!>VMcmiiF_Z0qQ<1y>MOO?PMB`ObAW{-nP zTX?u$6RCZ$U3H5#kx`p_Ics4vsmIgR&znzqj2%@-F1P{ve516l2|jqK`#xIotp}-V zyR-q35N|8f^lGqBcmkU`D2;O8(JyY%gfcpR9}T!MPKfof_CFNhEl2vOjYVWwKlh7; z*=de=$gUPICfJPFWQ)XD*a{^nTPNI|xh+(Hk^H`PTS`K*@8+3ssPJekFvQ`dIG*9D z3(Rrk#49Y~|ClRR&hQveD)?iH!(aRLU*%nGnUfgEBE#`rVC?ZbZ47)=DYI_Z*g|mfn4Ns33gh zQV*Z<0(a00FZLE7a8*^05DM=kK4~t$cRPQ{%TnE9wO2_F5Y)(>LqK{g3&yeAoI0+i6E3UXsM^fXd3|=Iw!UROc|^f)P{F{m8rZ z2Gml^fk)G(4Zw`XG;Y^uBT+lbGu_#vJD%TCZ9Fq6!b|m7@2rHrxnG=%X}4TTMtJg4 z9IEQcCPIn~ZPUI{PukBef_hl-&cz^9u<#BY(ol@18c~*kQnJt?$U9jx$Kt6z-V=o| zN5OcPGhv>k>*|_JN*a9nE3uZNKi)?f+7~Y=F@4CDG{P9jXSs;?u`k7?*e@lS=_}V6 z`tLv9^+}OZ_lZOErot@BnJj^M2g?zZnIc-npzNwDO06wq*a*{wdi?GUnQNh^-6Y~D zEwccqg?6W@3t~%chDz|Orc)kAgDAmxFLnGkPkekrIC?GJ4if#o!Moa!$l@p^z$0es zXl99{jL%cetOxyAB#}% zkp9$WrSDGGYW?DaH*#dMm*3AX@TLlxTA()HuMTF3FvVE3!m~uA+P9}Pf=wc6H>e6^ z^ebrFEDt#)3bzQ}4Dc@Pn8#kJ*kXmh@Z{?c2_jUF!7Xh~WxN(2#r$9n z65yew=kEWS(wuC`RfVcmm{bgC#cKq=Q?!Q{9cL@(kHm*NTd*XaagX!eg#0 zB~yii(EBH-ursSUU@~irOp7Sorv}PQ)oG^als&NR4a7V^cb=$=|B!8BK(A(amCs_z 
zDf##bD6Y5gdzxMRm+F|cjjOfQjcGja3?}*Xn)m|Kk>`zJ;~uMp$-V%bB+=-w`&fS9 z1{>O}w99-E^(yV+Q_WGNU5v2}yjx-rWs!X-(QVw@oLr-xxT!(nG9+ z7gQPd-j!wc*q8Ij!0y-97xrL!%iJr!!k%+wWVc}Rfsvs#vZtNvQYvmg(XR-K_jqPE zV`sv|dimxpPOZWq(QnS zLM^5Oa+pJMFuQXu`HDuTBkBDq`r!!$fq^Bz?^nfmN)R&B*)=ExY- ztLWMUdAJ8t*=oog%+1M8$%>-a&n+bmTF>sua#E$p-W$COtTADXly4+E+Gi*j!H4zW zb!*FwYh@4clIpUX?14rG(;^Ef8U1a7%s|OI-(f8b{3Ss8wvm_b#Gq8B)H&tt58J6V z`AY_H*m$JZ4IDwVc%pc0CfLlt;8JF+Pps!VsY37HQlE%CsHl~RQty}iEN+qV%;wuY zf)$)9V(_e#mqW@TJXPH{B{Vg6i8DF3?ZCCl*;)N!~If8BrOl{J8I0@v2`BX?mJ)I& zs&C8uW6emw0MdZ;VO$o+xXJrt1S7t*ovN*`Tbg>eTRC21o^0A^u*RHW8lPE}lJay0 zTjLaiF^LwlXW8%=?P_?NVFAOLT*xuX7-=ft5+9DP!5C-6CdWxyM9RSQyJ-+|bw?{V zg7x?o)%>I&>$BP{{}q?6El zkYa!&yxW;Gb7tnpcYWu)*ZVW&+0Rb)UTd#)uY2835-N<*QP8DV3K_pX<+{EXqyMl4VjL7yx-cQj>g10pz^J?=B_Tzqq z@^ascuDAw6u zYj?6szUo9j_5v&1A$8{b*&!<9_`Z&2^pZ{+k?wxgjL;?1Yv@%&_ff{k}#n)I_2 z9#n;cw8*E-khClD6cy0nA8O_5CRICn>dYazqZScPyPo(ki?dK!7XEts!fow!G*z9A zV&K`A>TWsmT!qupQ#5l`(%i0CvH9`N&=+du`VG87DwHDqhQ(le5!D;QYaT3+efy`d zvKWdr(BRmw$K~i@0I7sB3<-~XI0n|AD^Ajf58M0Q4{*OIKfj==V{^l9)bw;u*{W-} z1#$E0!b=7_FpLl4YfO(XJf>&utw1}Zu@X!=wD+VB4`I%q zSH>hi&^xYys7_ ztz6nkc8M7%@);eCnp|0PsT5>f@dOE>+pUy!+F?|BZ+*iS1l?mj``w#%0%KX4uG;K@Z%qc z(VtNMcbK0w8i|p3E;3~8_*f~gVGRVl>1WG>RCq`w^~Rm=tUj1YRcja$wjaK5j+dcIm%>7FEqI# zpA1&l7bSJDm(MZ~6x+PjtD+7x8m5&~ytm<8d@-lj5MdHfs$LRRZg0}_D^&8oScwCX z>Jt&yZij{jTz!6rEdYa(g(e#d4WYWHZ^|ljP^jtK3(C5;CAVM_zXCAR*e}Lh6Z?%%N%+s6T(kph{Q|xg#+Cr87Y17pxgjZl zC$vIm6&`FsE>hOrpMsa$xCPGke_je|RsR@Tf1?>xME)Et`*jazH@K`s006WAiv`61 z*v1jc*Gh&b*S7s{(}2!O`TpfT00;&s1A+TYk)7uUV9_`ge0JIJUk3ZjtS_#=1M*7Z zEYuRZcKCpT2i`fMRba$3$OlabdQc_BYFQp&%{KJ(^@MQc{med=JJ&AmW*x0l`=+@o zB1tg4Q0)cQlL1)TInA~huj#5o%-mO=tMiWax`&1c{I@&qlf%3{)VVQ;v3# z?)ap5ex>~@qcjdShND8~7-i`ATDnHRH(XEdi%a>Z%AWGroV>y+9xPviDS*%v8}puV zg_z&!kIOEae`a4*e9U%YBpFxZnN;2{Q(To!M#<7nUH+b-K+hU8 zJ1Tv4{<1ZV5cwyyhbA;iD^biZ23edqF7YK~gyg#MbRrjoc%Rd@ATf&q_^%TrjuR*y{l>eDp*4&Uyhw8ng8ss zN9#mMzm@y*Sc$uW}__NN#`U|FV*iZ88h?*UJ_|o(gBw%8G7+uz~^OKD`WW3$8o4xaK5^(xutTh 
zeoFO$VBDMFypTE;p;{eLkYcG@+jQ4Mo}SN|bJEJu>HBhB{^2Y!{kk+B)KH7? zf1igRKFhy{vkgDq0Q^Z@&_AXPT>_F?+#t$&SQ~782>Q1vPqyGo6o~#yW@~{R3f${AJ z)^L`{u7J^%nLV7Hl&?nL|JawnK>j{B7|vojZ(}>K;V@u+ioC4rY*pZ471>b~Kz6W! zF-|k3IlTB~(M0@m1+1%LmivhEbre{6b1hc@b`bv=90HPyfHWaQV1;Od_b(5#8=Rr+ zMc)6#tUtf&SqVg36ii~DYw|9U;=e36!hc_1^UC5Q9<3uI+l{wZCZ>dyI_hNSuiJS& zue&9g@Lb%rM3@ixsmZSCxkAjoVt+?r8QHs0VP+sfZo6(__G;vpnOk>bWIE9_+nAF_ zKPX^2qDPOGL{7NQ_qyJ(>=eKI?@;AGVgXOU4y$5D-2$bAWsc=@(q}OznnZ`x-%8>k zN*~I<9*d^#r;gYudx3$}> zqKow(ZJ|5_m!_RT%7LMU&}&&7Hsbo z%I!v?qMutJFU#%t`Qovkcuz!J97A35CaKd%GMDZGc6E)B@>51u*Bj3s-Hz;IhbdeE)sfqX(7SSh>IoU~Yc>U?3xXD-Cqgks> zi~C*Z12l3&>sPYgxw|%OrKo7fo*22Ahr4jUVt@W)1^yG{!jD?VyMC~C3vzP`wi4Kd zSMUSD#;M!F+&VvF&h-}F4nKYFRbSZLxjb(CFL%4vPp}v+GPnd2WwCdcxTB&(0&!@Rpr4_Ltk~=@3jl-xr zogvNmoqL;7r*6Y-3U>~wo>YH@tV2F~$WIRXJ?e|Zp`_mceC8( zWFJo29YJYLGoP8$v_=PVbEH>c=y()7g$@(IbwP!61=(=gnF?P|;5;oG3B7yP4gns; zBRB53&rZq9dd$WKL8d1ll*$K;^favBaevLP$jiRWq^?zLbPW+bHJF4@cbtNs;>Ca$ z+FExNs>*|Soq8j73sh6G#*Z1)vybZUw?5rf=R~xr^}2DaQ^qWIH6O)^OcVVS=zQ?Z zdqHdc{>Hd^^6OjqUtZ`Htd$OAouW!x2;U`zuA%Pv)0HCR<}68V6AWTO<~wr-{O!eA zdl8C@eIIPizY}-DrI2mH@?D}>(O{0$P<&C|Q*BG<5d-^uHh$20%E1Z)W=ictk0^hq zw{GQK&kLC!7>evO%^$WPI}ETrx^jiUwF%VAcrz4*tIsA}UcT27C(%awDYJm>VTL$n z7&~T_sQvCoe&R5Dl1R8wAsj&Fy)Pt;mYTYBHJw|aUQ~5(!V_JPHHCVC2 zAd^b(H=bMfNb6Z}XQR^nX%e*s&C1s&{Lla3!|Y8rjk}o->x*#|JaAiBEzhn>j!#Ut zBggXCE?ukkXZu_$%4jH&3LT?Ng0fYG)GkM|464S_3$+9_e}bjS-6VIC^rDDoP9#fN zdizRF=c1R3Ep*+Pd)9v9q^pMOuPH6z>pevQv?=~#(|VYFof%O%0_9yyki_@96xl+aZer`P#oThmsL(}VxtkpwSh_8&fuQbxg;Ls>i^;ndz;vT{M2CI4$ z9o^`Exv@&#jbb)_MZ&IIfHOqofU_s<*KzU^C2ow6>bCha`{p0+?*G42Sv!<*Jo`mP z@6gS4Wt+V_93P+IZ<*YREvaI)5mXUd8ZStXW<5>vr zG^AS5bel<}SCo;ok)XsU$C{FSx7vT^h|-hH(5QQw(m;SS>qkUGqHW+-=0|3QE&Hku#n$u3JSXx*xHJEFTFEu z(5?d3l>>Z;_%pW^)IG`ig27g5!4XPy!H1y-mhO^dP3i-7DLSnNBNNGb6zQUH4a7c# zJV_y3*s{OeJPH5f)TXo1p}cp|(c=bvXkxEMnwk+i0uIkgrRp+Cb3L0{Ea9t5_Ceah zOz9CozQF2?W?2E;L(D~UPc42g#zrw@r5~ip{fMPQaZah19kZs-zqGa0cTTgvs7u5g zudhL)uM{>x3;B{Kt;$Ouva*Mj>nv*E)crOkS^~&iad(nzw5w58l5C`_0_p^9jFFf+ 
zm-FllWt*NLroGw$W0oD6wbx-MhL>f@R`#k-}ri6h0BG)eIFd? zoQ_3aoXqTs5v6z8Wbq<%G`mjAMw&a@gt^wlSa%plqA-^#UCc5V<9oZaM2a#Eb>oZ< zd1cb~AkW6A)4Xh#o(`N+*SkMql1Q#WhTM;OdepEUE!0f!shvNsxTl)rCs18-7^_`Y zS;5?S-cF=l{uWbbA}wv^p?J!J5!T4NaBXGp443lm!jvKE+fW&gqDr(`=Ojj6ap6sz zwWC*|U!1m`tmjdYMv?L0Ea}iNS+ktI40M9;=)NK{DkQ5d1(~Fgh2V2V?c-ZRs9>&| zxPL|@3ctG-_C^}f{LRB8ab)1kfYa$p$<8^qXxhIlb5SLaXJ{C+*5i1`O|vlb*Zb$G zN;T(swSK&CoL5iJLUgC)oVEtN%`JYc^g#VR(+Gtm1dVQ8?t5I?E;G$6)&d(t&gf2) zv+*JE)R%rz`&!C9QS0Cp!(3SQNfY^0?XBQ(txRt=TXNvX_eet>(>Y&`ChqQkfx;Xq zKB(JiRuW+Fvld!sa8@j-`f1JwJX?w)*93sr^(iXsweK@A|(8-uj)(iJ@3@E#g+#zrUDI7igSCAwPb_KC7P&YDR%OK>Y zdmFD(xoJSd6o?GG*@EmF=BB{bLBzi>17qyQcbS4T${4f?1Z_+La>FpY0#H6ofX#s@ z!<5J-;lKGpmv+ErK<3c`a)4*G9FW_Q0Z$eGEdyWkehZR8<(g48%L%N<#8q?r1W2oD z=-e_uTmYXb!Zv9zIvTui2AZnFoI(A+%_(m)IssG) ze7~y{oTL+gO2M_MHzQZ16^Fdc!gR(L#>ntbSBsmm-8TAg1LL4ULj*kWON2(uY3`!z zWO4*m)TKcIp%z`0SsYh;q_`^QUW0xoerIvgj!|OmpmJj$-;(jOv3LN$RKQ25$NZ4^ zLqP4T>Kt>N-3JS)5M?AuxvfhCSuqz1-FzKXyAymfrfiCos+zIt8IcZ#xS^?NNg|D&+y`o!3|MSU^H? z6<`@n54Io=%t2xeV6Ds)!2ct1g%$sgRs+h+0ZXF{Ah$*CLJIeUGY17-2Z=kg6rYP< zl?MqPmAlIOx0!)`bXb|2NnqFDS+=kq*hVbK^TC#S7%zse-Q16fMfaB})*No$8;rhG zcR!lv_5-_3i)+Tm0#2qi5OQ^QEP4baWsXJJRsUJ}3>pFAALPF4@NT4TD zHKPG-XmrRVPFykcoiDlE?~eA_2t->-#w^Wp*09tp%s%01-=KAG?m7x}DPv#e_RNfJ zs0$wlw z(*FSn75pZ|Qd)fJ&*X_c$hw1fgFY6cGz{~)5DU8E>ZpfUwQJ8=wLp5>PN9IcIAXjn z=UWi>q~BZj2L>UNm%4qXi}&yEw>our@Ty9f_D96=(wy!WN`|4>t_h?RiH! 
z07Yb7{ym(=ES^eWXFBe!3%9zZq8Ypq|j@Y{wz`&Py~93+&k^?1Zb9zJ1In zCV=5<)>(K4aZbx&X_B5A>dSeYhRL7NOkVGh!~f82{t~;Wp7b(b6B(|m(q&L(-Fpvi zKM-f(Xy<$5e3Ce-bw8*Wc;a;UX`BbSj`@E?od4u*Zhy4>!p^8i8j?)Nak$F1#Eq)N zFjQ{zOHN(n)fIY*o*7I0VZ`Ze#C{u3HXC3lM+scj3wzeO6Ttey)^~%I4L8}wDNYBh zVk-AyDUgRVhv3~Yl;vgccX_~AT@dKsE`=)LzElbIRLSC~RkBR|N7=CVSW~hek*<*9 zlpdH#HCWCDG?OPE?PPkSHa!ZxNSJm`=*4zltwjZN>3G?uTCY1lWS{^{ILuwwBP(v5*Hu0qg4QbT|L z(OH@C;);TbVityhna`b9xal`pQdW|z63;{Ti4`@K4DX`x`A}XMv%%Td@iNB!G$2sq zpjihZ*rQ2&rn%L`>S z+AI;TB~v>eSlBHZ`ce$E>@+zkT$pQ>6equUaW~tU z1)45u?F&dXlpv^x3Vu7_ir>dcCDb4G%NmKo>Qg##r`-0`iy6I7_1is*>R4Q9RkUEH zidhC*Y`7z@bbJ2N{-#}lpR|)K%>$=JziCNxv%$U(DXb{&(#rC&EJft#%PuHCMTs=H zDyU6Nq?0hZKhivX;tQ8M8VKQdcW7|AaJ47vG^H*ps6$&Y3rEHAG8KcX4o@YY?ih*K ztbNagabl{3vG8^kf*{&9!CJNX10mE-zMHM#p>U#D*s`RBC_oF-Yp+?aYF>ggTmpKe{R80v8cJAczZ zcdk&^*wJ|48&R$DK?ma74bRE$Om zAUy~rEURprHvhio|MliB$UX5N!k@LA8y)k*=HvUqdtc#@GU62*Vvxy<-@Q;vchL=K zUA%4%eV7lUdAky9n(LHy-&S6m{NgT^KsWs^d8c=@l_d1_s*nJA{p++lq~tbk%~zfr zkNMlNLkyW8UVeU21X?{^0g-9SP%0h+IGKQuIh{*>rHs{N1$ylXk#}Y)J|UGXRXxVg zjm6Nd^l<)b+BfI{2Q@eK-!X?nP6h}O>_49=N-{=v%&P>VJXU7iwM?oCp7+YofQE0VCSV2%i*xQTtyR~UvLUVd9N5*P6Pbd&YoS*oBbW3PQ*aX`#R9Z z;X>!R#U;x+1IFTuEy#9VUhLH+#59yiARcB-(m}I=bvFFXuBt2TB%11=ICo;y;F+qS zsr#ICqygHgOI2y7Y51k(wpTQ=pi7&nWo2}HPZeRK^WD8PBadYgkCx5XF!X!3;*z{Y z=d1oqZ@R*pC1nFvdmxkg@c~}A$nazZEQM`c%1jPYBNVkBq<6_jymVsp6ljiDC9S!8 zqu-;i<;CuNV%6nv#cK6v{cG>$xTx1(-{a69>?=#As>Z)yf4*lnPSCV4E0{32p;%dr z573cH{pI&c(+*tcwfF$PG&=U9)o#h-A0j%#e4J``3>*&lyvOn1n+&+wox(y(H?Dkc z`S7xAMYc9YW=6#M$?A6xD#A-IlHdGLf|?UQJP%0XJQ91j>qj8NEIrU^2Ttu%l+8d0 zlo<|4VT)V?{MrTtMnAd*Aq}%ZC^)|Y0fOG(Ni&pn5I*)#K>(iT$tB{yqPHOLKW_tz zIm`i_l4I|;AfCVM;PO#60zuKklRJE24_gAivIz=B! 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/PyTorch/build-in/Classification/Sequencer2D/sequencer/requirements.txt b/PyTorch/build-in/Classification/Sequencer2D/sequencer/requirements.txt deleted file mode 100644 index 76482464f..000000000 --- a/PyTorch/build-in/Classification/Sequencer2D/sequencer/requirements.txt +++ /dev/null @@ -1,66 +0,0 @@ -git+https://github.com/BorealisAI/advertorch.git@cc8ccf971fbe28d0086d56defca2255d8f5d7b4e -attrs==21.4.0 -boto3==1.21.3 -botocore==1.24.3 -brotlipy==0.7.0 -certifi==2021.10.8 -clearml==1.1.6 -click==8.0.4 -conda==4.10.3 -conda-build==3.21.5 -cycler==0.11.0 -deepspeed==0.4.5 -dnspython==2.1.0 -docker-pycreds==0.4.0 -fonttools==4.32.0 -furl==2.1.3 -future==0.18.2 -fvcore==0.1.5.post20220212 -gitdb==4.0.9 -GitPython==3.1.26 -importlib-metadata==4.11.1 -importlib-resources==5.4.0 -iopath==0.1.9 -jmespath==0.10.0 -jsonschema==4.4.0 -kiwisolver==1.4.2 -matplotlib==3.5.1 -mkl-fft==1.3.1 -mkl-service==2.4.0 -ninja==1.10.2.3 -olefile==0.46 -orderedmultidict==1.0.1 -packaging==21.3 -pathlib2==2.3.7.post1 -pathtools==0.1.2 -Pillow==8.4.0 -pkginfo==1.7.1 -portalocker==2.4.0 -promise==2.3 -protobuf==3.19.4 -pycosat==0.6.3 -PyJWT==2.1.0 -pyparsing==3.0.7 -pyrsistent==0.18.1 -python-dateutil==2.8.2 -python-etcd==0.4.5 -pytz==2021.3 -PyYAML==5.4.1 -s3transfer==0.5.1 -scipy==1.7.3 -sentry-sdk==1.5.5 -shortuuid==1.0.8 -smmap==5.0.0 -tabulate==0.8.9 -tensorboardX==1.8 -termcolor==1.1.0 -timm==0.5.4 -torch==1.10.0 -torchelastic==0.2.0 -torchtext==0.11.0 -torchvision==0.11.0 -triton==1.1.1 -wandb==0.12.10 -yacs==0.1.8 -yaspin==2.1.0 -zipp==3.7.0 \ No newline at end of file diff --git a/PyTorch/build-in/Classification/Sequencer2D/sequencer/train.py b/PyTorch/build-in/Classification/Sequencer2D/sequencer/train.py deleted file mode 100755 index a955bf15c..000000000 --- a/PyTorch/build-in/Classification/Sequencer2D/sequencer/train.py +++ /dev/null @@ -1,870 +0,0 @@ -#!/usr/bin/env python3 -""" ImageNet Training 
Script - -This is intended to be a lean and easily modifiable ImageNet training script that reproduces ImageNet -training results with some of the latest networks and training techniques. It favours canonical PyTorch -and standard Python style over trying to be able to 'do it all.' That said, it offers quite a few speed -and training result improvements over the usual PyTorch example scripts. Repurpose as you see fit. - -This script was started from an early version of the PyTorch ImageNet example -(https://github.com/pytorch/examples/tree/master/imagenet) - -NVIDIA CUDA specific speedups adopted from NVIDIA Apex examples -(https://github.com/NVIDIA/apex/tree/master/examples/imagenet) - -Hacked together by / Copyright 2020 Ross Wightman (https://github.com/rwightman) -""" - -import argparse -import time -import yaml -import os -import logging -from collections import OrderedDict - -from contextlib import suppress -from datetime import datetime - -import torch -import torch.nn as nn -import torchvision.utils -from torch.nn.parallel import DistributedDataParallel as NativeDDP - -from timm.data import create_loader, resolve_data_config, Mixup, FastCollateMixup, AugMixDataset -from timm.models import create_model, safe_model_name, resume_checkpoint, load_checkpoint, \ - convert_splitbn_model, model_parameters -from timm.utils import * -from timm.loss import * -from timm.optim import create_optimizer_v2, optimizer_kwargs -from timm.scheduler import create_scheduler -from timm.utils import ApexScaler, NativeScaler - -import models -import utils.timm.summary as sm -import utils.timm.checkpoint_saver as cs -from utils.timm.dataset_factory import create_dataset - -try: - from apex import amp - from apex.parallel import DistributedDataParallel as ApexDDP - from apex.parallel import convert_syncbn_model - - has_apex = True -except ImportError: - has_apex = False - -has_native_amp = False -try: - if getattr(torch.cuda.amp, 'autocast') is not None: - has_native_amp = True 
-except AttributeError: - pass - -try: - import wandb - - has_wandb = True -except ImportError: - has_wandb = False - -try: - import clearml - - has_clearml = True -except ImportError: - has_clearml = False - -torch.backends.cudnn.benchmark = True -_logger = logging.getLogger('train') - -# The first arg parser parses out only the --config argument, this argument is used to -# load a yaml file containing key-values that override the defaults for the main parser below -config_parser = parser = argparse.ArgumentParser(description='Training Config', add_help=False) -parser.add_argument('-c', '--config', default='', type=str, metavar='FILE', - help='YAML config file specifying default arguments') - -parser = argparse.ArgumentParser(description='PyTorch ImageNet Training') - -# Dataset parameters -parser.add_argument('data_dir', metavar='DIR', - help='path to dataset') -parser.add_argument('--dataset', '-d', metavar='NAME', default='', - help='dataset type (default: ImageFolder/ImageTar if empty)') -parser.add_argument('--train-split', metavar='NAME', default='train', - help='dataset train split (default: train)') -parser.add_argument('--val-split', metavar='NAME', default='validation', - help='dataset validation split (default: validation)') -parser.add_argument('--dataset-download', action='store_true', default=False, - help='Allow download of dataset for torch/ and tfds/ datasets that support it.') -parser.add_argument('--class-map', default='', type=str, metavar='FILENAME', - help='path to class to idx mapping file (default: "")') - -# Model parameters -parser.add_argument('--model', default='resnet50', type=str, metavar='MODEL', - help='Name of model to train (default: "resnet50"') -parser.add_argument('--pretrained', action='store_true', default=False, - help='Start with pretrained version of specified network (if avail)') -parser.add_argument('--initial-checkpoint', default='', type=str, metavar='PATH', - help='Initialize model from this checkpoint (default: 
none)') -parser.add_argument('--resume', default='', type=str, metavar='PATH', - help='Resume full model and optimizer state from checkpoint (default: none)') -parser.add_argument('--no-resume-opt', action='store_true', default=False, - help='prevent resume of optimizer state when resuming model') -parser.add_argument('--num-classes', type=int, default=None, metavar='N', - help='number of label classes (Model default if None)') -parser.add_argument('--gp', default=None, type=str, metavar='POOL', - help='Global pool type, one of (fast, avg, max, avgmax, avgmaxc). Model default if None.') -parser.add_argument('--img-size', type=int, default=None, metavar='N', - help='Image patch size (default: None => model default)') -parser.add_argument('--input-size', default=None, nargs=3, type=int, - metavar='N N N', help='Input all image dimensions (d h w, e.g. --input-size 3 224 224), uses model default if empty') -parser.add_argument('--crop-pct', default=None, type=float, - metavar='N', help='Input image center crop percent (for validation only)') -parser.add_argument('--mean', type=float, nargs='+', default=None, metavar='MEAN', - help='Override mean pixel value of dataset') -parser.add_argument('--std', type=float, nargs='+', default=None, metavar='STD', - help='Override std deviation of of dataset') -parser.add_argument('--interpolation', default='', type=str, metavar='NAME', - help='Image resize interpolation type (overrides model)') -parser.add_argument('-b', '--batch-size', type=int, default=128, metavar='N', - help='input batch size for training (default: 128)') -parser.add_argument('-vb', '--validation-batch-size', type=int, default=None, metavar='N', - help='validation batch size override (default: None)') - -# Optimizer parameters -parser.add_argument('--opt', default='sgd', type=str, metavar='OPTIMIZER', - help='Optimizer (default: "sgd"') -parser.add_argument('--opt-eps', default=None, type=float, metavar='EPSILON', - help='Optimizer Epsilon (default: None, use 
opt default)') -parser.add_argument('--opt-betas', default=None, type=float, nargs='+', metavar='BETA', - help='Optimizer Betas (default: None, use opt default)') -parser.add_argument('--momentum', type=float, default=0.9, metavar='M', - help='Optimizer momentum (default: 0.9)') -parser.add_argument('--weight-decay', type=float, default=2e-5, - help='weight decay (default: 2e-5)') -parser.add_argument('--clip-grad', type=float, default=None, metavar='NORM', - help='Clip gradient norm (default: None, no clipping)') -parser.add_argument('--clip-mode', type=str, default='norm', - help='Gradient clipping mode. One of ("norm", "value", "agc")') - -# Learning rate schedule parameters -parser.add_argument('--sched', default='cosine', type=str, metavar='SCHEDULER', - help='LR scheduler (default: "step"') -parser.add_argument('--lr', type=float, default=0.05, metavar='LR', - help='learning rate (default: 0.05)') -parser.add_argument('--lr-noise', type=float, nargs='+', default=None, metavar='pct, pct', - help='learning rate noise on/off epoch percentages') -parser.add_argument('--lr-noise-pct', type=float, default=0.67, metavar='PERCENT', - help='learning rate noise limit percent (default: 0.67)') -parser.add_argument('--lr-noise-std', type=float, default=1.0, metavar='STDDEV', - help='learning rate noise std-dev (default: 1.0)') -parser.add_argument('--lr-cycle-mul', type=float, default=1.0, metavar='MULT', - help='learning rate cycle len multiplier (default: 1.0)') -parser.add_argument('--lr-cycle-decay', type=float, default=0.5, metavar='MULT', - help='amount to decay each learning rate cycle (default: 0.5)') -parser.add_argument('--lr-cycle-limit', type=int, default=1, metavar='N', - help='learning rate cycle limit, cycles enabled if > 1') -parser.add_argument('--lr-k-decay', type=float, default=1.0, - help='learning rate k-decay for cosine/poly (default: 1.0)') -parser.add_argument('--warmup-lr', type=float, default=0.0001, metavar='LR', - help='warmup learning rate 
(default: 0.0001)') -parser.add_argument('--min-lr', type=float, default=1e-6, metavar='LR', - help='lower lr bound for cyclic schedulers that hit 0 (1e-5)') -parser.add_argument('--epochs', type=int, default=300, metavar='N', - help='number of epochs to train (default: 300)') -parser.add_argument('--epoch-repeats', type=float, default=0., metavar='N', - help='epoch repeat multiplier (number of times to repeat dataset epoch per train epoch).') -parser.add_argument('--start-epoch', default=None, type=int, metavar='N', - help='manual epoch number (useful on restarts)') -parser.add_argument('--decay-epochs', type=float, default=100, metavar='N', - help='epoch interval to decay LR') -parser.add_argument('--warmup-epochs', type=int, default=3, metavar='N', - help='epochs to warmup LR, if scheduler supports') -parser.add_argument('--cooldown-epochs', type=int, default=10, metavar='N', - help='epochs to cooldown LR at min_lr, after cyclic schedule ends') -parser.add_argument('--patience-epochs', type=int, default=10, metavar='N', - help='patience epochs for Plateau LR scheduler (default: 10') -parser.add_argument('--decay-rate', '--dr', type=float, default=0.1, metavar='RATE', - help='LR decay rate (default: 0.1)') - -# Augmentation & regularization parameters -parser.add_argument('--no-aug', action='store_true', default=False, - help='Disable all training augmentation, override other train aug args') -parser.add_argument('--scale', type=float, nargs='+', default=[0.08, 1.0], metavar='PCT', - help='Random resize scale (default: 0.08 1.0)') -parser.add_argument('--ratio', type=float, nargs='+', default=[3. / 4., 4. 
/ 3.], metavar='RATIO', - help='Random resize aspect ratio (default: 0.75 1.33)') -parser.add_argument('--hflip', type=float, default=0.5, - help='Horizontal flip training aug probability') -parser.add_argument('--vflip', type=float, default=0., - help='Vertical flip training aug probability') -parser.add_argument('--color-jitter', type=float, default=0.4, metavar='PCT', - help='Color jitter factor (default: 0.4)') -parser.add_argument('--aa', type=str, default=None, metavar='NAME', - help='Use AutoAugment policy. "v0" or "original". (default: None)'), -parser.add_argument('--aug-repeats', type=int, default=0, - help='Number of augmentation repetitions (distributed training only) (default: 0)') -parser.add_argument('--aug-splits', type=int, default=0, - help='Number of augmentation splits (default: 0, valid: 0 or >=2)') -parser.add_argument('--jsd-loss', action='store_true', default=False, - help='Enable Jensen-Shannon Divergence + CE loss. Use with `--aug-splits`.') -parser.add_argument('--bce-loss', action='store_true', default=False, - help='Enable BCE loss w/ Mixup/CutMix use.') -parser.add_argument('--bce-target-thresh', type=float, default=None, - help='Threshold for binarizing softened BCE targets (default: None, disabled)') -parser.add_argument('--reprob', type=float, default=0., metavar='PCT', - help='Random erase prob (default: 0.)') -parser.add_argument('--remode', type=str, default='pixel', - help='Random erase mode (default: "pixel")') -parser.add_argument('--recount', type=int, default=1, - help='Random erase count (default: 1)') -parser.add_argument('--resplit', action='store_true', default=False, - help='Do not random erase first (clean) augmentation split') -parser.add_argument('--mixup', type=float, default=0.0, - help='mixup alpha, mixup enabled if > 0. (default: 0.)') -parser.add_argument('--cutmix', type=float, default=0.0, - help='cutmix alpha, cutmix enabled if > 0. 
(default: 0.)') -parser.add_argument('--cutmix-minmax', type=float, nargs='+', default=None, - help='cutmix min/max ratio, overrides alpha and enables cutmix if set (default: None)') -parser.add_argument('--mixup-prob', type=float, default=1.0, - help='Probability of performing mixup or cutmix when either/both is enabled') -parser.add_argument('--mixup-switch-prob', type=float, default=0.5, - help='Probability of switching to cutmix when both mixup and cutmix enabled') -parser.add_argument('--mixup-mode', type=str, default='batch', - help='How to apply mixup/cutmix params. Per "batch", "pair", or "elem"') -parser.add_argument('--mixup-off-epoch', default=0, type=int, metavar='N', - help='Turn off mixup after this epoch, disabled if 0 (default: 0)') -parser.add_argument('--smoothing', type=float, default=0.1, - help='Label smoothing (default: 0.1)') -parser.add_argument('--train-interpolation', type=str, default='random', - help='Training interpolation (random, bilinear, bicubic default: "random")') -parser.add_argument('--drop', type=float, default=0.0, metavar='PCT', - help='Dropout rate (default: 0.)') -parser.add_argument('--drop-connect', type=float, default=None, metavar='PCT', - help='Drop connect rate, DEPRECATED, use drop-path (default: None)') -parser.add_argument('--drop-path', type=float, default=None, metavar='PCT', - help='Drop path rate (default: None)') -parser.add_argument('--drop-block', type=float, default=None, metavar='PCT', - help='Drop block rate (default: None)') - -# Batch norm parameters (only works with gen_efficientnet based models currently) -parser.add_argument('--bn-momentum', type=float, default=None, - help='BatchNorm momentum override (if not None)') -parser.add_argument('--bn-eps', type=float, default=None, - help='BatchNorm epsilon override (if not None)') -parser.add_argument('--sync-bn', action='store_true', - help='Enable NVIDIA Apex or Torch synchronized BatchNorm.') -parser.add_argument('--dist-bn', type=str, 
default='reduce', - help='Distribute BatchNorm stats between nodes after each epoch ("broadcast", "reduce", or "")') -parser.add_argument('--split-bn', action='store_true', - help='Enable separate BN layers per augmentation split.') - -# Model Exponential Moving Average -parser.add_argument('--model-ema', action='store_true', default=False, - help='Enable tracking moving average of model weights') -parser.add_argument('--model-ema-force-cpu', action='store_true', default=False, - help='Force ema to be tracked on CPU, rank=0 node only. Disables EMA validation.') -parser.add_argument('--model-ema-decay', type=float, default=0.9998, - help='decay factor for model weights moving average (default: 0.9998)') - -# Misc -parser.add_argument('--seed', type=int, default=42, metavar='S', - help='random seed (default: 42)') -parser.add_argument('--worker-seeding', type=str, default='all', - help='worker seed mode (default: all)') -parser.add_argument('--log-interval', type=int, default=50, metavar='N', - help='how many batches to wait before logging training status') -parser.add_argument('--recovery-interval', type=int, default=0, metavar='N', - help='how many batches to wait before writing recovery checkpoint') -parser.add_argument('--checkpoint-hist', type=int, default=10, metavar='N', - help='number of checkpoints to keep (default: 10)') -parser.add_argument('-j', '--workers', type=int, default=4, metavar='N', - help='how many training processes to use (default: 4)') -parser.add_argument('--save-images', action='store_true', default=False, - help='save images of input bathes every log interval for debugging') -parser.add_argument('--amp', action='store_true', default=False, - help='use NVIDIA Apex AMP or Native AMP for mixed precision training') -parser.add_argument('--apex-amp', action='store_true', default=False, - help='Use NVIDIA Apex AMP mixed precision') -parser.add_argument('--native-amp', action='store_true', default=False, - help='Use Native Torch AMP mixed 
precision') -parser.add_argument('--no-ddp-bb', action='store_true', default=False, - help='Force broadcast buffers for native DDP to off.') -parser.add_argument('--channels-last', action='store_true', default=False, - help='Use channels_last memory layout') -parser.add_argument('--pin-mem', action='store_true', default=False, - help='Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.') -parser.add_argument('--no-prefetcher', action='store_true', default=False, - help='disable fast prefetcher') -parser.add_argument('--output', default='', type=str, metavar='PATH', - help='path to output folder (default: none, current dir)') -parser.add_argument('--experiment', default='', type=str, metavar='NAME', - help='name of train experiment, name of sub-folder for output') -parser.add_argument('--eval-metric', default='top1', type=str, metavar='EVAL_METRIC', - help='Best metric (default: "top1"') -parser.add_argument('--tta', type=int, default=0, metavar='N', - help='Test/inference time augmentation (oversampling) factor. 0=None (default: 0)') -parser.add_argument("--local_rank", default=0, type=int) -parser.add_argument('--use-multi-epochs-loader', action='store_true', default=False, - help='use the multi-epochs-loader to save time at the beginning of every epoch') -parser.add_argument('--torchscript', dest='torchscript', action='store_true', - help='convert model torchscript for inference') -parser.add_argument('--fuser', default='', type=str, - help="Select jit fuser. 
One of ('', 'te', 'old', 'nvfuser')") -parser.add_argument('--log-wandb', action='store_true', default=False, - help='log training and validation metrics to wandb') -parser.add_argument('--log-clearml', action='store_true', default=False, - help='log training, validation metrics, and weights to clearml') -parser.add_argument('--task-name', default='', type=str, metavar='NAME', - help='name of train task') -parser.add_argument('--output-uri', default='', type=str, metavar='NAME', - help='uri to save weights of model') -parser.add_argument('--log-s3', action='store_true', default=False, - help='weights to s3') - - -def _parse_args(): - # Do we have a config file to parse? - args_config, remaining = config_parser.parse_known_args() - if args_config.config: - with open(args_config.config, 'r') as f: - cfg = yaml.safe_load(f) - parser.set_defaults(**cfg) - - # The main arg parser parses the rest of the args, the usual - # defaults will have been overridden if config file specified. - args = parser.parse_args(remaining) - - # Cache the args as a text string to save them in the output dir later - args_text = yaml.safe_dump(args.__dict__, default_flow_style=False) - return args, args_text - - -def main(): - setup_default_logging() - args, args_text = _parse_args() - - if args.local_rank == 0: - if args.log_wandb: - if has_wandb: - wandb.init(project=args.experiment, config=args) - else: - _logger.warning("You've requested to log metrics to wandb but package not found. " - "Metrics not being logged to wandb, try `pip install wandb`") - if args.log_clearml: - if has_clearml: - task = clearml.Task.init(project_name=args.experiment, - task_name=args.task_name, - output_uri=args.output_uri) - else: - _logger.warning("You've requested to log metrics to clearml but package not found. 
" - "Metrics not being logged to clearml, try `pip install clearml`") - - args.prefetcher = not args.no_prefetcher - args.distributed = False - if 'WORLD_SIZE' in os.environ: - args.distributed = int(os.environ['WORLD_SIZE']) > 1 - args.device = 'cuda:0' - args.world_size = 1 - args.rank = 0 # global rank - if args.distributed: - args.device = 'cuda:%d' % args.local_rank - torch.cuda.set_device(args.local_rank) - torch.distributed.init_process_group(backend='nccl', init_method='env://') - args.world_size = torch.distributed.get_world_size() - args.rank = torch.distributed.get_rank() - _logger.info('Training in distributed mode with multiple processes, 1 GPU per process. Process %d, total %d.' - % (args.rank, args.world_size)) - else: - _logger.info('Training with a single process on 1 GPUs.') - assert args.rank >= 0 - - # resolve AMP arguments based on PyTorch / Apex availability - use_amp = None - if args.amp: - # `--amp` chooses native amp before apex (APEX ver not actively maintained) - if has_native_amp: - args.native_amp = True - elif has_apex: - args.apex_amp = True - if args.apex_amp and has_apex: - use_amp = 'apex' - elif args.native_amp and has_native_amp: - use_amp = 'native' - elif args.apex_amp or args.native_amp: - _logger.warning("Neither APEX or native Torch AMP is available, using float32. 
" - "Install NVIDA apex or upgrade to PyTorch 1.6") - - random_seed(args.seed, args.rank) - - if args.fuser: - set_jit_fuser(args.fuser) - - model = create_model( - args.model, - pretrained=args.pretrained, - num_classes=args.num_classes, - drop_rate=args.drop, - drop_connect_rate=args.drop_connect, # DEPRECATED, use drop_path - drop_path_rate=args.drop_path, - drop_block_rate=args.drop_block, - global_pool=args.gp, - bn_momentum=args.bn_momentum, - bn_eps=args.bn_eps, - scriptable=args.torchscript, - checkpoint_path=args.initial_checkpoint) - if args.num_classes is None: - assert hasattr(model, 'num_classes'), 'Model must have `num_classes` attr if not set on cmd line/config.' - args.num_classes = model.num_classes # FIXME handle model default vs config num_classes more elegantly - - if args.local_rank == 0: - _logger.info( - f'Model {safe_model_name(args.model)} created, param count:{sum([m.numel() for m in model.parameters()])}') - - data_config = resolve_data_config(vars(args), model=model, verbose=args.local_rank == 0) - - # setup augmentation batch splits for contrastive loss or split bn - num_aug_splits = 0 - if args.aug_splits > 0: - assert args.aug_splits > 1, 'A split of 1 makes no sense' - num_aug_splits = args.aug_splits - - # enable split bn (separate bn stats per batch-portion) - if args.split_bn: - assert num_aug_splits > 1 or args.resplit - model = convert_splitbn_model(model, max(num_aug_splits, 2)) - - # move model to GPU, enable channels last layout if set - model.cuda() - if args.channels_last: - model = model.to(memory_format=torch.channels_last) - - # setup synchronized BatchNorm for distributed training - if args.distributed and args.sync_bn: - assert not args.split_bn - if has_apex and use_amp == 'apex': - # Apex SyncBN preferred unless native amp is activated - model = convert_syncbn_model(model) - else: - model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model) - if args.local_rank == 0: - _logger.info( - 'Converted model to use 
Synchronized BatchNorm. WARNING: You may have issues if using ' - 'zero initialized BN layers (enabled by default for ResNets) while sync-bn enabled.') - - if args.torchscript: - assert not use_amp == 'apex', 'Cannot use APEX AMP with torchscripted model' - assert not args.sync_bn, 'Cannot use SyncBatchNorm with torchscripted model' - model = torch.jit.script(model) - - optimizer = create_optimizer_v2(model, **optimizer_kwargs(cfg=args)) - - # setup automatic mixed-precision (AMP) loss scaling and op casting - amp_autocast = suppress # do nothing - loss_scaler = None - if use_amp == 'apex': - model, optimizer = amp.initialize(model, optimizer, opt_level='O1') - loss_scaler = ApexScaler() - if args.local_rank == 0: - _logger.info('Using NVIDIA APEX AMP. Training in mixed precision.') - elif use_amp == 'native': - amp_autocast = torch.cuda.amp.autocast - loss_scaler = NativeScaler() - if args.local_rank == 0: - _logger.info('Using native Torch AMP. Training in mixed precision.') - else: - if args.local_rank == 0: - _logger.info('AMP not enabled. 
Training in float32.') - - # optionally resume from a checkpoint - resume_epoch = None - if args.resume: - resume_epoch = resume_checkpoint( - model, args.resume, - optimizer=None if args.no_resume_opt else optimizer, - loss_scaler=None if args.no_resume_opt else loss_scaler, - log_info=args.local_rank == 0) - - # setup exponential moving average of model weights, SWA could be used here too - model_ema = None - if args.model_ema: - # Important to create EMA model after cuda(), DP wrapper, and AMP but before SyncBN and DDP wrapper - model_ema = ModelEmaV2( - model, decay=args.model_ema_decay, device='cpu' if args.model_ema_force_cpu else None) - if args.resume: - load_checkpoint(model_ema.module, args.resume, use_ema=True) - - # setup distributed training - if args.distributed: - if has_apex and use_amp == 'apex': - # Apex DDP preferred unless native amp is activated - if args.local_rank == 0: - _logger.info("Using NVIDIA APEX DistributedDataParallel.") - model = ApexDDP(model, delay_allreduce=True) - else: - if args.local_rank == 0: - _logger.info("Using native Torch DistributedDataParallel.") - model = NativeDDP(model, device_ids=[args.local_rank], broadcast_buffers=not args.no_ddp_bb) - # NOTE: EMA model does not need to be wrapped by DDP - - # setup learning rate schedule and starting epoch - lr_scheduler, num_epochs = create_scheduler(args, optimizer) - start_epoch = 0 - if args.start_epoch is not None: - # a specified start_epoch will always override the resume epoch - start_epoch = args.start_epoch - elif resume_epoch is not None: - start_epoch = resume_epoch - if lr_scheduler is not None and start_epoch > 0: - lr_scheduler.step(start_epoch) - - if args.local_rank == 0: - _logger.info('Scheduled epochs: {}'.format(num_epochs)) - - # create the train and eval datasets - dataset_train = create_dataset( - args.dataset, root=args.data_dir, split=args.train_split, is_training=True, - class_map=args.class_map, - download=args.dataset_download, - 
batch_size=args.batch_size, - repeats=args.epoch_repeats) - dataset_eval = create_dataset( - args.dataset, root=args.data_dir, split=args.val_split, is_training=False, - class_map=args.class_map, - download=args.dataset_download, - batch_size=args.batch_size) - - # setup mixup / cutmix - collate_fn = None - mixup_fn = None - mixup_active = args.mixup > 0 or args.cutmix > 0. or args.cutmix_minmax is not None - if mixup_active: - mixup_args = dict( - mixup_alpha=args.mixup, cutmix_alpha=args.cutmix, cutmix_minmax=args.cutmix_minmax, - prob=args.mixup_prob, switch_prob=args.mixup_switch_prob, mode=args.mixup_mode, - label_smoothing=args.smoothing, num_classes=args.num_classes) - if args.prefetcher: - assert not num_aug_splits # collate conflict (need to support deinterleaving in collate mixup) - collate_fn = FastCollateMixup(**mixup_args) - else: - mixup_fn = Mixup(**mixup_args) - - # wrap dataset in AugMix helper - if num_aug_splits > 1: - dataset_train = AugMixDataset(dataset_train, num_splits=num_aug_splits) - - # create data loaders w/ augmentation pipeiine - train_interpolation = args.train_interpolation - if args.no_aug or not train_interpolation: - train_interpolation = data_config['interpolation'] - loader_train = create_loader( - dataset_train, - input_size=data_config['input_size'], - batch_size=args.batch_size, - is_training=True, - use_prefetcher=args.prefetcher, - no_aug=args.no_aug, - re_prob=args.reprob, - re_mode=args.remode, - re_count=args.recount, - re_split=args.resplit, - scale=args.scale, - ratio=args.ratio, - hflip=args.hflip, - vflip=args.vflip, - color_jitter=args.color_jitter, - auto_augment=args.aa, - num_aug_repeats=args.aug_repeats, - num_aug_splits=num_aug_splits, - interpolation=train_interpolation, - mean=data_config['mean'], - std=data_config['std'], - num_workers=args.workers, - distributed=args.distributed, - collate_fn=collate_fn, - pin_memory=args.pin_mem, - use_multi_epochs_loader=args.use_multi_epochs_loader, - 
worker_seeding=args.worker_seeding, - ) - - loader_eval = create_loader( - dataset_eval, - input_size=data_config['input_size'], - batch_size=args.validation_batch_size or args.batch_size, - is_training=False, - use_prefetcher=args.prefetcher, - interpolation=data_config['interpolation'], - mean=data_config['mean'], - std=data_config['std'], - num_workers=args.workers, - distributed=args.distributed, - crop_pct=data_config['crop_pct'], - pin_memory=args.pin_mem, - ) - - # setup loss function - if args.jsd_loss: - assert num_aug_splits > 1 # JSD only valid with aug splits set - train_loss_fn = JsdCrossEntropy(num_splits=num_aug_splits, smoothing=args.smoothing) - elif mixup_active: - # smoothing is handled with mixup target transform which outputs sparse, soft targets - if args.bce_loss: - train_loss_fn = BinaryCrossEntropy(target_threshold=args.bce_target_thresh) - else: - train_loss_fn = SoftTargetCrossEntropy() - elif args.smoothing: - if args.bce_loss: - train_loss_fn = BinaryCrossEntropy(smoothing=args.smoothing, target_threshold=args.bce_target_thresh) - else: - train_loss_fn = LabelSmoothingCrossEntropy(smoothing=args.smoothing) - else: - train_loss_fn = nn.CrossEntropyLoss() - train_loss_fn = train_loss_fn.cuda() - validate_loss_fn = nn.CrossEntropyLoss().cuda() - - # setup checkpoint saver and eval metric tracking - eval_metric = args.eval_metric - best_metric = None - best_epoch = None - saver = None - output_dir = None - if args.rank == 0: - if args.experiment: - exp_name = args.experiment - else: - exp_name = '-'.join([ - datetime.now().strftime("%Y%m%d-%H%M%S"), - safe_model_name(args.model), - str(data_config['input_size'][-1]) - ]) - if args.task_name: - exp_name = os.path.join(exp_name, args.task_name) - output_dir = get_outdir(args.output if args.output else './output/train', exp_name) - decreasing = True if eval_metric == 'loss' else False - saver = cs.CheckpointSaver( - model=model, optimizer=optimizer, args=args, model_ema=model_ema, 
amp_scaler=loss_scaler, - checkpoint_dir=output_dir, recovery_dir=output_dir, decreasing=decreasing, max_history=args.checkpoint_hist, - log_clearml=args.log_clearml, log_s3=args.log_s3 - ) - with open(os.path.join(output_dir, 'args.yaml'), 'w') as f: - f.write(args_text) - - try: - for epoch in range(start_epoch, num_epochs): - if args.distributed and hasattr(loader_train.sampler, 'set_epoch'): - loader_train.sampler.set_epoch(epoch) - - train_metrics = train_one_epoch( - epoch, model, loader_train, optimizer, train_loss_fn, args, - lr_scheduler=lr_scheduler, saver=saver, output_dir=output_dir, - amp_autocast=amp_autocast, loss_scaler=loss_scaler, model_ema=model_ema, mixup_fn=mixup_fn) - - if args.distributed and args.dist_bn in ('broadcast', 'reduce'): - if args.local_rank == 0: - _logger.info("Distributing BatchNorm running means and vars") - distribute_bn(model, args.world_size, args.dist_bn == 'reduce') - - eval_metrics = validate(model, loader_eval, validate_loss_fn, args, amp_autocast=amp_autocast) - - if model_ema is not None and not args.model_ema_force_cpu: - if args.distributed and args.dist_bn in ('broadcast', 'reduce'): - distribute_bn(model_ema, args.world_size, args.dist_bn == 'reduce') - ema_eval_metrics = validate( - model_ema.module, loader_eval, validate_loss_fn, args, amp_autocast=amp_autocast, log_suffix=' (EMA)') - eval_metrics = ema_eval_metrics - - if lr_scheduler is not None: - # step LR for next epoch - lr_scheduler.step(epoch + 1, eval_metrics[eval_metric]) - - if output_dir is not None: - sm.update_summary( - epoch, train_metrics, eval_metrics, os.path.join(output_dir, 'summary.csv'), - write_header=best_metric is None, log_wandb=args.log_wandb and has_wandb, log_clearml=args.log_clearml and has_clearml) - - if saver is not None: - # save proper checkpoint with eval metric - save_metric = eval_metrics[eval_metric] - best_metric, best_epoch = saver.save_checkpoint(epoch, metric=save_metric) - - except KeyboardInterrupt: - pass - if 
best_metric is not None: - _logger.info('*** Best metric: {0} (epoch {1})'.format(best_metric, best_epoch)) - - -def train_one_epoch( - epoch, model, loader, optimizer, loss_fn, args, - lr_scheduler=None, saver=None, output_dir=None, amp_autocast=suppress, - loss_scaler=None, model_ema=None, mixup_fn=None): - if args.mixup_off_epoch and epoch >= args.mixup_off_epoch: - if args.prefetcher and loader.mixup_enabled: - loader.mixup_enabled = False - elif mixup_fn is not None: - mixup_fn.mixup_enabled = False - - second_order = hasattr(optimizer, 'is_second_order') and optimizer.is_second_order - batch_time_m = AverageMeter() - data_time_m = AverageMeter() - losses_m = AverageMeter() - - model.train() - - end = time.time() - last_idx = len(loader) - 1 - num_updates = epoch * len(loader) - for batch_idx, (input, target) in enumerate(loader): - last_batch = batch_idx == last_idx - data_time_m.update(time.time() - end) - if not args.prefetcher: - input, target = input.cuda(), target.cuda() - if mixup_fn is not None: - input, target = mixup_fn(input, target) - if args.channels_last: - input = input.contiguous(memory_format=torch.channels_last) - - with amp_autocast(): - output = model(input) - loss = loss_fn(output, target) - - if not args.distributed: - losses_m.update(loss.item(), input.size(0)) - - optimizer.zero_grad() - if loss_scaler is not None: - loss_scaler( - loss, optimizer, - clip_grad=args.clip_grad, clip_mode=args.clip_mode, - parameters=model_parameters(model, exclude_head='agc' in args.clip_mode), - create_graph=second_order) - else: - loss.backward(create_graph=second_order) - if args.clip_grad is not None: - dispatch_clip_grad( - model_parameters(model, exclude_head='agc' in args.clip_mode), - value=args.clip_grad, mode=args.clip_mode) - optimizer.step() - - if model_ema is not None: - model_ema.update(model) - - torch.cuda.synchronize() - num_updates += 1 - batch_time_m.update(time.time() - end) - if last_batch or batch_idx % args.log_interval == 0: - lrl 
= [param_group['lr'] for param_group in optimizer.param_groups] - lr = sum(lrl) / len(lrl) - - if args.distributed: - reduced_loss = reduce_tensor(loss.data, args.world_size) - losses_m.update(reduced_loss.item(), input.size(0)) - - if args.local_rank == 0: - _logger.info( - 'Train: {} [{:>4d}/{} ({:>3.0f}%)] ' - 'Loss: {loss.val:#.4g} ({loss.avg:#.3g}) ' - 'Time: {batch_time.val:.3f}s, {rate:>7.2f}/s ' - '({batch_time.avg:.3f}s, {rate_avg:>7.2f}/s) ' - 'LR: {lr:.3e} ' - 'Data: {data_time.val:.3f} ({data_time.avg:.3f})'.format( - epoch, - batch_idx, len(loader), - 100. * batch_idx / last_idx, - loss=losses_m, - batch_time=batch_time_m, - rate=input.size(0) * args.world_size / batch_time_m.val, - rate_avg=input.size(0) * args.world_size / batch_time_m.avg, - lr=lr, - data_time=data_time_m)) - if args.log_clearml: - clearml.Logger.current_logger().report_scalar("train_iter", "avg_loss", iteration=batch_idx + len(loader) * epoch, value=losses_m.avg) - - if args.save_images and output_dir: - torchvision.utils.save_image( - input, - os.path.join(output_dir, 'train-batch-%dPlease specify the project name..jpg' % batch_idx), - padding=0, - normalize=True) - - if saver is not None and args.recovery_interval and ( - last_batch or (batch_idx + 1) % args.recovery_interval == 0): - saver.save_recovery(epoch, batch_idx=batch_idx) - - if lr_scheduler is not None: - lr_scheduler.step_update(num_updates=num_updates, metric=losses_m.avg) - - end = time.time() - # end for - - if hasattr(optimizer, 'sync_lookahead'): - optimizer.sync_lookahead() - - return OrderedDict([('loss', losses_m.avg)]) - - -def validate(model, loader, loss_fn, args, amp_autocast=suppress, log_suffix=''): - batch_time_m = AverageMeter() - losses_m = AverageMeter() - top1_m = AverageMeter() - top5_m = AverageMeter() - - model.eval() - - end = time.time() - last_idx = len(loader) - 1 - with torch.no_grad(): - for batch_idx, (input, target) in enumerate(loader): - last_batch = batch_idx == last_idx - if not 
args.prefetcher: - input = input.cuda() - target = target.cuda() - if args.channels_last: - input = input.contiguous(memory_format=torch.channels_last) - - with amp_autocast(): - output = model(input) - if isinstance(output, (tuple, list)): - output = output[0] - - # augmentation reduction - reduce_factor = args.tta - if reduce_factor > 1: - output = output.unfold(0, reduce_factor, reduce_factor).mean(dim=2) - target = target[0:target.size(0):reduce_factor] - - loss = loss_fn(output, target) - acc1, acc5 = accuracy(output, target, topk=(1, 5)) - - if args.distributed: - reduced_loss = reduce_tensor(loss.data, args.world_size) - acc1 = reduce_tensor(acc1, args.world_size) - acc5 = reduce_tensor(acc5, args.world_size) - else: - reduced_loss = loss.data - - torch.cuda.synchronize() - - losses_m.update(reduced_loss.item(), input.size(0)) - top1_m.update(acc1.item(), output.size(0)) - top5_m.update(acc5.item(), output.size(0)) - - batch_time_m.update(time.time() - end) - end = time.time() - if args.local_rank == 0 and (last_batch or batch_idx % args.log_interval == 0): - log_name = 'Test' + log_suffix - _logger.info( - '{0}: [{1:>4d}/{2}] ' - 'Time: {batch_time.val:.3f} ({batch_time.avg:.3f}) ' - 'Loss: {loss.val:>7.4f} ({loss.avg:>6.4f}) ' - 'Acc@1: {top1.val:>7.4f} ({top1.avg:>7.4f}) ' - 'Acc@5: {top5.val:>7.4f} ({top5.avg:>7.4f})'.format( - log_name, batch_idx, last_idx, batch_time=batch_time_m, - loss=losses_m, top1=top1_m, top5=top5_m)) - - metrics = OrderedDict([('loss', losses_m.avg), ('top1', top1_m.avg), ('top5', top5_m.avg)]) - - return metrics - - -if __name__ == '__main__': - main() diff --git a/PyTorch/build-in/Classification/Sequencer2D/sequencer/utils/__init__.py b/PyTorch/build-in/Classification/Sequencer2D/sequencer/utils/__init__.py deleted file mode 100644 index f04000a2f..000000000 --- a/PyTorch/build-in/Classification/Sequencer2D/sequencer/utils/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from .timm import * \ No newline at end of file diff --git 
a/PyTorch/build-in/Classification/Sequencer2D/sequencer/utils/helpers.py b/PyTorch/build-in/Classification/Sequencer2D/sequencer/utils/helpers.py deleted file mode 100644 index 88f5dadbe..000000000 --- a/PyTorch/build-in/Classification/Sequencer2D/sequencer/utils/helpers.py +++ /dev/null @@ -1,76 +0,0 @@ -# Copyright (c) 2022. Yuki Tatsunami -# Licensed under the Apache License, Version 2.0 (the "License"); - -from itertools import repeat -import functools -import collections - -import torch -from torch import nn - - -def rsetattr(obj, attr, val): - pre, _, post = attr.rpartition('.') - return setattr(rgetattr(obj, pre) if pre else obj, post, val) - - -def rgetattr(obj, attr, *args): - def _getattr(obj, attr): - return getattr(obj, attr, *args) - - return functools.reduce(_getattr, [obj] + attr.split('.')) - - -# From PyTorch internals -def _ntuple(n): - def parse(x): - if isinstance(x, collections.abc.Iterable): - return x - return tuple(repeat(x, n)) - - return parse - - -to_1tuple = _ntuple(1) -to_2tuple = _ntuple(2) -to_3tuple = _ntuple(3) -to_4tuple = _ntuple(4) -to_ntuple = _ntuple - - -def train_rnn(model): - for m in model.children(): - if isinstance(m, (nn.LSTM, nn.GRU, nn.RNN)): - m.train() - else: - train_rnn(m) - - -def normalize_fn(tensor, mean, std): - mean = mean[None, :, None, None] - std = std[None, :, None, None] - return tensor.sub(mean).div(std) - - -class NormalizeByChannelMeanStd(nn.Module): - def __init__(self, mean, std): - super(NormalizeByChannelMeanStd, self).__init__() - if not isinstance(mean, torch.Tensor): - mean = torch.tensor(mean) - if not isinstance(std, torch.Tensor): - std = torch.tensor(std) - self.register_buffer("mean", mean) - self.register_buffer("std", std) - - def forward(self, tensor): - return normalize_fn(tensor, self.mean, self.std) - - def extra_repr(self): - return 'mean={}, std={}'.format(self.mean, self.std) - -class WithNone: - def __enter__(self): - pass - - def __exit__(self, exc_type, exc_value, traceback): - 
pass diff --git a/PyTorch/build-in/Classification/Sequencer2D/sequencer/utils/timm/__init__.py b/PyTorch/build-in/Classification/Sequencer2D/sequencer/utils/timm/__init__.py deleted file mode 100644 index 269d3443c..000000000 --- a/PyTorch/build-in/Classification/Sequencer2D/sequencer/utils/timm/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -from .checkpoint_saver import * -from .summary import * \ No newline at end of file diff --git a/PyTorch/build-in/Classification/Sequencer2D/sequencer/utils/timm/checkpoint_saver.py b/PyTorch/build-in/Classification/Sequencer2D/sequencer/utils/timm/checkpoint_saver.py deleted file mode 100644 index ea657b0a0..000000000 --- a/PyTorch/build-in/Classification/Sequencer2D/sequencer/utils/timm/checkpoint_saver.py +++ /dev/null @@ -1,163 +0,0 @@ - -""" Checkpoint Saver - -Track top-n training checkpoints and maintain recovery checkpoints on specified intervals. - -Hacked together by / Copyright 2020 Ross Wightman -""" - -import glob -import operator -import os -import logging - -import torch - -from timm.utils.model import unwrap_model, get_state_dict - - -_logger = logging.getLogger(__name__) - - -class CheckpointSaver: - def __init__( - self, - model, - optimizer, - args=None, - model_ema=None, - amp_scaler=None, - checkpoint_prefix='checkpoint', - recovery_prefix='recovery', - checkpoint_dir='', - recovery_dir='', - decreasing=False, - max_history=10, - unwrap_fn=unwrap_model, - log_clearml=False, - log_s3=False, - ): - - # objects to save state_dicts of - self.model = model - self.optimizer = optimizer - self.args = args - self.model_ema = model_ema - self.amp_scaler = amp_scaler - - # state - self.checkpoint_files = [] # (filename, metric) tuples in order of decreasing betterness - self.best_epoch = None - self.best_metric = None - self.curr_recovery_file = '' - self.last_recovery_file = '' - - # config - self.checkpoint_dir = checkpoint_dir - self.recovery_dir = recovery_dir - self.save_prefix = checkpoint_prefix - 
self.recovery_prefix = recovery_prefix - self.extension = '.pth.tar' - self.decreasing = decreasing # a lower metric is better if True - self.cmp = operator.lt if decreasing else operator.gt # True if lhs better than rhs - self.max_history = max_history - self.unwrap_fn = unwrap_fn - self.log_s3 = log_clearml and log_s3 - assert self.max_history >= 1 - - if self.log_s3: - from clearml import Task - self.task = Task.current_task() - - def save_checkpoint(self, epoch, metric=None): - assert epoch >= 0 - tmp_save_path = os.path.join(self.checkpoint_dir, 'tmp' + self.extension) - last_save_path = os.path.join(self.checkpoint_dir, 'last' + self.extension) - self._save(tmp_save_path, epoch, metric) - if os.path.exists(last_save_path): - os.unlink(last_save_path) # required for Windows support. - os.rename(tmp_save_path, last_save_path) - worst_file = self.checkpoint_files[-1] if self.checkpoint_files else None - if (len(self.checkpoint_files) < self.max_history - or metric is None or self.cmp(metric, worst_file[1])): - if len(self.checkpoint_files) >= self.max_history: - self._cleanup_checkpoints(1) - filename = '-'.join([self.save_prefix, str(epoch)]) + self.extension - save_path = os.path.join(self.checkpoint_dir, filename) - os.link(last_save_path, save_path) - self.checkpoint_files.append((save_path, metric)) - self.checkpoint_files = sorted( - self.checkpoint_files, key=lambda x: x[1], - reverse=not self.decreasing) # sort in descending order if a lower metric is not better - - checkpoints_str = "Current checkpoints:\n" - for c in self.checkpoint_files: - checkpoints_str += ' {}\n'.format(c) - _logger.info(checkpoints_str) - - if metric is not None and (self.best_metric is None or self.cmp(metric, self.best_metric)): - self.best_epoch = epoch - self.best_metric = metric - best_save_path = os.path.join(self.checkpoint_dir, 'model_best' + self.extension) - if os.path.exists(best_save_path): - os.unlink(best_save_path) - os.link(last_save_path, best_save_path) - if 
self.log_s3: - self.task.update_output_model(best_save_path) - if self.log_s3: - self.task.update_output_model(last_save_path) - - return (None, None) if self.best_metric is None else (self.best_metric, self.best_epoch) - - def _save(self, save_path, epoch, metric=None): - save_state = { - 'epoch': epoch, - 'arch': type(self.model).__name__.lower(), - 'state_dict': get_state_dict(self.model, self.unwrap_fn), - 'optimizer': self.optimizer.state_dict(), - 'version': 2, # version < 2 increments epoch before save - } - if self.args is not None: - save_state['arch'] = self.args.model - save_state['args'] = self.args - if self.amp_scaler is not None: - save_state[self.amp_scaler.state_dict_key] = self.amp_scaler.state_dict() - if self.model_ema is not None: - save_state['state_dict_ema'] = get_state_dict(self.model_ema, self.unwrap_fn) - if metric is not None: - save_state['metric'] = metric - torch.save(save_state, save_path) - - def _cleanup_checkpoints(self, trim=0): - trim = min(len(self.checkpoint_files), trim) - delete_index = self.max_history - trim - if delete_index < 0 or len(self.checkpoint_files) <= delete_index: - return - to_delete = self.checkpoint_files[delete_index:] - for d in to_delete: - try: - _logger.debug("Cleaning checkpoint: {}".format(d)) - os.remove(d[0]) - except Exception as e: - _logger.error("Exception '{}' while deleting checkpoint".format(e)) - self.checkpoint_files = self.checkpoint_files[:delete_index] - - def save_recovery(self, epoch, batch_idx=0): - assert epoch >= 0 - filename = '-'.join([self.recovery_prefix, str(epoch), str(batch_idx)]) + self.extension - save_path = os.path.join(self.recovery_dir, filename) - self._save(save_path, epoch) - if os.path.exists(self.last_recovery_file): - try: - _logger.debug("Cleaning recovery: {}".format(self.last_recovery_file)) - os.remove(self.last_recovery_file) - except Exception as e: - _logger.error("Exception '{}' while removing {}".format(e, self.last_recovery_file)) - 
self.last_recovery_file = self.curr_recovery_file - self.curr_recovery_file = save_path - - def find_recovery(self): - recovery_path = os.path.join(self.recovery_dir, self.recovery_prefix) - files = glob.glob(recovery_path + '*' + self.extension) - files = sorted(files) - return files[0] if len(files) else '' diff --git a/PyTorch/build-in/Classification/Sequencer2D/sequencer/utils/timm/dataset_factory.py b/PyTorch/build-in/Classification/Sequencer2D/sequencer/utils/timm/dataset_factory.py deleted file mode 100644 index dbac8b6b3..000000000 --- a/PyTorch/build-in/Classification/Sequencer2D/sequencer/utils/timm/dataset_factory.py +++ /dev/null @@ -1,158 +0,0 @@ -""" Dataset Factory - -Hacked together by / Copyright 2021, Ross Wightman -""" -import os - -from timm.data import IterableImageDataset, ImageDataset -from torchvision.datasets import CIFAR100, CIFAR10, MNIST, QMNIST, KMNIST, FashionMNIST, ImageNet, ImageFolder - -try: - from torchvision.datasets import Places365 - has_places365 = True -except ImportError: - has_places365 = False -try: - from torchvision.datasets import INaturalist - has_inaturalist = True -except ImportError: - has_inaturalist = False - - -from datasets import Flowers102, StanfordCars - -_TORCH_BASIC_DS = dict( - cifar10=CIFAR10, - cifar100=CIFAR100, - mnist=MNIST, - qmist=QMNIST, - kmnist=KMNIST, - fashion_mnist=FashionMNIST, -) -_TRAIN_SYNONYM = {'train', 'training'} -_EVAL_SYNONYM = {'val', 'valid', 'validation', 'eval', 'evaluation'} - - -def _search_split(root, split): - # look for sub-folder with name of split in root and use that if it exists - split_name = split.split('[')[0] - try_root = os.path.join(root, split_name) - if os.path.exists(try_root): - return try_root - - def _try(syn): - for s in syn: - try_root = os.path.join(root, s) - if os.path.exists(try_root): - return try_root - return root - if split_name in _TRAIN_SYNONYM: - root = _try(_TRAIN_SYNONYM) - elif split_name in _EVAL_SYNONYM: - root = _try(_EVAL_SYNONYM) - return 
root - - -def create_dataset( - name, - root, - split='validation', - search_split=True, - class_map=None, - load_bytes=False, - is_training=False, - download=False, - batch_size=None, - repeats=0, - **kwargs -): - """ Dataset factory method - - In parenthesis after each arg are the type of dataset supported for each arg, one of: - * folder - default, timm folder (or tar) based ImageDataset - * torch - torchvision based datasets - * TFDS - Tensorflow-datasets wrapper in IterabeDataset interface via IterableImageDataset - * all - any of the above - - Args: - name: dataset name, empty is okay for folder based datasets - root: root folder of dataset (all) - split: dataset split (all) - search_split: search for split specific child fold from root so one can specify - `imagenet/` instead of `/imagenet/val`, etc on cmd line / config. (folder, torch/folder) - class_map: specify class -> index mapping via text file or dict (folder) - load_bytes: load data, return images as undecoded bytes (folder) - download: download dataset if not present and supported (TFDS, torch) - is_training: create dataset in train mode, this is different from the split. - For Iterable / TDFS it enables shuffle, ignored for other datasets. (TFDS) - batch_size: batch size hint for (TFDS) - repeats: dataset repeats per iteration i.e. 
epoch (TFDS) - **kwargs: other args to pass to dataset - - Returns: - Dataset object - """ - name = name.lower() - if name.startswith('torch/'): - name = name.split('/', 2)[-1] - torch_kwargs = dict(root=root, download=download, **kwargs) - if name in _TORCH_BASIC_DS: - ds_class = _TORCH_BASIC_DS[name] - use_train = split in _TRAIN_SYNONYM - ds = ds_class(train=use_train, **torch_kwargs) - elif name == 'flowers': - if split in _TRAIN_SYNONYM: - split = 'train' - elif split in _EVAL_SYNONYM: - split = 'test' - ds = Flowers102(split=split, **torch_kwargs) - elif name == 'cars': - if split in _TRAIN_SYNONYM: - split = 'train' - elif split in _EVAL_SYNONYM: - split = 'test' - ds = StanfordCars(split=split, **torch_kwargs) - elif name == 'inaturalist' or name == 'inat': - assert has_inaturalist, 'Please update to PyTorch 1.10, torchvision 0.11+ for Inaturalist' - target_type = 'full' - split_split = split.split('/') - if len(split_split) > 1: - target_type = split_split[0].split('_') - if len(target_type) == 1: - target_type = target_type[0] - split = split_split[-1] - if split in _TRAIN_SYNONYM: - split = '2021_train' - elif split in _EVAL_SYNONYM: - split = '2021_valid' - ds = INaturalist(version=split, target_type=target_type, **torch_kwargs) - elif name == 'places365': - assert has_places365, 'Please update to a newer PyTorch and torchvision for Places365 dataset.' 
- if split in _TRAIN_SYNONYM: - split = 'train-standard' - elif split in _EVAL_SYNONYM: - split = 'val' - ds = Places365(split=split, **torch_kwargs) - elif name == 'imagenet': - if split in _EVAL_SYNONYM: - split = 'val' - ds = ImageNet(split=split, **torch_kwargs) - elif name == 'image_folder' or name == 'folder': - # in case torchvision ImageFolder is preferred over timm ImageDataset for some reason - if search_split and os.path.isdir(root): - # look for split specific sub-folder in root - root = _search_split(root, split) - ds = ImageFolder(root, **kwargs) - else: - assert False, f"Unknown torchvision dataset {name}" - elif name.startswith('tfds/'): - ds = IterableImageDataset( - root, parser=name, split=split, is_training=is_training, - download=download, batch_size=batch_size, repeats=repeats, **kwargs) - else: - # FIXME support more advance split cfg for ImageFolder/Tar datasets in the future - if search_split and os.path.isdir(root): - # look for split specific sub-folder in root - root = _search_split(root, split) - ds = ImageDataset(root, parser=name, class_map=class_map, load_bytes=load_bytes, **kwargs) - return ds diff --git a/PyTorch/build-in/Classification/Sequencer2D/sequencer/utils/timm/summary.py b/PyTorch/build-in/Classification/Sequencer2D/sequencer/utils/timm/summary.py deleted file mode 100644 index 9ed3cd36e..000000000 --- a/PyTorch/build-in/Classification/Sequencer2D/sequencer/utils/timm/summary.py +++ /dev/null @@ -1,28 +0,0 @@ -# Copyright (c) 2022. 
Yuki Tatsunami -# Licensed under the Apache License, Version 2.0 (the "License"); - -import csv -from collections import OrderedDict - - -def update_summary(epoch, train_metrics, eval_metrics, filename, write_header=False, log_wandb=False, log_clearml=False): - rowd = OrderedDict(epoch=epoch) - rowd.update([('train_' + k, v) for k, v in train_metrics.items()]) - rowd.update([('eval_' + k, v) for k, v in eval_metrics.items()]) - if log_wandb: - import wandb - wandb.log(rowd) - if log_clearml: - from clearml import Logger - for k, v in train_metrics.items(): - Logger.current_logger().report_scalar( - "train", k, iteration=epoch, value=v) - for k, v in eval_metrics.items(): - Logger.current_logger().report_scalar( - "eval", k, iteration=epoch, value=v) - - with open(filename, mode='a') as cf: - dw = csv.DictWriter(cf, fieldnames=rowd.keys()) - if write_header: # first iteration (epoch == 1 can't be used) - dw.writeheader() - dw.writerow(rowd) diff --git a/PyTorch/build-in/Classification/Sequencer2D/sequencer/validate.py b/PyTorch/build-in/Classification/Sequencer2D/sequencer/validate.py deleted file mode 100755 index a16f6c296..000000000 --- a/PyTorch/build-in/Classification/Sequencer2D/sequencer/validate.py +++ /dev/null @@ -1,384 +0,0 @@ -#!/usr/bin/env python3 -""" ImageNet Validation Script -This is intended to be a lean and easily modifiable ImageNet validation script for evaluating pretrained -models or training checkpoints against ImageNet or similarly organized image datasets. It prioritizes -canonical PyTorch, standard Python style, and good performance. Repurpose as you see fit. 
-Hacked together by Ross Wightman (https://github.com/rwightman) -""" -import argparse -import os -import csv -import glob -import time -import logging -import torch -import torch.nn as nn -import torch.nn.parallel -from collections import OrderedDict -from contextlib import suppress - -from advertorch.attacks import GradientSignAttack, LinfPGDAttack -from timm.models import create_model, apply_test_time_pool, load_checkpoint, is_model, list_models -from timm.data import create_dataset, create_loader, resolve_data_config, RealLabelsImagenet -from timm.utils import accuracy, AverageMeter, natural_key, setup_default_logging, set_jit_legacy, random_seed - -import models -from utils.helpers import NormalizeByChannelMeanStd, WithNone, train_rnn - -has_apex = False -try: - from apex import amp - - has_apex = True -except ImportError: - pass - -has_native_amp = False -try: - if getattr(torch.cuda.amp, 'autocast') is not None: - has_native_amp = True -except AttributeError: - pass - -torch.backends.cudnn.benchmark = True -_logger = logging.getLogger('validate') - -parser = argparse.ArgumentParser(description='PyTorch ImageNet Validation') -parser.add_argument('data', metavar='DIR', - help='path to dataset') -parser.add_argument('--dataset', '-d', metavar='NAME', default='', - help='dataset type (default: ImageFolder/ImageTar if empty)') -parser.add_argument('--split', metavar='NAME', default='validation', - help='dataset split (default: validation)') -parser.add_argument('--dataset-download', action='store_true', default=False, - help='Allow download of dataset for torch/ and tfds/ datasets that support it.') -parser.add_argument('--model', '-m', metavar='NAME', default='dpn92', - help='model architecture (default: dpn92)') -parser.add_argument('-j', '--workers', default=4, type=int, metavar='N', - help='number of data loading workers (default: 2)') -parser.add_argument('-b', '--batch-size', default=256, type=int, - metavar='N', help='mini-batch size (default: 256)') 
-parser.add_argument('--img-size', default=None, type=int, - metavar='N', help='Input image dimension, uses model default if empty') -parser.add_argument('--input-size', default=None, nargs=3, type=int, - metavar='N N N', help='Input all image dimensions (d h w, e.g. --input-size 3 224 224), uses model default if empty') -parser.add_argument('--crop-pct', default=None, type=float, - metavar='N', help='Input image center crop pct') -parser.add_argument('--mean', type=float, nargs='+', default=None, metavar='MEAN', - help='Override mean pixel value of dataset') -parser.add_argument('--std', type=float, nargs='+', default=None, metavar='STD', - help='Override std deviation of of dataset') -parser.add_argument('--interpolation', default='', type=str, metavar='NAME', - help='Image resize interpolation type (overrides model)') -parser.add_argument('--num-classes', type=int, default=None, - help='Number classes in dataset') -parser.add_argument('--class-map', default='', type=str, metavar='FILENAME', - help='path to class to idx mapping file (default: "")') -parser.add_argument('--gp', default=None, type=str, metavar='POOL', - help='Global pool type, one of (fast, avg, max, avgmax, avgmaxc). 
Model default if None.') -parser.add_argument('--log-freq', default=10, type=int, - metavar='N', help='batch logging frequency (default: 10)') -parser.add_argument('--checkpoint', default='', type=str, metavar='PATH', - help='path to latest checkpoint (default: none)') -parser.add_argument('--pretrained', dest='pretrained', action='store_true', - help='use pre-trained model') -parser.add_argument('--num-gpu', type=int, default=1, - help='Number of GPUS to use') -parser.add_argument('--test-pool', dest='test_pool', action='store_true', - help='enable test time pool') -parser.add_argument('--no-prefetcher', action='store_true', default=False, - help='disable fast prefetcher') -parser.add_argument('--pin-mem', action='store_true', default=False, - help='Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.') -parser.add_argument('--channels-last', action='store_true', default=False, - help='Use channels_last memory layout') -parser.add_argument('--amp', action='store_true', default=False, - help='Use AMP mixed precision. 
Defaults to Apex, fallback to native Torch AMP.') -parser.add_argument('--apex-amp', action='store_true', default=False, - help='Use NVIDIA Apex AMP mixed precision') -parser.add_argument('--native-amp', action='store_true', default=False, - help='Use Native Torch AMP mixed precision') -parser.add_argument('--tf-preprocessing', action='store_true', default=False, - help='Use Tensorflow preprocessing pipeline (require CPU TF installed') -parser.add_argument('--use-ema', dest='use_ema', action='store_true', - help='use ema version of weights if present') -parser.add_argument('--torchscript', dest='torchscript', action='store_true', - help='convert model torchscript for inference') -parser.add_argument('--legacy-jit', dest='legacy_jit', action='store_true', - help='use legacy jit mode for pytorch 1.5/1.5.1/1.6 to get back fusion performance') -parser.add_argument('--results-file', default='', type=str, metavar='FILENAME', - help='Output csv file for validation results (summary)') -parser.add_argument('--real-labels', default='', type=str, metavar='FILENAME', - help='Real labels JSON file for imagenet evaluation') -parser.add_argument('--valid-labels', default='', type=str, metavar='FILENAME', - help='Valid label indices txt file for validation of partial label space') -parser.add_argument('--attack-type', default=None, type=str, choices=[None, 'fgsm', 'pgd']) -parser.add_argument('--adv-eps', default=1., type=float) -parser.add_argument('--adv-steps', default=5, type=int) -parser.add_argument('--adv-step-size', default=.5, type=float) -parser.add_argument('--seed', type=int, default=42, metavar='S', help='random seed (default: 42)') - -def validate(args): - # might as well try to validate something - args.pretrained = args.pretrained or not args.checkpoint - args.prefetcher = not args.no_prefetcher - amp_autocast = suppress # do nothing - if args.amp: - if has_native_amp: - args.native_amp = True - elif has_apex: - args.apex_amp = True - else: - 
_logger.warning("Neither APEX or Native Torch AMP is available.") - assert not args.apex_amp or not args.native_amp, "Only one AMP mode should be set." - if args.native_amp: - amp_autocast = torch.cuda.amp.autocast - _logger.info('Validating in mixed precision with native PyTorch AMP.') - elif args.apex_amp: - _logger.info('Validating in mixed precision with NVIDIA APEX AMP.') - else: - _logger.info('Validating in float32. AMP not enabled.') - - if args.legacy_jit: - set_jit_legacy() - - # create model - model = create_model( - args.model, - pretrained=args.pretrained, - num_classes=args.num_classes, - in_chans=3, - global_pool=args.gp, - scriptable=args.torchscript) - if args.num_classes is None: - assert hasattr(model, 'num_classes'), 'Model must have `num_classes` attr if not set on cmd line/config.' - args.num_classes = model.num_classes - - if args.checkpoint: - load_checkpoint(model, args.checkpoint, args.use_ema) - - param_count = sum([m.numel() for m in model.parameters()]) - _logger.info('Model %s created, param count: %d' % (args.model, param_count)) - - data_config = resolve_data_config(vars(args), model=model, use_test_size=True, verbose=True) - test_time_pool = False - if args.test_pool: - model, test_time_pool = apply_test_time_pool(model, data_config, use_test_size=True) - - if args.attack_type: - normalize = NormalizeByChannelMeanStd( - mean=data_config['mean'], - std=data_config['std'] - ) - model = nn.Sequential(normalize, model) - - if args.torchscript: - torch.jit.optimized_execution(True) - model = torch.jit.script(model) - - model = model.cuda() - if args.apex_amp: - model = amp.initialize(model, opt_level='O1') - - if args.channels_last: - model = model.to(memory_format=torch.channels_last) - - if args.num_gpu > 1: - model = torch.nn.DataParallel(model, device_ids=list(range(args.num_gpu))) - - criterion = nn.CrossEntropyLoss().cuda() - - dataset = create_dataset( - root=args.data, name=args.dataset, split=args.split, - 
download=args.dataset_download, load_bytes=args.tf_preprocessing, class_map=args.class_map) - - if args.valid_labels: - with open(args.valid_labels, 'r') as f: - valid_labels = {int(line.rstrip()) for line in f} - valid_labels = [i in valid_labels for i in range(args.num_classes)] - else: - valid_labels = None - - if args.real_labels: - real_labels = RealLabelsImagenet(dataset.filenames(basename=True), real_json=args.real_labels) - else: - real_labels = None - - crop_pct = 1.0 if test_time_pool else data_config['crop_pct'] - loader = create_loader( - dataset, - input_size=data_config['input_size'], - batch_size=args.batch_size, - use_prefetcher=args.prefetcher, - interpolation=data_config['interpolation'], - mean=data_config['mean'] if args.attack_type is None else (0, 0, 0), - std=data_config['std'] if args.attack_type is None else (1, 1, 1), - num_workers=args.workers, - crop_pct=crop_pct, - pin_memory=args.pin_mem, - tf_preprocessing=args.tf_preprocessing) - - batch_time = AverageMeter() - losses = AverageMeter() - top1 = AverageMeter() - top5 = AverageMeter() - - model.eval() - with WithNone() if args.attack_type else torch.no_grad(): - # warmup, reduce variability of first batch time, especially for comparing torchscript vs non - input = torch.randn((args.batch_size,) + tuple(data_config['input_size'])).cuda() - if args.channels_last: - input = input.contiguous(memory_format=torch.channels_last) - model(input) - end = time.time() - - if args.attack_type == "fgsm": - loss_fn = nn.CrossEntropyLoss(reduction="sum") - adversary = GradientSignAttack( - model, loss_fn=loss_fn, eps=args.adv_eps / 255., - clip_min=0., clip_max=1., targeted=False) - elif args.attack_type == "pgd": - loss_fn = nn.CrossEntropyLoss(reduction="sum") - adversary = LinfPGDAttack( - model, loss_fn=loss_fn, eps=args.adv_eps / 255., - nb_iter=args.adv_steps, eps_iter=args.adv_step_size / 255., rand_init=True, - clip_min=0., clip_max=1., targeted=False) - random_seed(args.seed, 0) - - for 
batch_idx, (input, target) in enumerate(loader): - if args.no_prefetcher: - target = target.cuda() - input = input.cuda() - if args.attack_type: - train_rnn(model) - input = adversary.perturb(input, target) - model.eval() - input = input.detach() - if args.channels_last: - input = input.contiguous(memory_format=torch.channels_last) - # compute output - with amp_autocast(): - output = model(input) - - if valid_labels is not None: - output = output[:, valid_labels] - loss = criterion(output, target) - - if real_labels is not None: - real_labels.add_result(output) - - # measure accuracy and record loss - acc1, acc5 = accuracy(output.detach(), target, topk=(1, 5)) - losses.update(loss.item(), input.size(0)) - top1.update(acc1.item(), input.size(0)) - top5.update(acc5.item(), input.size(0)) - - # measure elapsed time - batch_time.update(time.time() - end) - end = time.time() - - if batch_idx % args.log_freq == 0: - _logger.info( - 'Test: [{0:>4d}/{1}] ' - 'Time: {batch_time.val:.3f}s ({batch_time.avg:.3f}s, {rate_avg:>7.2f}/s) ' - 'Loss: {loss.val:>7.4f} ({loss.avg:>6.4f}) ' - 'Acc@1: {top1.val:>7.3f} ({top1.avg:>7.3f}) ' - 'Acc@5: {top5.val:>7.3f} ({top5.avg:>7.3f})'.format( - batch_idx, len(loader), batch_time=batch_time, - rate_avg=input.size(0) / batch_time.avg, - loss=losses, top1=top1, top5=top5)) - - if real_labels is not None: - # real labels mode replaces topk values at the end - top1a, top5a = real_labels.get_accuracy(k=1), real_labels.get_accuracy(k=5) - else: - top1a, top5a = top1.avg, top5.avg - results = OrderedDict( - top1=round(top1a, 4), top1_err=round(100 - top1a, 4), - top5=round(top5a, 4), top5_err=round(100 - top5a, 4), - param_count=round(param_count / 1e6, 2), - img_size=data_config['input_size'][-1], - cropt_pct=crop_pct, - interpolation=data_config['interpolation']) - - _logger.info(' * Acc@1 {:.3f} ({:.3f}) Acc@5 {:.3f} ({:.3f})'.format( - results['top1'], results['top1_err'], results['top5'], results['top5_err'])) - - return results - - -def 
main(): - setup_default_logging() - args = parser.parse_args() - model_cfgs = [] - model_names = [] - if os.path.isdir(args.checkpoint): - # validate all checkpoints in a path with same model - checkpoints = glob.glob(args.checkpoint + '/*.pth.tar') - checkpoints += glob.glob(args.checkpoint + '/*.pth') - model_names = list_models(args.model) - model_cfgs = [(args.model, c) for c in sorted(checkpoints, key=natural_key)] - else: - if args.model == 'all': - # validate all models in a list of names with pretrained checkpoints - args.pretrained = True - model_names = list_models(pretrained=True, exclude_filters=['*_in21k', '*_in22k', '*_dino']) - model_cfgs = [(n, '') for n in model_names] - elif not is_model(args.model): - # model name doesn't exist, try as wildcard filter - model_names = list_models(args.model) - model_cfgs = [(n, '') for n in model_names] - - if not model_cfgs and os.path.isfile(args.model): - with open(args.model) as f: - model_names = [line.rstrip() for line in f] - model_cfgs = [(n, None) for n in model_names if n] - - if len(model_cfgs): - results_file = args.results_file or './results-all.csv' - _logger.info('Running bulk validation on these pretrained models: {}'.format(', '.join(model_names))) - results = [] - try: - start_batch_size = args.batch_size - for m, c in model_cfgs: - batch_size = start_batch_size - args.model = m - args.checkpoint = c - result = OrderedDict(model=args.model) - r = {} - while not r and batch_size >= args.num_gpu: - torch.cuda.empty_cache() - try: - args.batch_size = batch_size - print('Validating with batch size: %d' % args.batch_size) - r = validate(args) - except RuntimeError as e: - if batch_size <= args.num_gpu: - print("Validation failed with no ability to reduce batch size. 
Exiting.") - raise e - batch_size = max(batch_size // 2, args.num_gpu) - print("Validation failed, reducing batch size by 50%") - result.update(r) - if args.checkpoint: - result['checkpoint'] = args.checkpoint - results.append(result) - except KeyboardInterrupt as e: - pass - results = sorted(results, key=lambda x: x['top1'], reverse=True) - if len(results): - write_results(results_file, results) - else: - validate(args) - - -def write_results(results_file, results): - with open(results_file, mode='w') as cf: - dw = csv.DictWriter(cf, fieldnames=results[0].keys()) - dw.writeheader() - for r in results: - dw.writerow(r) - cf.flush() - - -if __name__ == '__main__': - main() diff --git a/PyTorch/build-in/Classification/Sequencer2D/sequencer/validate_c.py b/PyTorch/build-in/Classification/Sequencer2D/sequencer/validate_c.py deleted file mode 100755 index 78a98bfe9..000000000 --- a/PyTorch/build-in/Classification/Sequencer2D/sequencer/validate_c.py +++ /dev/null @@ -1,343 +0,0 @@ -#!/usr/bin/env python3 - -# Copyright (c) 2022. 
Yuki Tatsunami -# Licensed under the Apache License, Version 2.0 (the "License"); - -import argparse -import os -import csv -import glob -import logging - -import numpy as np -import torch -import torch.nn.parallel -from collections import OrderedDict -from contextlib import suppress - -import torchvision -from timm.data.loader import PrefetchLoader, create_loader -from timm.models import create_model, apply_test_time_pool, load_checkpoint, is_model, list_models -from timm.data import create_dataset, resolve_data_config, IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD -from timm.utils import natural_key, setup_default_logging, set_jit_legacy - -import models - -has_apex = False -try: - from apex import amp - - has_apex = True -except ImportError: - pass - -has_native_amp = False -try: - if getattr(torch.cuda.amp, 'autocast') is not None: - has_native_amp = True -except AttributeError: - pass - -torch.backends.cudnn.benchmark = True -_logger = logging.getLogger('validate') - -distortions = dict( - gaussian_noise=0.886428, - shot_noise=0.894468, - impulse_noise=0.922640, - defocus_blur=0.819880, - glass_blur=0.826268, - motion_blur=0.785948, - zoom_blur=0.798360, - snow=0.866816, - frost=0.826572, - fog=0.819324, - brightness=0.564592, - contrast=0.853204, - elastic_transform=0.646056, - pixelate=0.717840, - jpeg_compression=0.606500, - # speckle_noise=0.845388, - # gaussian_blur=0.787108, - # spatter=0.717512, - # saturate=0.658248, -) - -parser = argparse.ArgumentParser(description='PyTorch ImageNet Validation') -parser.add_argument('data', metavar='DIR', - help='path to dataset') -parser.add_argument('--dataset', '-d', metavar='NAME', default='', - help='dataset type (default: ImageFolder/ImageTar if empty)') -parser.add_argument('--split', metavar='NAME', default='validation', - help='dataset split (default: validation)') -parser.add_argument('--dataset-download', action='store_true', default=False, - help='Allow download of dataset for torch/ and tfds/ datasets that 
support it.') -parser.add_argument('--model', '-m', metavar='NAME', default='dpn92', - help='model architecture (default: dpn92)') -parser.add_argument('-j', '--workers', default=4, type=int, metavar='N', - help='number of data loading workers (default: 2)') -parser.add_argument('-b', '--batch-size', default=256, type=int, - metavar='N', help='mini-batch size (default: 256)') -parser.add_argument('--img-size', default=None, type=int, - metavar='N', help='Input image dimension, uses model default if empty') -parser.add_argument('--input-size', default=None, nargs=3, type=int, - metavar='N N N', help='Input all image dimensions (d h w, e.g. --input-size 3 224 224), uses model default if empty') -parser.add_argument('--mean', type=float, nargs='+', default=None, metavar='MEAN', - help='Override mean pixel value of dataset') -parser.add_argument('--std', type=float, nargs='+', default=None, metavar='STD', - help='Override std deviation of of dataset') -parser.add_argument('--interpolation', default='', type=str, metavar='NAME', - help='Image resize interpolation type (overrides model)') -parser.add_argument('--num-classes', type=int, default=None, - help='Number classes in dataset') -parser.add_argument('--class-map', default='', type=str, metavar='FILENAME', - help='path to class to idx mapping file (default: "")') -parser.add_argument('--gp', default=None, type=str, metavar='POOL', - help='Global pool type, one of (fast, avg, max, avgmax, avgmaxc). 
Model default if None.') -parser.add_argument('--log-freq', default=10, type=int, - metavar='N', help='batch logging frequency (default: 10)') -parser.add_argument('--checkpoint', default='', type=str, metavar='PATH', - help='path to latest checkpoint (default: none)') -parser.add_argument('--pretrained', dest='pretrained', action='store_true', - help='use pre-trained model') -parser.add_argument('--num-gpu', type=int, default=1, - help='Number of GPUS to use') -parser.add_argument('--test-pool', dest='test_pool', action='store_true', - help='enable test time pool') -parser.add_argument('--no-prefetcher', action='store_true', default=False, - help='disable fast prefetcher') -parser.add_argument('--pin-mem', action='store_true', default=False, - help='Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.') -parser.add_argument('--channels-last', action='store_true', default=False, - help='Use channels_last memory layout') -parser.add_argument('--amp', action='store_true', default=False, - help='Use AMP mixed precision. 
Defaults to Apex, fallback to native Torch AMP.') -parser.add_argument('--apex-amp', action='store_true', default=False, - help='Use NVIDIA Apex AMP mixed precision') -parser.add_argument('--native-amp', action='store_true', default=False, - help='Use Native Torch AMP mixed precision') -parser.add_argument('--tf-preprocessing', action='store_true', default=False, - help='Use Tensorflow preprocessing pipeline (require CPU TF installed') -parser.add_argument('--use-ema', dest='use_ema', action='store_true', - help='use ema version of weights if present') -parser.add_argument('--torchscript', dest='torchscript', action='store_true', - help='convert model torchscript for inference') -parser.add_argument('--legacy-jit', dest='legacy_jit', action='store_true', - help='use legacy jit mode for pytorch 1.5/1.5.1/1.6 to get back fusion performance') -parser.add_argument('--results-file', default='', type=str, metavar='FILENAME', - help='Output csv file for validation results (summary)') -parser.add_argument('--valid-labels', default='', type=str, metavar='FILENAME', - help='Valid label indices txt file for validation of partial label space') - - -def validate(args): - # might as well try to validate something - args.pretrained = args.pretrained or not args.checkpoint - args.prefetcher = not args.no_prefetcher - amp_autocast = suppress # do nothing - if args.amp: - if has_native_amp: - args.native_amp = True - elif has_apex: - args.apex_amp = True - else: - _logger.warning("Neither APEX or Native Torch AMP is available.") - assert not args.apex_amp or not args.native_amp, "Only one AMP mode should be set." - if args.native_amp: - amp_autocast = torch.cuda.amp.autocast - _logger.info('Validating in mixed precision with native PyTorch AMP.') - elif args.apex_amp: - _logger.info('Validating in mixed precision with NVIDIA APEX AMP.') - else: - _logger.info('Validating in float32. 
AMP not enabled.') - - if args.legacy_jit: - set_jit_legacy() - - # create model - model = create_model( - args.model, - pretrained=args.pretrained, - num_classes=args.num_classes, - in_chans=3, - global_pool=args.gp, - scriptable=args.torchscript) - if args.num_classes is None: - assert hasattr(model, 'num_classes'), 'Model must have `num_classes` attr if not set on cmd line/config.' - args.num_classes = model.num_classes - - if args.checkpoint: - load_checkpoint(model, args.checkpoint, args.use_ema) - - param_count = sum([m.numel() for m in model.parameters()]) - _logger.info('Model %s created, param count: %d' % (args.model, param_count)) - - data_config = resolve_data_config(vars(args), model=model, use_test_size=True, verbose=True) - test_time_pool = False - if args.test_pool: - model, test_time_pool = apply_test_time_pool(model, data_config, use_test_size=True) - - if args.torchscript: - torch.jit.optimized_execution(True) - model = torch.jit.script(model) - - model = model.cuda() - if args.apex_amp: - model = amp.initialize(model, opt_level='O1') - - if args.channels_last: - model = model.to(memory_format=torch.channels_last) - - if args.num_gpu > 1: - model = torch.nn.DataParallel(model, device_ids=list(range(args.num_gpu))) - - results = OrderedDict() - un_ces = [] - ces = [] - for distortion_name, distortion_alex_value in distortions.items(): - errs = [] - for severity in range(1, 6): - correct = 0 - dataset = create_dataset( - root=os.path.join(args.data, distortion_name, str(severity)), name=args.dataset, split=args.split, - download=args.dataset_download, load_bytes=args.tf_preprocessing, class_map=args.class_map) - - if args.valid_labels: - with open(args.valid_labels, 'r') as f: - valid_labels = {int(line.rstrip()) for line in f} - valid_labels = [i in valid_labels for i in range(args.num_classes)] - else: - valid_labels = None - - loader = create_loader( - dataset, - input_size=data_config['input_size'], - batch_size=args.batch_size, - 
use_prefetcher=args.prefetcher, - interpolation=data_config['interpolation'], - mean=data_config['mean'], - std=data_config['std'], - num_workers=args.workers, - crop_pct=1., - pin_memory=args.pin_mem, - tf_preprocessing=args.tf_preprocessing) - - model.eval() - with torch.no_grad(): - # warmup, reduce variability of first batch time, especially for comparing torchscript vs non - input = torch.randn((args.batch_size,) + tuple(data_config['input_size'])).cuda() - if args.channels_last: - input = input.contiguous(memory_format=torch.channels_last) - model(input) - for batch_idx, (input, target) in enumerate(loader): - if args.no_prefetcher: - target = target.cuda() - input = input.cuda() - if args.channels_last: - input = input.contiguous(memory_format=torch.channels_last) - - # compute output - with amp_autocast(): - output = model(input) - - if valid_labels is not None: - output = output[:, valid_labels] - - pred = output.data.max(1)[1] - correct += pred.eq(target).sum().cpu().detach().numpy() - - errs.append(1 - 1. 
* correct / len(dataset)) - - un_ce = np.mean(errs) - ce = un_ce / distortion_alex_value - results[distortion_name] = round(ce.item(), 4) - ces.append(ce) - un_ces.append(un_ce) - _logger.info('Distortion: {:20s} | CE un-normalized (%): {:.3f} | CE (%): {:.3f}'.format(distortion_name, 100 * un_ce, 100 * ce)) - - mce = 100 * np.mean(ces) - un_mce = 100 * np.mean(un_ces) - results["mCE_un_normalized"] = un_mce - results["mCE"] = mce - results["param_count"] = round(param_count / 1e6, 2) - results["img_size"] = data_config['input_size'][-1] - _logger.info('mCE un-normalized (%): {:.3f} | mCE (%): {:.3f}'.format(un_mce, mce)) - - return results - - -def main(): - setup_default_logging() - args = parser.parse_args() - model_cfgs = [] - model_names = [] - if os.path.isdir(args.checkpoint): - # validate all checkpoints in a path with same model - checkpoints = glob.glob(args.checkpoint + '/*.pth.tar') - checkpoints += glob.glob(args.checkpoint + '/*.pth') - model_names = list_models(args.model) - model_cfgs = [(args.model, c) for c in sorted(checkpoints, key=natural_key)] - else: - if args.model == 'all': - # validate all models in a list of names with pretrained checkpoints - args.pretrained = True - model_names = list_models(pretrained=True, exclude_filters=['*_in21k', '*_in22k', '*_dino']) - model_cfgs = [(n, '') for n in model_names] - elif not is_model(args.model): - # model name doesn't exist, try as wildcard filter - model_names = list_models(args.model) - model_cfgs = [(n, '') for n in model_names] - - if not model_cfgs and os.path.isfile(args.model): - with open(args.model) as f: - model_names = [line.rstrip() for line in f] - model_cfgs = [(n, None) for n in model_names if n] - - if len(model_cfgs): - results_file = args.results_file or './results-all.csv' - _logger.info('Running bulk validation on these pretrained models: {}'.format(', '.join(model_names))) - results = [] - try: - start_batch_size = args.batch_size - for m, c in model_cfgs: - batch_size = 
start_batch_size - args.model = m - args.checkpoint = c - result = OrderedDict(model=args.model) - r = {} - while not r and batch_size >= args.num_gpu: - torch.cuda.empty_cache() - try: - args.batch_size = batch_size - print('Validating with batch size: %d' % args.batch_size) - r = validate(args) - except RuntimeError as e: - if batch_size <= args.num_gpu: - print("Validation failed with no ability to reduce batch size. Exiting.") - raise e - batch_size = max(batch_size // 2, args.num_gpu) - print("Validation failed, reducing batch size by 50%") - result.update(r) - if args.checkpoint: - result['checkpoint'] = args.checkpoint - results.append(result) - except KeyboardInterrupt as e: - pass - results = sorted(results, key=lambda x: x['top1'], reverse=True) - if len(results): - write_results(results_file, results) - else: - validate(args) - - -def write_results(results_file, results): - with open(results_file, mode='w') as cf: - dw = csv.DictWriter(cf, fieldnames=results[0].keys()) - dw.writeheader() - for r in results: - dw.writerow(r) - cf.flush() - - -if __name__ == '__main__': - main() diff --git a/PyTorch/build-in/Classification/Sequencer2D/sequencer/visualize_erf.py b/PyTorch/build-in/Classification/Sequencer2D/sequencer/visualize_erf.py deleted file mode 100755 index 7f276020e..000000000 --- a/PyTorch/build-in/Classification/Sequencer2D/sequencer/visualize_erf.py +++ /dev/null @@ -1,48 +0,0 @@ -#!/usr/bin/env python3 - -# Copyright (c) 2022. 
Yuki Tatsunami -# Licensed under the Apache License, Version 2.0 (the "License"); - -import argparse -import os -import glob -import logging - -import numpy as np -from matplotlib import pyplot as plt -from timm.utils import setup_default_logging - -from erf.scaler import MinMaxScaler - -parser = argparse.ArgumentParser(description='PyTorch ImageNet ERF Visualizer') - -parser.add_argument('--result-npy-dir', default='./erf_results/224/npy', type=str, - help='path to save npys of ERF') -parser.add_argument('--result-png-dir', default='./erf_results/224/img', type=str, - help='path to save plotted images (png) of ERF') -parser.add_argument('--result-pdf-dir', default='./erf_results/224/pdf', type=str, - help='path to save plotted images (pdf) of ERF') - - -def main(): - setup_default_logging() - args = parser.parse_args() - - os.makedirs(args.result_png_dir, exist_ok=True) - os.makedirs(args.result_pdf_dir, exist_ok=True) - - npy_paths = glob.glob(os.path.join(args.result_npy_dir, "*.npy")) - npys = [np.load(p) for p in npy_paths] - scores = np.stack(npys, axis=0) - scaler = MinMaxScaler() - scores = scaler(scores) - for p, s in zip(npy_paths, scores): - file_base = os.path.basename(p).rsplit('.', 1)[0] - png_path = os.path.join(args.result_png_dir, f'{file_base}.png') - pdf_path = os.path.join(args.result_pdf_dir, f'{file_base}.pdf') - - plt.imsave(png_path, s, cmap='pink', format="png") - plt.imsave(pdf_path, s, cmap='pink', format="pdf") - -if __name__ == '__main__': - main() diff --git a/PyTorch/build-in/Classification/Sequencer2D/sequencer2D_loss.jpg b/PyTorch/build-in/Classification/Sequencer2D/sequencer2D_loss.jpg deleted file mode 100644 index 9edff0b35a5c06a6a65171f5036d4fdea5d73ccd..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 36221 zcmeFa1z22J)-GIl2o~HOg1b8j!2<+$NP<&Ha0?WHkOYDgAi>=!tO}Qq;O-FI-L&wePVefHV1_FC_H*V?D+5jIX< zK0!f2S{5-WQGQ8o0YUzs8bQLu#KgwJCd0uYrxHD6#bxDfHbiOIaj__(5#SZnx@N#Lbx5axXnQZjN1=7%h-Z0v$U!Xlz# 
z;&M;q6%>_}RkU?<_4Ex4jVvs!tZi)V?A_cwJiWYqe1qSHynFv4G%PmmQ+z_==PyZF z**Up+`2~eVl~oXEbxmzu{kQLJ?H!$6-900tW8)K(Q$MDcmRDBS);Bh{wvUcaPS4IS zV3$`vjSC5Y@*l(c?~(mtT=joC|n@0BE1N&d&ng(E_AR!hH1s@;- zxVU1?3cUaC^6xqLw+#IMMg~OFKQ!}a*vk-0hu1dIx4vb4TC_hYW zYjV8h>LJ@r6-h^=g<#lBFkss=WbvBH%}7{U z;&33hurq(!zL2@CM2m5)Hd2rJv64ckq`MaIUFylRf@rf_8@LIr*M=nC{__d0f0$Tx_PHq&Uv zd7K+nmy=b$)U;ao5b13}aKeTdW1{&3tXWddN)D*(=Q$7e;|q6;+^;z7&Z94j(b`K& z*}jsaCCG|z)TktW109RP7U16=fVKp64#LcjpGwNz0n8k4+5wM9KUxnt!+V$aEK~^9 zoVd9b%PUe%6Fo=A(_Ol0SvLwdITY>yoy`=Z)C4EbygxpPO+U3WPzdM8iq2udfK?bW zu&% zb`32S8AN2!igu_+}6OB zV?seU{>vgr>0Qq-ytwuy&F!p(OtVzUt>A;#(ywR5@kILeYD8xiJpJ@M^4ma|X8l0o z=JH6V6h%TGezbXYS8lbfM8(dkDJU_q;ABqAD z`7w_COQ>$$WNG3{VBa4N@w}`WgQC4#e{zBfr0miah}eTr7w=CI`zcRw3e8D5pZ3Pr zLT%g)iIq@`i5)C)7f%+TE{ukwH*80YZB@V#p-TH0qo(kFLf$W0{G}ApxCX-N^Z6bo zFvS`;@fx@b8}q!lse-pP+&@brk!Fq+#YTOS+gZ{PY+I>#ue!Vjn(*+MR~*lm2A2W8 zW9;X+xAy*S?cyTuv9XHGF8MXm*_k5NVvfe%${M58b#O#Tvklz{9O<`p8jUShl9G5z z>nFLi*d~fzSq8XI9!Y<+IYN@ubn|^LHzpY4Xj6#xkU3T?y)Wt&Et@2b@7{R^ZjT1Z zCO#~H%L6K^p3H_twUaR4w#fR7-&l;&B@K_LOVTRC#E#UrpyhbC^d4ciB>!Uz7BVzb zIk%Wdv3{Sk5^-&q(H%f%+}Y_=m7|3~xnPS_6%(DY28&)R2hBn0)mLQ7v9KE4D!dPk zTwr5Zl%Cxw(D*Z2UXz{uFgYdtcQmATnqScZr~T_Pa8Klqq~n%JQr%}lnY@v|kQn>W zKc^HG^*{{{?m8_BU_GLqi1*0b64%Y~^6kjmuig@f4-0xj-$3IWD`cXG>ru%k<1f4M|z6(HAKsG3NK!z#kOy&)0*FYV$o2VGH^Bw9i z%`ilckF#AZ;0TTk973;7mkBH7{sLXf?tji;W4HSx2J*x$DBSo}hYu>Pef>Psi1w4J z38MoF|E0H&L=x@*GD)&g;g*)=+oFS_B~y~7nXu0$c1c_ZGkLM|PaF9vBBgmnqsg0$%y6%L_(h$ZIC6XsvZCx~A{APN4A;gwKifUEe7Pb0Z~8kGW@VrKhQs`yfn z96vJLf-0~mgM_Pf5&O`e#$BZ=u?)|eH@~D(zUW0q z312;HXPIi`v038q%RAD&QJQW{oTzUw4*Bs)=iTM&res{>0>J9x5JuMK^nPmDbhk@h z-*G49rE@cBXnZ;D{aVylicV*#McXwt9TeOl7CedTOf@wnM1-pkiFgD5)F`u`ZWKq}JU_3u`cdUv6H23Osf!l*An@bKa~iiGp!IdTIOzS} z>E?~R7h3M#*puA|)8W*|2lqTzkG}$5sVkrZ!;T!%alt~yJIOOU5gcv!)&f%5YR;~G zjmVj=m^XP}`DwdPj2(ESzaxYNJ%o>4bDi`soA5fb@ha_3wMwrJVdyR3-#kq6ZW6Em ztXj`&z|2qo)M@^W7ach{CxDy=nqTOhE`Tn(HqNigfcSv?|Npy1$+ z%E9etGao3WsDaQOMRtVS)k)!8R?f8=9BY!X*_>3oXi8LzZ*<+GC}CcvNlNZ}iv3BV 
zjVqLLOb)|$03SJ4ZmCJ34e`>(dFIjAe#M*BRVG`BQO{pslG8dKlL@XNt-Uc%lxeRa zZoXRovVuqBt!Iv>ZfLR_4lQF4-oRmUU zRe?WV+yR~>zWh*w>Nt=r@GUlHqku8k2u^-9|FKvu*Fd@;c6*Yft4}gdH&!;VR&V-2 zxmZu(3PvPH+z8Z|OqvGv{rkvlj&CDYQ8#n;5#8gF2UAiy?=Ym#G7IFXXmDY!6$}|_ za_U%b!mSzbHq`(g;x&&7HO@&1B5^+8luPs^@>C(-a1B>m=W+0G69igG)B|IQ}{95qYnn3 zr!2#6cXlCA!>&(Di&bQJr?2hxfG&9Kfr~ci8_MFr6|uX<4dCM)AT$Fv*capj5*Nw8 zNIU_9;16gqKtsh@phZ;k8#UU)K$rwP#FX$30ARQSEZd@F%%{(r4@7L;uHEdU-vQoY zv@V>oZ>GbLNbdlIiZ^u9xOad|{U>*TulHJDNr26to6Y|D4jXrXC8R^I+xzt_aDY4l z3nMnxJ-pzSP2FLcLu4NK!$5JIpW5C0`4)9|0GRA+>N~(=aOfQ%5Fd0T`0t+od(QtY zE&tE%P^7E%RPchy`w>Z1nlG3iEjrHGWcuSy3ORXS$!;*dX*WKEN7*g)qmL>>;?p=h z82e%>2ig*>S;i_@j*4V{si}Ucn`Eice^QR_&Kd_i8zy&vU#cpZKi5q|7>}A{8e38+ zN3VM?CuM%2siJws6_Pyo=6gIVj3fFH_2(e*2RWn2QwE0iAFzu) z4IKfyoDn?bSJt0k*H+C>Xzo8?*E(nk`%vT-%NGxh^k1mU=1t3{@et_n{?Dk(@lQ}! zI!sOy2LZa~gAq%p`7`JuUv$1AIimAK)aW}h4rKc5bv4%PH$Q70V2fCOO>{SoRNjFZ-LBe)U zAd^$BDU~I!Xm3TS?8e8B@$*_q-4i2D_FwA9(}|pyl2aH3kIZ?>){qp^g?cYN z!a#X5I;7hx!C@*sBB4rY>sXpiru00Vfpx%J^DDybrd<&g8pdg?v={fYOP{_9;F{=e zo4PhJxZOxnG@-5A>onTiNjYk{33P1P<;VwZSqcpg{gyfV+t&yy6R=Jzld-che?CPn z-nuxl7cO=os4up$bqSIc9gt!-SE__$Z2K#`p1P=A8Dh}T(jDN_<{hBp&;Np+yTCnQcYv5E zNaC@~_nU$ApDu|g-jrcqmnpx|y#vU-hihlw0Vt~S?WF0dTf%?~1pbwCZq0ZVOnxxB z*I!qkFT#n$T~wFNqb_lD;y@@jGJW&^ee;CIG82CRjicGgw_8U2H#OBuoO#$hr5ar= zb&_is3yk;09H5r=zBI+f#&*bEvxP_MDqGe3Fwvsqlea$HD4(0gcHsrN|cHOs6E>$U-1k9`94avCxSEH!)UY za-hMS#^YS2=SbaTfb;&Jh9+E=aZ}s|r`wLfI(T5(fw-nLUn$*&2Q>=BtE)iJ(Hf|9 z?QZog<*eP$5nyBI3#~4>*g|z>t7rf!OSYqM7v;nRd0)QIAA$VbQV)-(f1>WS^&Mar z1XDG>)^PH?q)}N3kO}TPzVPfc6VQZEsCC4ZDOwa78+e=Vo%dUxa0q>hEW)l>#J^6U zLkS}FblH-nsrhNB3ght2o8m6hQ-8T{=kqUyAjuMH>asB^CDYc~AHL2??_&&hYS!V+ zgYN*XBARC2>H`LXo2RfUh8%nXi?5!PadgxX5#Qgm5+Rj2w`ac@B1PGXGnhxZ1H^hQ zm0hFFfHreWlR>caxZ4f5bwwk}9f0ZJyZJd*^&MbXRhF9g9~54yjfgwIDvIMB;DxEg z9YB>H99cI=c?al1pMig?_kt(LnNkCYH0A$+@`w&Q4Gnuh7`bt*~<* z_&xxB9OGU+3IHd`$M9{PAFR)rlrg{KEoyX$OYar3gbDjG*^GEJY%&9)OLU59zAh=! 
zrrMCJII0M*1=L)ZUR@?KljW1M^fhr^ESw&T_i*5S_uV)zt&(#&HkVcDd;2^V7qfFR z_(041qrSN5Czsa#z0phIZ87Gk*M1;=)(Blzw=$TxbG}PYa`+|f4UXqzMTNY}h`kx} z;jV(C!n~A1yE=u^Di*t6Wb%6l3qh7p4~++X6ZOQQH6QTiXXmOr9_?nc$ZSMHRg~LR zi#Mk=Mh3xeh3rJncdRE%C)Jd1^h>9(Eb>b#`#r66Nus%pEGC`&@U&-6OCkua_20L* z*o-;GaO)Bqad=!l;gmM9s7%8IC(0f6gQFPdB$AW2M1k+A-fU)<->O-AcQcRjxF0+) zZb=xbfY!dY!EZr|s;rTTU>%B3GE!aeo%1rvY2lH0yVH|$K)TPxc`Zc=N{^3h4(lH( zn@6myVlrHs@gdv$Yq>JmL_EJrzGro3w+4ezn!Gl!?Dx2gE}YOasE zK3&m#rp8;@(wSD-i?Kdc+TP3xd;X6}!mpPiVuYM`o|^BAoDfUN-T{toNv|Uip*szE zZGU6+NCm|fsJ@mQhe}3}1dY9NSt-|uNYXah#G_93eb0z#CMdQcJU<4jI5l;b zTOes-Oz*bI3HTB%qaS3?o@XsQtE!LAZJH$%CFh`e6Dj4LsS{k?@V+XkIhlkttDO5r z7hAmX$}#Ut!M1?09pE_@ z)XOK`_E;cdYNjEruG8VjKw9u<#crCU(LLGk4j|t8R8cb$)tf#Y5F#eVWQpt_w?Jj3e}hr zJV}LzroR(Cud`!#j4w-WZQ~@cGo>2^Qx}AloMDzZlIpBvyo7-e%28pk{|*q3z|f^?YAhUO%y7?JB_VSv<93Fb&7x`Ys&BbSXS6eMbhOLZc9xD-eF&P zV#F^=KYVA$?D!!gi?_7%^e_T)HJ{59%G2$kXS;hWdTN7GzG8a=tgNekcRZ~M{4k+p zsRfQ03RZP?#>s8;JXEOoP7(6NEbzXgJX8h(IX*kv+EiIr{=VDv8o}(=&(v1QQMaiP zb(K+K^OtuC5yyvs(=pij%JzkJ4@&%{U6z7^6s9vyy z=lSDVz^EwdcCBf~i*TKn@eJWj&I^nnPz>~FzufT(u>yOgH6`Mqy;kxEkK;`b*+LvKF={hgtW4Rih-JUBSHGM>}M zewXJ}*B6|26s6VBJUM^+ACZrG(hBCruEUzl{2e_CY_Im_I@0r$`Ax(~lU^%+UW1bp z-2prr(&12}agQ?6A6p~#u?B{Q)3Um0$Z&^}hCs3< zFXI_!<2`H7Q*DcwhoQ`Jb<}cfXXvSh6#sNk|GzAf#SY;hsIb*oEqGdO!L)lP0p307UJBA~816`w%FAchU^neKYUD>X z=r%2yX}y<^;ViaZ&-0gk=n^$-q+h1Dt$#vjm;S>A$Wh1yKKAKLw>z2UZ+LZ&X{pjJ zkXImj&Z~8vv2H|CF3ZksE9kpkl4^X?+A2ZYV9*wX4V44~CneCAO5bOr`wk#I0tdnc zeU3qckd~x7Km_2G8Q0;%?6PfNNHK1V5A|7PyCZjoXI03}FY76t z@x4KhDyv7JJP)==E~=vyiH5uVDEP}Ye|(U>37b7xy#u7%frJpAPQ(Xm&IzWt*$~8g z!0P@Dms_!NiM^-Dgw0+}f>A;;dqoUp2xG^MhaS6+mbch>p~ib?@(-w^hIaLEep6%h zy8#Jwuh^MxMQg|XX9NZfAY60|qwTi?0OSc-4B7jLPV@Zt9An@EmvHoOXs#xVscF`+ zl0U3QsnpgdsRG?~TYoUpKW{Ier9+|eos?VRqPc!D!c^T{Ye>D}V-d>iqcw-iN6~Q8 zhc2Gtv55x!UsFQ!(^9e^mQog{<_-{LCo>OP2U32OIaa?x4DWe^^DPmyhCtA>j|$M~ z_5kl`ERgWeK7=3*YswP=eoOhmq?B#Paz`F;d-D1j-1}8`{!6w)n|~(h#7mKzFsnmO)UI4y>SHd zP;T`i`cW4{ZNnGNhp^8~V>CbZ%pc>}juP`#TRDpsO8PHwNY9VEEp;5gWZheu)?R8b 
z%A9oefjDSpge|56AO1P3Fp2*^GZCABtT5KUvpB?dVyiKM3n{{@_`afyB>_5*^fGVL zGx{qkIPU5zc)Y2&JeZ;P8j-7t%b)%Hqm`>g1=Zh5&_vm-`Fn%TwA-zYxz>@Cmh#aw_nxEQTRkhKU`38t56#&oDW9I-O3)WRwCD)~ zdbmfSpx82^M5)vl6=do@E(!;r1Bf8cFvM0smw*}2l)m~MV5AF}4R-6e0~{kEoc?lf z>K!0AdQ$Q4!mQ(M~`_LuoLX|CEg7Y3lD{87Pn;Kt}KB{Xpv+n59C!b?{l}&vM zy5BPdiC`anEPP<+2m(1JgR3YSzThg8Go~SuBA~k~H1<*cjn`jV?ATmKjh)w~l1@6R zktSx86vxX%T4s-m>129qUTlXGa@D#RTmhPS_Vp|BZ{K9I^fpc4x;n0sB8m-cr2(+l05oD#U_q`7uZRu~)6UlY|o z>VJR8miVu2g!`AWYVwyX)2~b_&r=ihJXDq(AYdJ9sMrLMKMbUmca@pB8vG!wfoJHS zHg0OKJo8@(N2Vno?ZDB;`bmVyC1PhfdZHjLePCQ$+*eg4fhu(V8jWzazIy9%g z>a^x4ZZsd%JCOzK(&n2F)KgPx!23rn-h8l&>gT{)w&(^2bbjyX)eY+HOIvz9(Oh32 z|H>*ZEBxBmFKK5dNh?C7MpenS@!autUPkB3U=|LSCdX38cRHyWwJol`i<)_t^iZClN=J?n^tqa-di1p6n zJRgmT+tzK4f6t$(pKk6`7Pob=>)g9n=qghMO4_*~mSiiKMUoCF6!xH&!K%Fj^xC$B zbS?#g@Qk*ApO4JaT?ma~!D?!g=T!B0myUQx^tVbkK)0u3rBFv{${A&p@2p*C7`4)G z^&Ow&G-d5vrsyPpJa^8U52Tz0|Xov!TL#=3`^p zJ%tT4` z<)c+~$(ajM@PsIlo7W0Yweb|GL7hh>I0_@6d)xd#aR;@Bi2_b7RT`Jppp7EDb(yXO zV6q>M8Hao|{%w_en(c~PWXAO}L<##I-2-pBi^6YXE>WDRd)e?7{N=*J-X!+Pagg~N z;?gnFi3(F=rAj&WcZz0jb;dil+F%-O=06As1XCaN{@~jpN?p04S%BB?a`;BC`8u5Q zl?**TKQ6H)7uSxzS!a4mb+XUdQ^BYNgVFeCK{z6Ycbavr+ z8FOXbSmH9?duI;~-0c|q#0a-jA`7QEP1IXN8Hpr)wdC~$4A(O&;U zG?^Cok+$(fX(#wXs&>b;$t_Lf4iJq~a~tA*aLf6wUi?>S^4#KW%v?KCuVV`bd{V%L z)cfUE94UvX2u@3{TK^TKW>*Gc6@6+*b(H@4W$w{O|K<(lA*4%ZZpq%Ls}_ zj?8~Sed;36^%CfV+RJc8RxEt+VD4*ND_tzhb&13nG%?peEpK-IiSTX6Lf>1Pc$R|B z+~=baPez6hu0Uq*x(l`Kn)DXR&bB2?hnFVZzc}PcC|_nrdqi|95bJ+4%DDmuQ_ijI zy^=5HIzG0q+3C&Y57)pLx~LA(^Y{D~gSqf4zvCuHVeHn)uYLnTaAQBW%?H z`^qnAeWrs{f3^Be=A8Wl7A}c&W-`2en*)m4(N9pN0S^lt%CQ~R5-E;$U+Y5p8t#e2 z;)^|}_lXSYon=d!I|1Ay?hTX)rjMspb0ZpSW4;r>79WVa?P&baNKTl3U;Um(=#+PA zcKd3+rO>Ej;gP7@WWv@vs$Bp0BsN~G0Cx{#O)CAE`>U=r&>hh z3l*f&73qSUHr`Vx*rm`&J4(ajbxqCSv2a+tIN#g)>%onM;eZ$NM7puR6$=x80srXr z$UmrNGMOn);`Dkkb5ttM&aSOXw|JGP?vye%`4~$g*uw^Uk5XZT?qxWy&XvAc;T<63 zwe0YLIP+d(LZp9ZU(1(L%9-8>0mFBOIb8X(Z%7$$RnKMQ;J&(e)Wz~y#@kAm{^I)_ zZz5}Pf@F*undRB)o&D4MZQ<75^-jFHBr|1kublqh*oq_mHqbj*uLMX 
zE_%yflUh@R5dWo!{{zI)f09i|1Kta8S>c6q;?b7VN{Y?DBfW#T-@E#1X=3Kl=#1le zjtVL5aS#2)%<;-e(Oa>#mn=f}W(VGs-smpNfjli78M^zMl9VHA2x%Lqr2Jd(d^dT7 zcGSc@YizmAdr^GCIE%17SVoGbV6TlAW;$N#j_xvG+J6~Xm+={F26fcm@5ohwI%V$X z?1Ibk{d}tIb+$&T(i^86s;Zlsm3o>+1fB7x+qvdS^fjX?#CUZpb`|bjX_vZ7(JF^` zJLXTfmRr__I{`q@iSz=4Idys<^Xo-NV04BiK&=o!_;D6{_iU!G$E;*5x?8!;W7sqn*@ z?1=*cb>elFS5R|X%+0b2nb~Tn>wOW)DSEQMh1&bCYr>0GuruGebqxvy*K0h00kt8i z&(vS-z} z^n53ZdfxA&n_Cttct8cSxmHyjojzYyt#fj+7$XnQjYp}*ma~-yuv;Diy0&GdX!Rye zZ*f_3T2kW%K-^4`ED6e#s7sni){BH+^S6AVkx)zS)62G@eM=TQJv|IGLnSn&Q#y*} z8{^`sZJebD^%-qp5**3C`jPw_zvvLpn-$xI6YN^XwXIkj+Z%DmE9snG`nm`xTav_a zuG%5JMsj4TvU%ct&sTs9tma=ppk>Mb88`jSJGZQcAi_)NQ_t4>Ad0C zB3@(ZjdZY`o}Ko&S5;b)r*X@Wdz^<3dScGe@ny~9c8g~j72TqRwpnju z^<-z9b`e0~UeIWHJW-Zw3^cNw5K;ZSO2xY!@#B4M2^ZopxJH^3RvYT~ z*Xc~8JaF)q;$zC|`OeEDQx)-Uiu#mdfZ|Bewy$nNh4n<@5a+On#kfX+RD{Pj``OB= zl~j=c+k?wQRe5wgmFT$c4-(E0Y;$?urpO$)dbyx<#2=n>~ zVP}6+lH%V^xcl4fp_u;_z7_oDuX3}1O$7G)RapCP!M+Vi=Q{u$rStXut%WN?9Cwdj z7^c6)_J2EaDt!1Qv$ndsss4qdLw2F(3gaffeKsi@YnBZ&S)Bkf&La;Qa^-?chGnu< zasN-2hoFs;baj_~Jg66+92GvNCPR)rZ&5ZtUuG$lR=pYmNgB~h+&%m(5YD~3_St1N z4#B&p3lxe-TsJ6=!g1GA(gyC+0)2cDxwJol-M`O4{@;Owq|2!pS*}Q=tt1a3BsqmK zO6^gGIuA^GW>dwNn8X29W~0~q^V^QXQ_6y+>tr9JF%P2D&mJZhJu3XxPi7g9`{Sj{ zJGo7Hw~>OCUiIvY(u+HQ@zi_?QC*R?*&h60r!H{?M;w^SC4@x}PtTW`e$Ln|eSCXw zZvhz!X;PpzR;_Lcgzvj!V`T12XRHPgyViDrAg`&te5i~}F6uc)zGMlv3CwI9a1Z3z z+?<#WUo7>GJG|<>aNF2K+)AJ-t5UFqaPz8}WaLUlB=enIMeFIb&rPveVk5^q`h$e{{{{gD)0}fe5yJ8gcELf^4+XI02hlyRH{i&k=c3@KHsA7LL5}3% zzD&=@Ogn&g96Dt$WA8a} zmvAisCp0hKiM*MSQE8BWy3$voW7V^-H7dmFAz>~qSzI{73F6;3U0U5j%b!9<8KvYD0cES z=M*&LB1?sASj)U8vML)zitYdouz-Qn^m%c|Prjl{Et*EqPg%>;))ld_TBZp4E9coC zfq4GuXh%TuQ8DPn7LNZ~$>$`p@_q;&zfP2kRN!3PX^E-~$-F5nN#cc#ll#Yy-6dT6 zBwAK>FC$W?z?&0mFpoGoR8{}xWZM8+<*>pb)O8pCs|?tJH!27mjTZ9VA*%E`((*XjES&YOuqo-qnI? 
zWfmFJ`}F>sa8LFZ zbtldEWr#V~LO+~@P5#?`e!aNyn{#t!*et?+WFj1dzb_K_E9yTmGivv@uS(| zjjQ@AYE--BQV?Dhe^plTynzrY2HW zoQx7{5?#CK1L=bB=)#N!nn!7tMFYBhxN@Sg6|3Sqsqhzar8|HH!!O3xs!2K96xULG z0%(G*E^b6W6ap;jCnso&=I7-$Afe(6*@ zR{ha}hzbD_PDyC({}Wr2_r|<%b`wt0)C^&*%xgbkdb~qWRRjs~d#hy-bq-7&Ib(Bt zt({{q0|WCsX!owXtoXdhR2A(h)yAt%m2Hnw;5FT2nMx?fk?`(q7@mkV z-$fnSfcF)}8rO#EAdxLrF3ebKBr|N^5WE)K{yZXa_k?v;=h%yXMG$F0$wZ-p7i)13 z`xWM1(s8k>If320*x}%LLSjC2uPxDacDA!XfLxUw!-4GGP#8d`dl4yS?5c?fX<&zj z9a?j!JJ6C>Tdx;hnsHw-nM1hEgo?)GjlQHnHXa_#>AUK}x+FfHu*T&U{bgzk2`7kR zA}dfdio6y4!Er54b9Ck@d~)>2sapME=k#*tf#itz;_aZ5_@wi%3Op`1>U*JQFLQ}J zN_fnrIk9|TJD~AIqwOS09SCl02yP9u3^npsaKqF z3_PHH-Xsl~Nuk?71NUxBuC`DT+78$g`Z~c)rJWD!Chg97x2o-c=$_o_#<9v$YKB>1 z9}qUQ)6yhobvCyFfoTeUS0nnT_{flA4>H1)Iu5Z`-pkj&-=va824bJgdkQ}6s%Y+H ze_WHf3ltw4R}!sK(RN?Yc|S?NY@iTa`~wy{^?5v3)$dSxr(+gfdL=R-zAnQ|*)QkG zNvzFdi$p0el&Hf(p;9pqN0Q^#itEj=LaF4U^skE(wf4iY;S+<$)&@!ZQG()S{iK^E zGWYDc3camIl-)MknxA|xgMHr2kWK`6s+b_h=f!2NG^|J5=y#K%7nRc;kWs>Pjgxy(}orTMNmJgsT;B zAU3WsL*vQD1FuOfZGHX3RZO1t;tpB?7H5*npOV9kKUQr8AWoKxTq3JzX~x#? 
zY~2AcoWD%iRmc3;I+UZXH(nOP4xI|bFuUbTs*xvRwiMn+2piu&1FZa6n^r~!;5a`Q zhFL}GPrLWO%{oS3(SkG%ON}y1rtSc?TiP>GXP@WDPhWJE02OvpIeNsi?fI)#ox1nU ztn{@?P}s=m!h<-XM}9Q2l-&Vd_Gz4(o{*NidL!g643Xl;18?#mA<*D@i|0lj=^EUp zJ#|r%PPWyldDuSPAp^R9d04(pF1E5jP8;FzEGa&2T9M0Jbh{?%43<2m%SHRlR9~U0 z*(3nY?22I#skrFMz9*+9!9S{IiWr2_i+{rIL;BA){}^YML)a9g`rbVFu^*HoWtcn` zP@>*C7K@G{Hx|AUHWXbO5&oRCb((CEa7JiNFQJ)fw9#!TeUFPr12_}Py;fHj^~{<- zK?iq=4=0ldP#E_Wg|QHDTB@yIG3vsfDOqaYA=H1E4t1O78)JqJPxMT} zWjruQh5~&kYpu_}9?UR+Hz~wzlZ=dla!(SiHZ`0_i3h&DZ<`m4-LZ3&QMW)Ame%uV zb?*OgL!|Hc>~`x%+50mnBA4fY&9A&3M7rhYe|_8fj|9$stShl4sxS+|Z zTO4Qe6O)<&-n4+eqry+0^uTlVJEK>-qV3(AWea(!AnZhGdK!aVzeuh1I6H&im8Sfk>}i4hJ5^(jTwPY?MByh2=DAH|%d-K~nAjo>)ztLU-#$48=0Mq+d1=0-h|N*jdL@<^QYC4IT9F+N~D)L9+wH}1S?*y*Nd;+=vqmo z9D2v8Kc3W+BkYh_gV46g;ASHNBF57J*}$TyP9GcgUhH7H;L)ioTKk=XIFq9va2x7} zqvx)30jyAYB0U9!zW=8LaQ+y0z?xNMHF;X=0TX0t&@Al7|3Y`aZI@b^?zy{^(tv0N z4m*c{!4Z0D#_ebGZc)E{zHiaSsuB%ooU#VvQ2@eGs8Mm9RSaCOFyazW?AA|(cd0S zB`-62T?+Si%Lh{yYcOip*jQDw47{bG#?y|{H0oMb8lMz>ey^)tou)W%j(cLEhy!0) zMR*+uSv@8-OhbG1)ZCC z(QZs`(?Y|a1@SenT)~r7Y=cCHQSHJuCHW{ZPXAV{qUb5 z(tnINJA|}3SOA)FhdKdiKg!r4(4&W`E9uyXLy~sBerB`EUR?cszQKH-Dl+U62XNfwoH>&s)240Co6|pfRJsUD zPm;*^tjA1Z?EAS)H2D5)@bE`)pMUEf_%XqTzVWG-wZ{G8yCp>pPYK+pg0EdZM`duSO|MUpapDISNUp_sx_x%u zwwLx(&Vv%`cWaO4=*;JYC4!A0dRlpxpR)GW5RVo$?YGc$t?f2HQRFI}HU`FW4aE2n z`ObwtB*j2oDdOGFPd`>O7-=-kH=R0YsSVa>2o3ilxgbDgT>%S>>BaAVd=b#P9*Fs< zD^@1{{W1rS{$YG|Md59hRm~Gt6ntbzq~f(=K3Q>ueHgBgg`cDI#I^r!sAh0dJe$$= z!fB0pERV5w;*-YOW3Dq*L*@?8Pz)Ijqfx|JGzC4HPNB%xY}v`gP~ndu1I$!P0`Q@Y z!z}~0j2#-%#@G{8eEWpmhNrF(Rdja%%W!nKbyR?wV65Ku*`=Kg3~M&*BH8JdBwfm> zg|IfO;uQ2?dOM84ZPg5y22)yZ@U>0ZXu*Lo(PNhspX~At3{2NvL<&56=$|hCZ+%bk zS98~Y|05p%fuer@Q+B^1w)SUq)jv?5+^FTf`<$c9$P$BQP?f@rsnX^n+KD>V9Vx%> zBC6gOiKM<-y!?`!ULAF-l>~CwzW%@=RyzD3es&YKo*tKabDpl(ClRyEh^HWd^Mq=` z1be>(g?@XoO~JPCHrlK8jG7my2`(MhO!NMv+(WSFbmcqA0$TD*6+Z$)$hh)q;%hYW zX72z+T(_%m1p6mFPCZ}hPWNQuhWZuV+vh@!A9>~X75F02L=>vQ$y%m7*{7iOx5b)z zx`ZEh9Q6@V^M{@geVHsZ8|fGUSiq%|$nz`>dIrW#MNPdu^rrKi>ldTVg9CQ}j)48t 
zs!pTsu@s~5#nJ0s`@(OC{-_05mlDUh`INM}u*B!dy8vX^l!L0JPK*3WOmngtd37Z*>%%AG3{@f!-lxq`9v4}H;SFFkQ{ zqItz>a%`yd-u@=kp@dOrYWm2^PH4QZnCL-ftTNRi`}dbAYC80{OjepV=iN6?yMX3P7--bng9%{ITz4iyhZ_qtXXRHF(->7;L)gbIh6IsT+%1#fp6l3VDUWzY6iW3_Txf7W>Rx}p!2UFJZYBdm1^LOl}w^8Alfr1nx z^@y)~5iS@Yo#YBfJndtLCrC|XKexH&)XsEa^iT+vm1K6}XXmUl2RXY@?+IIiT;u3# zdMO@B7h)GQq*_7@j;@AD3Jbjb`)BNVz_$HwB!kh7Ew*L1TQ+t|q~cZm#Pa6nK|Y^u zb0w~o1QV|9E~xv7AFDY(egD){MRilJze7)JR;2t)dJaf{M?IzNL6$bLKha@PjkUEh zLltq0(Q;fA!PaZZru)_*S-gl11v&E#no<{IuG_Kqj9iqiF&>nJxms{r;J%0z&(4)j z2SMUDuueYNje4it-xEIk*AD~!Yk&6`GCwbn?^Ht0pL&*VrmIn@;{R*!x}%!RyL1o~ z=_*nL0s=vlCdAM|iV~WDU;t^NMi2-gK#&#$#EuY}fS~j)ZO{NAAXR#k8VJ4jK%`1@ zGiUeP-I;Iqn>lml%$eQeU++2h@rqzODKz}uT4+! zH!rJcoG6#=pFseRZl|Y*mB{a^Tv0W~%u(u5OTK)u<#VO3%T1^BEy{vTfkgvUsUfN2 za;>X59a$9np@r_z7_m!F)h230i1`ODuI%1^H?@JGt1s+v-5^`(fn&o{eYBf{hYy!h zN=nyeI-h6Yo{76<18=`+XHlDCQap45VLT(ebsuD4cYTR+jV0SiUWJo0Kkez?Rs z_2oNY_vwHiW?~x|HKOZyehJtn@x=el0m8_ri+Bx>Mxmuk?$nn9Te5oDo8F@xe4{(} z{SZRxteVBCC(e7nge2%0RF)t%$xN(KP6@DB{j6QivlQr4cWNKqmff2PSPP9PCEscG zK^1eOXVx;o^G(<5I_TO_PsQaaI{yc;c2w$tz;h%VmhK@LJ+aVvX%iY*=fl zZ?PEq?Gu-z#Lljj?Sbb!;lc0nh%rljsBLTV`_QeAnT~-6C5NN52gLAWY8t&naEx!& z3zdex;_Ma4G~UEk1HYdHR;olxHR27xJ+*S{LKcoj?&NBp59;0Sn82OvO;aIp~|yK0_jVKkTQ`0KML6GQ%BQ6k3AdcNseJ5 z7`-c9#N$m4``|bz*!qL{m5_Hflglmr@edP%A`nPEi}C1NM!nm)5aX+G?p!Sky7cK) zQO~Td*{uKxbRoT!zoxcimgp|7&da#_S+={sEU($TYab~R} zXvD)tZLLJ(-~us5=^as0GBXeP<<-nGF%v)!UCM9YNQ`Sut$2xZV1z zq#0jd#6nI|-vTayDgxkt6g^?YR%*l3y7q)IWJkF!pQuy3&&AqHT*m-gllS#?i@Hi zufE0YLNM_^MP(Na=28_uOyXNiVLb&dumXL}OJPnzebF1=P#_%P5!{(NnTgfco!+h8 zmS`CjrcBrrUZiSIMp*2gp`TcN+PPmRvyo_YDE9U{fN{GHPA2J9k%!fG`ty@sK=y{x zOI}X1U-nFEs8cv|7%jXMfA2;G_}1r0K~hU2H$=kMZPwIdlRog{j7RZ?Ox1+#Je+(` z6fja2K*?EA;mWeObp_2*f4K8b!K_R~mp=`kU+3jq)F^CyHRo)Lj4Lf0$}$3k)p#>< zGvEW-^5I>SAzUq21JR-sd_EiU^E3Bf^?JwgR;ntauCOLWl@w~|5Yh}V6TQ8tOG}%w z2z9FmI2yxb4eVQ?SosQh?(k@>Ad>Dt(l*>oEkifcDaNus_}0QUlWKWn3YEObdi?Cy z<5Ui#P@v5IAL}sqr?&0igE;t%r@zIu@2uL<{5F}z*=%AmsXg0^z`Q*mt`!UwifLdjD 
z9mf-6!{A#=9P;n2F1yiZP8u01Gy!g#+h_q?4`oX!M@S;ggc5v@P;Xu)LLUjXkn+Qo zg}!9|z&C!xkUg9YxMY6F{!pA+YwX-VIT}EDKTrK?^e;_n6Y||vb3|$zTMdvp;fALG z8w{BHwK1J4vpl9N@O7ymU>r-;+6*<<)q%pojMe$t_%C-!lh)oY*slNQBD2hw8JRDW7j|LpkQ_TCkWkLdc|1i!E!y^v{i?KIp2;6Q zx&N2jWXCq{@>+i))aS)@af}M4j=GrM)r?fh3)CxN^Apfp6nmm%a;0!&v6h-ihpsro z8ySTixdj^A)xwjRt9A@F3cqa<)z-9qr~={HKVN)3Y9-qS{T)E(m9+0fP&!Xb#k7BA z4g2M?+_cm{#m-;aer)w09{itt<+C4C7^77V?4AJ3|@UGE`lWY)d%AYW?A@RR1& zdO<&9z0TZO>i8=4vs*%eitRwrE?UmG z z`MuN0MNYvONzHSi_JV=T+|C0U)lEYoflwLs6W-^(F3xEaN-OY~IEjm7Dl;OF&YLL}P0%FRU#zPeOIu73&I>)ZWJC|4{qE5~wv*QP%M zBN~fDu@&NoU6C2da% zP7d$Lrj_AKdb81>EUt`5V%e#K_7F|0DocCwMfcPz@Qwi(&RUT5!7%wY?2Zqg*+}P8 zTzR|6?CvCRcJ}jtJ_7*SbxL=fd73P&xM1vX$X?QYv3qlMlWFfo@}6w@t(L;+t2!d) zPS|pD&ujM{vTQ4g0muFioYU_3vYvV2=b8t+|Zg`Vgj!EPFYu4Lm zS;hO7;>y=9C;0fFg>A;P?|Uf?+c;8MQW>VXzr8^YU&7rx2AB%>NoHn&jV*rIX}oT5 z>#)P%#5Yfw8bzJ1$dPkW^9oVu%hH3~eDHiPOVc4I9@Xfj%m#cZ)+DXSj$R-i?lj8y zQ(vLW9Hw>8VKo9?Sz7X1!pkWQa2Xcybw54FMEe*4q7}+`8QP< z;~O3kAwPGd9cw+ku0Lnt_>tS?zNsN&r8#4I&=`1=Z%aW=DJH`w%~45z(frHX5l@6- z+>Fp7>9nrMiHZcTiPJ80Yr`^2HXX$iJF;=hWPhTU9md|aiGX{+no`Y-x3fDt?y4~_ z7P~KKiQ3X0@bALRJuF-qb6M9WU8W-Rs`BtNjjbVYKKN-GuBUeRlKu*fR9!E(2dFy` zNt8(2GhGf)h>Yf3ObtEV^f(rgllO^Tv$RVH|8)*uh9|^!;0a~7yRYpqGE@TqrvU%} zO?UBczs(n@O`c}gN~?QswaB&AV>Ln&%J@XBHSo`hAZy&pXYMLa#V?D~e;(zwWgpT; z%H?Mds$&bfNZf&*=H@0|Y5Zq2xEm`SXfIw53OKPdxvm>!QuXm;QBO)7XUjy&ed==w zO2kL-DjYzGbZ3D(kYZ9MXV+$v9%kFr@GhWjMs)9AQr`xHa!w^QX)v|Q074>_&5gXq zcgTqh-vK4X4ydV8x$awaAcO>kom%^T>kS%z9|U&w$r|=nHbU5d#Tm%BW=K;A=f^?9 zbDbud@KrNr);`g*Uul-qS=J0z`bM8Z%wyTmvZ0&pu_S?2y&}D)ywN@NPP}YaA5cKI z_{#>~V^8|<8V0+-i&mDAXlpFz1Vt@6ZT_z!A^+e#|J{xpjXNJ3BX-Qb-URhb;~5-c z1=A#SN2J4CICz1&oh8b_%i@nHS8b#g0B%Pc(I-N9UiM!tag zOp0e1>OK+;DbFjXwXtPz_O@Hcu)C_3+*E_VKyk@W_^9hF{K zP*#thtZYH<#H$yOoP5a@o?WXaiOWVHBPnV97?D@N&*Sgzt*i4=r@VAYieh=+zMKHl zDNp0#;0EIytTUb|ihMHk@j-0dq}D0kRaVt}QmgY%(Q(L35A@l)8iI1kyxMJ6^rAo( z_seo?tHHAA6plR4_@{Fua~|}S_e>avsjtNIl;=m2Tmvc?-(l4`dWT3(mR?~OSD^`# 
z?J*fh#7TUxPW7WyFnKhu=7n;D+_PkcRLZkxRLVjrF+SK3_4t5U)VjWQv56!5Vp{~?RuIpLY^{Pt3)d;;IpCv&UQ2sXl z3~d4CI=x+n)0Wamcty|L`km1xT`1J-Mo?ZJjS1Ubn%v^f9k@YxPl1X$(bxy^;C-_j oeH-h1n_Hl_ihF*|EP?Si)7PKPR{F8upUeLG;m?2V&+mi(2IpsQ9RL6T diff --git a/PyTorch/build-in/Classification/Sequencer2D/sequencer2D_loss.txt b/PyTorch/build-in/Classification/Sequencer2D/sequencer2D_loss.txt deleted file mode 100644 index b03ac20a0..000000000 --- a/PyTorch/build-in/Classification/Sequencer2D/sequencer2D_loss.txt +++ /dev/null @@ -1,29 +0,0 @@ -=== CUDA === -4.605500 4.604200 4.575900 4.704500 4.634000 4.522500 4.800900 4.807600 4.625900 4.705800 -4.870400 4.584700 4.773200 5.285000 5.075000 4.986000 4.843400 5.315900 4.963500 5.185100 -4.813800 4.679400 4.381800 4.726500 5.508500 5.427500 4.998200 4.788300 5.352800 4.815200 -5.005500 4.868500 5.653600 5.161500 5.161500 5.158900 4.890200 5.703200 4.891300 4.804300 -5.163300 4.468000 4.668200 5.157900 4.791100 5.295500 4.785100 5.287600 4.769000 5.002200 -4.607700 4.756000 4.464200 4.796000 4.427400 4.580400 4.422300 4.376400 4.660700 4.478500 -4.610200 4.409400 4.520500 4.131200 4.373200 4.561000 4.492000 4.512700 4.431700 4.509900 -4.408100 4.576500 4.327800 4.515700 4.364100 4.435900 4.538600 4.352800 4.364800 4.448300 -4.398600 4.398600 4.390100 4.132400 4.325700 4.314900 4.226500 4.415600 4.375900 4.281100 -4.583200 4.348900 4.366900 4.351000 4.382300 4.401900 4.349200 4.420900 4.401300 4.013700 - -=== SDAA === -4.605500 4.605100 4.602800 4.696800 4.589000 4.522900 4.831200 4.835000 4.672600 4.792900 -4.908600 4.605800 4.800500 5.228600 4.969200 4.998500 4.902800 5.385300 4.973600 5.261600 -4.767300 4.740800 4.446100 4.766200 5.479500 5.351100 4.987500 4.836500 5.347800 4.788500 -5.073200 4.795000 5.593800 5.038200 5.095500 5.117600 4.916400 5.822300 4.931600 4.759500 -5.274200 4.597200 4.595800 5.024100 4.709400 5.352800 4.876000 5.402900 4.869000 5.047900 -4.653400 4.883700 4.440900 4.846500 4.612600 4.783900 4.461900 4.499700 4.517700 
4.536100 -4.661900 4.476400 4.500400 4.225600 4.343800 4.452300 4.432100 4.423600 4.391900 4.565900 -4.413300 4.627400 4.254300 4.585600 4.431200 4.431100 4.494100 4.345000 4.356800 4.368900 -4.397400 4.424700 4.323700 4.229000 4.253100 4.351700 4.308600 4.452100 4.401300 4.298300 -4.579000 4.402500 4.454300 4.313800 4.367200 4.409500 4.298600 4.480800 4.438000 4.073200 - -=== RESULT === -MeanRelativeError: 0.0033275151217559127 -MeanAbsoluteError: 0.014867000000000035 -Rule,mean_relative_error 0.0033275151217559127 -pass mean_relative_error=0.0033275151217559127 <= 0.05 or mean_absolute_error=0.014867000000000035 <= 0.0002 From 0f11a57a66b0b6c770c9ef550c5782bfa119dcbe Mon Sep 17 00:00:00 2001 From: root Date: Thu, 8 Jan 2026 10:32:54 +0000 Subject: [PATCH 3/3] fix: rename files and update code --- PyTorch/build-in/Classification/Sequencer2D/{readme => readme.md} | 0 .../Sequencer2D/{requirements_exact.txt => requirements.txt} | 0 2 files changed, 0 insertions(+), 0 deletions(-) rename PyTorch/build-in/Classification/Sequencer2D/{readme => readme.md} (100%) rename PyTorch/build-in/Classification/Sequencer2D/{requirements_exact.txt => requirements.txt} (100%) diff --git a/PyTorch/build-in/Classification/Sequencer2D/readme b/PyTorch/build-in/Classification/Sequencer2D/readme.md similarity index 100% rename from PyTorch/build-in/Classification/Sequencer2D/readme rename to PyTorch/build-in/Classification/Sequencer2D/readme.md diff --git a/PyTorch/build-in/Classification/Sequencer2D/requirements_exact.txt b/PyTorch/build-in/Classification/Sequencer2D/requirements.txt similarity index 100% rename from PyTorch/build-in/Classification/Sequencer2D/requirements_exact.txt rename to PyTorch/build-in/Classification/Sequencer2D/requirements.txt