From 2623fe0c47cbe36a61bde56cbe163255e85a58c2 Mon Sep 17 00:00:00 2001 From: Chengjie Zheng Date: Thu, 7 Mar 2024 15:44:57 +0000 Subject: [PATCH 01/10] initial env setup by run setup.py install --- build/lib/datasets/data_manager.py | 91 ++ build/lib/datasets/image_dataset.py | 79 ++ build/lib/datasets/utils/video/functional.py | 96 ++ build/lib/datasets/utils/video/randaugment.py | 518 ++++++++ build/lib/datasets/utils/video/randerase.py | 180 +++ build/lib/datasets/utils/video/transforms.py | 1184 +++++++++++++++++ .../datasets/utils/video/volume_transforms.py | 151 +++ build/lib/datasets/utils/weighted_sampler.py | 97 ++ build/lib/datasets/video_dataset.py | 272 ++++ build/lib/masks/default.py | 20 + build/lib/masks/multiblock3d.py | 203 +++ build/lib/masks/random_tube.py | 117 ++ build/lib/masks/utils.py | 23 + build/lib/models/attentive_pooler.py | 136 ++ build/lib/models/predictor.py | 246 ++++ build/lib/models/utils/modules.py | 183 +++ build/lib/models/utils/multimask.py | 48 + build/lib/models/utils/patch_embed.py | 57 + build/lib/models/utils/pos_embs.py | 99 ++ build/lib/models/vision_transformer.py | 307 +++++ build/lib/utils/distributed.py | 113 ++ build/lib/utils/logging.py | 118 ++ build/lib/utils/monitoring.py | 175 +++ build/lib/utils/schedulers.py | 76 ++ build/lib/utils/tensors.py | 71 + dist/jepa-0.0.1-py3.9.egg | Bin 0 -> 105636 bytes src/jepa.egg-info/PKG-INFO | 19 + src/jepa.egg-info/SOURCES.txt | 33 + src/jepa.egg-info/dependency_links.txt | 1 + src/jepa.egg-info/requires.txt | 13 + src/jepa.egg-info/top_level.txt | 4 + 31 files changed, 4730 insertions(+) create mode 100644 build/lib/datasets/data_manager.py create mode 100644 build/lib/datasets/image_dataset.py create mode 100644 build/lib/datasets/utils/video/functional.py create mode 100644 build/lib/datasets/utils/video/randaugment.py create mode 100644 build/lib/datasets/utils/video/randerase.py create mode 100644 build/lib/datasets/utils/video/transforms.py create mode 100644 build/lib/datasets/utils/video/volume_transforms.py create mode 100644 build/lib/datasets/utils/weighted_sampler.py create mode 100644 build/lib/datasets/video_dataset.py create mode 100644 build/lib/masks/default.py create mode 100644 build/lib/masks/multiblock3d.py create mode 100644 build/lib/masks/random_tube.py create mode 100644 build/lib/masks/utils.py create mode 100644 build/lib/models/attentive_pooler.py create mode 100644 build/lib/models/predictor.py create mode 100644 build/lib/models/utils/modules.py create mode 100644 build/lib/models/utils/multimask.py create mode 100644 build/lib/models/utils/patch_embed.py create mode 100644 build/lib/models/utils/pos_embs.py create mode 100644 build/lib/models/vision_transformer.py create mode 100644 build/lib/utils/distributed.py create mode 100644 build/lib/utils/logging.py create mode 100644 build/lib/utils/monitoring.py create mode 100644 build/lib/utils/schedulers.py create mode 100644 build/lib/utils/tensors.py create mode 100644 dist/jepa-0.0.1-py3.9.egg create mode 100644 src/jepa.egg-info/PKG-INFO create mode 100644 src/jepa.egg-info/SOURCES.txt create mode 100644 src/jepa.egg-info/dependency_links.txt create mode 100644 src/jepa.egg-info/requires.txt create mode 100644 src/jepa.egg-info/top_level.txt diff --git a/build/lib/datasets/data_manager.py b/build/lib/datasets/data_manager.py new file mode 100644 index 00000000..cdb7ade4 --- /dev/null +++ b/build/lib/datasets/data_manager.py @@ -0,0 +1,91 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. 
+# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. +# + +from logging import getLogger + + +_GLOBAL_SEED = 0 +logger = getLogger() + + +def init_data( + batch_size, + transform=None, + shared_transform=None, + data='ImageNet', + collator=None, + pin_mem=True, + num_workers=8, + world_size=1, + rank=0, + root_path=None, + image_folder=None, + training=True, + copy_data=False, + drop_last=True, + tokenize_txt=True, + subset_file=None, + clip_len=8, + frame_sample_rate=2, + duration=None, + num_clips=1, + random_clip_sampling=True, + allow_clip_overlap=False, + filter_short_videos=False, + filter_long_videos=int(1e9), + decode_one_clip=True, + datasets_weights=None, + persistent_workers=False, + repeat_wds=False, + ipe=300, + log_dir=None, +): + + if (data.lower() == 'imagenet') \ + or (data.lower() == 'inat21') \ + or (data.lower() == 'places205'): + from src.datasets.image_dataset import make_imagedataset + dataset, data_loader, dist_sampler = make_imagedataset( + transform=transform, + batch_size=batch_size, + collator=collator, + pin_mem=pin_mem, + training=training, + num_workers=num_workers, + world_size=world_size, + rank=rank, + root_path=root_path, + image_folder=image_folder, + persistent_workers=persistent_workers, + copy_data=copy_data, + drop_last=drop_last, + subset_file=subset_file) + + elif data.lower() == 'videodataset': + from src.datasets.video_dataset import make_videodataset + dataset, data_loader, dist_sampler = make_videodataset( + data_paths=root_path, + batch_size=batch_size, + frames_per_clip=clip_len, + frame_step=frame_sample_rate, + duration=duration, + num_clips=num_clips, + random_clip_sampling=random_clip_sampling, + allow_clip_overlap=allow_clip_overlap, + filter_short_videos=filter_short_videos, + filter_long_videos=filter_long_videos, + shared_transform=shared_transform, + transform=transform, + datasets_weights=datasets_weights, + collator=collator, + num_workers=num_workers, + world_size=world_size, + rank=rank, + drop_last=drop_last, + log_dir=log_dir) + + return (data_loader, dist_sampler) diff --git a/build/lib/datasets/image_dataset.py b/build/lib/datasets/image_dataset.py new file mode 100644 index 00000000..84e9b082 --- /dev/null +++ b/build/lib/datasets/image_dataset.py @@ -0,0 +1,79 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. 
+# + +import os + +from logging import getLogger + +import torch +import torchvision + +_GLOBAL_SEED = 0 +logger = getLogger() + + +class ImageFolder(torchvision.datasets.ImageFolder): + + def __init__( + self, + root, + image_folder='imagenet_full_size/061417/', + transform=None, + train=True, + ): + """ + ImageFolder + :param root: root network directory for ImageFolder data + :param image_folder: path to images inside root network directory + :param train: whether to load train data (or validation) + """ + + suffix = 'train/' if train else 'val/' + data_path = os.path.join(root, image_folder, suffix) + logger.info(f'data-path {data_path}') + super(ImageFolder, self).__init__(root=data_path, transform=transform) + logger.info('Initialized ImageFolder') + + +def make_imagedataset( + transform, + batch_size, + collator=None, + pin_mem=True, + num_workers=8, + world_size=1, + rank=0, + root_path=None, + image_folder=None, + training=True, + copy_data=False, + drop_last=True, + persistent_workers=False, + subset_file=None +): + dataset = ImageFolder( + root=root_path, + image_folder=image_folder, + transform=transform, + train=training) + logger.info('ImageFolder dataset created') + dist_sampler = torch.utils.data.distributed.DistributedSampler( + dataset=dataset, + num_replicas=world_size, + rank=rank) + data_loader = torch.utils.data.DataLoader( + dataset, + collate_fn=collator, + sampler=dist_sampler, + batch_size=batch_size, + drop_last=drop_last, + pin_memory=pin_mem, + num_workers=num_workers, + persistent_workers=persistent_workers) + logger.info('ImageFolder unsupervised data loader created') + + return dataset, data_loader, dist_sampler diff --git a/build/lib/datasets/utils/video/functional.py b/build/lib/datasets/utils/video/functional.py new file mode 100644 index 00000000..a91d15d2 --- /dev/null +++ b/build/lib/datasets/utils/video/functional.py @@ -0,0 +1,96 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. 
+# + +import numbers +import cv2 +import numpy as np +import PIL +import torch + + +def _is_tensor_clip(clip): + return torch.is_tensor(clip) and clip.ndimension() == 4 + + +def crop_clip(clip, min_h, min_w, h, w): + if isinstance(clip[0], np.ndarray): + cropped = [img[min_h:min_h + h, min_w:min_w + w, :] for img in clip] + + elif isinstance(clip[0], PIL.Image.Image): + cropped = [ + img.crop((min_w, min_h, min_w + w, min_h + h)) for img in clip + ] + else: + raise TypeError('Expected numpy.ndarray or PIL.Image' + + 'but got list of {0}'.format(type(clip[0]))) + return cropped + + +def resize_clip(clip, size, interpolation='bilinear'): + if isinstance(clip[0], np.ndarray): + if isinstance(size, numbers.Number): + im_h, im_w, im_c = clip[0].shape + # Min spatial dim already matches minimal size + if (im_w <= im_h and im_w == size) or (im_h <= im_w + and im_h == size): + return clip + new_h, new_w = get_resize_sizes(im_h, im_w, size) + size = (new_w, new_h) + else: + size = size[0], size[1] + if interpolation == 'bilinear': + np_inter = cv2.INTER_LINEAR + else: + np_inter = cv2.INTER_NEAREST + scaled = [ + cv2.resize(img, size, interpolation=np_inter) for img in clip + ] + elif isinstance(clip[0], PIL.Image.Image): + if isinstance(size, numbers.Number): + im_w, im_h = clip[0].size + # Min spatial dim already matches minimal size + if (im_w <= im_h and im_w == size) or (im_h <= im_w + and im_h == size): + return clip + new_h, new_w = get_resize_sizes(im_h, im_w, size) + size = (new_w, new_h) + else: + size = size[1], size[0] + if interpolation == 'bilinear': + pil_inter = PIL.Image.BILINEAR + else: + pil_inter = PIL.Image.NEAREST + scaled = [img.resize(size, pil_inter) for img in clip] + else: + raise TypeError('Expected numpy.ndarray or PIL.Image' + + 'but got list of {0}'.format(type(clip[0]))) + return scaled + + +def get_resize_sizes(im_h, im_w, size): + if im_w < im_h: + ow = size + oh = int(size * im_h / im_w) + else: + oh = size + ow = int(size * im_w / im_h) + return oh, ow + + +def normalize(clip, mean, std, inplace=False): + if not _is_tensor_clip(clip): + raise TypeError('tensor is not a torch clip.') + + if not inplace: + clip = clip.clone() + + dtype = clip.dtype + mean = torch.as_tensor(mean, dtype=dtype, device=clip.device) + std = torch.as_tensor(std, dtype=dtype, device=clip.device) + clip.sub_(mean[:, None, None, None]).div_(std[:, None, None, None]) + + return clip diff --git a/build/lib/datasets/utils/video/randaugment.py b/build/lib/datasets/utils/video/randaugment.py new file mode 100644 index 00000000..4c80a990 --- /dev/null +++ b/build/lib/datasets/utils/video/randaugment.py @@ -0,0 +1,518 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. +# + +""" +This implementation is based on +https://github.com/rwightman/pytorch-image-models/blob/master/timm/data/auto_augment.py +pulished under an Apache License 2.0. +""" + +import math +import numpy as np +import random +import re +import PIL +from PIL import Image, ImageEnhance, ImageOps + +_PIL_VER = tuple([int(x) for x in PIL.__version__.split(".")[:2]]) + +_FILL = (128, 128, 128) + +# This signifies the max integer that the controller RNN could predict for the +# augmentation scheme. 
+_MAX_LEVEL = 10.0 + +_HPARAMS_DEFAULT = { + "translate_const": 250, + "img_mean": _FILL, +} + +_RANDOM_INTERPOLATION = (Image.BILINEAR, Image.BICUBIC) + + +def _interpolation(kwargs): + interpolation = kwargs.pop("resample", Image.BILINEAR) + if isinstance(interpolation, (list, tuple)): + return random.choice(interpolation) + else: + return interpolation + + +def _check_args_tf(kwargs): + if "fillcolor" in kwargs and _PIL_VER < (5, 0): + kwargs.pop("fillcolor") + kwargs["resample"] = _interpolation(kwargs) + + +def shear_x(img, factor, **kwargs): + _check_args_tf(kwargs) + return img.transform( + img.size, Image.AFFINE, (1, factor, 0, 0, 1, 0), **kwargs + ) + + +def shear_y(img, factor, **kwargs): + _check_args_tf(kwargs) + return img.transform( + img.size, Image.AFFINE, (1, 0, 0, factor, 1, 0), **kwargs + ) + + +def translate_x_rel(img, pct, **kwargs): + pixels = pct * img.size[0] + _check_args_tf(kwargs) + return img.transform( + img.size, Image.AFFINE, (1, 0, pixels, 0, 1, 0), **kwargs + ) + + +def translate_y_rel(img, pct, **kwargs): + pixels = pct * img.size[1] + _check_args_tf(kwargs) + return img.transform( + img.size, Image.AFFINE, (1, 0, 0, 0, 1, pixels), **kwargs + ) + + +def translate_x_abs(img, pixels, **kwargs): + _check_args_tf(kwargs) + return img.transform( + img.size, Image.AFFINE, (1, 0, pixels, 0, 1, 0), **kwargs + ) + + +def translate_y_abs(img, pixels, **kwargs): + _check_args_tf(kwargs) + return img.transform( + img.size, Image.AFFINE, (1, 0, 0, 0, 1, pixels), **kwargs + ) + + +def rotate(img, degrees, **kwargs): + _check_args_tf(kwargs) + if _PIL_VER >= (5, 2): + return img.rotate(degrees, **kwargs) + elif _PIL_VER >= (5, 0): + w, h = img.size + post_trans = (0, 0) + rotn_center = (w / 2.0, h / 2.0) + angle = -math.radians(degrees) + matrix = [ + round(math.cos(angle), 15), + round(math.sin(angle), 15), + 0.0, + round(-math.sin(angle), 15), + round(math.cos(angle), 15), + 0.0, + ] + + def transform(x, y, matrix): + (a, b, c, d, e, f) = matrix + return a * x + b * y + c, d * x + e * y + f + + matrix[2], matrix[5] = transform( + -rotn_center[0] - post_trans[0], + -rotn_center[1] - post_trans[1], + matrix, + ) + matrix[2] += rotn_center[0] + matrix[5] += rotn_center[1] + return img.transform(img.size, Image.AFFINE, matrix, **kwargs) + else: + return img.rotate(degrees, resample=kwargs["resample"]) + + +def auto_contrast(img, **__): + return ImageOps.autocontrast(img) + + +def invert(img, **__): + return ImageOps.invert(img) + + +def equalize(img, **__): + return ImageOps.equalize(img) + + +def solarize(img, thresh, **__): + return ImageOps.solarize(img, thresh) + + +def solarize_add(img, add, thresh=128, **__): + lut = [] + for i in range(256): + if i < thresh: + lut.append(min(255, i + add)) + else: + lut.append(i) + if img.mode in ("L", "RGB"): + if img.mode == "RGB" and len(lut) == 256: + lut = lut + lut + lut + return img.point(lut) + else: + return img + + +def posterize(img, bits_to_keep, **__): + if bits_to_keep >= 8: + return img + return ImageOps.posterize(img, bits_to_keep) + + +def contrast(img, factor, **__): + return ImageEnhance.Contrast(img).enhance(factor) + + +def color(img, factor, **__): + return ImageEnhance.Color(img).enhance(factor) + + +def brightness(img, factor, **__): + return ImageEnhance.Brightness(img).enhance(factor) + + +def sharpness(img, factor, **__): + return ImageEnhance.Sharpness(img).enhance(factor) + + +def _randomly_negate(v): + """With 50% prob, negate the value""" + return -v if random.random() > 0.5 else v + + +def 
_rotate_level_to_arg(level, _hparams): + # range [-30, 30] + level = (level / _MAX_LEVEL) * 30.0 + level = _randomly_negate(level) + return (level,) + + +def _enhance_level_to_arg(level, _hparams): + # range [0.1, 1.9] + return ((level / _MAX_LEVEL) * 1.8 + 0.1,) + + +def _enhance_increasing_level_to_arg(level, _hparams): + # the 'no change' level is 1.0, moving away from that towards 0. or 2.0 increases the enhancement blend + # range [0.1, 1.9] + level = (level / _MAX_LEVEL) * 0.9 + level = 1.0 + _randomly_negate(level) + return (level,) + + +def _shear_level_to_arg(level, _hparams): + # range [-0.3, 0.3] + level = (level / _MAX_LEVEL) * 0.3 + level = _randomly_negate(level) + return (level,) + + +def _translate_abs_level_to_arg(level, hparams): + translate_const = hparams["translate_const"] + level = (level / _MAX_LEVEL) * float(translate_const) + level = _randomly_negate(level) + return (level,) + + +def _translate_rel_level_to_arg(level, hparams): + # default range [-0.45, 0.45] + translate_pct = hparams.get("translate_pct", 0.45) + level = (level / _MAX_LEVEL) * translate_pct + level = _randomly_negate(level) + return (level,) + + +def _posterize_level_to_arg(level, _hparams): + # As per Tensorflow TPU EfficientNet impl + # range [0, 4], 'keep 0 up to 4 MSB of original image' + # intensity/severity of augmentation decreases with level + return (int((level / _MAX_LEVEL) * 4),) + + +def _posterize_increasing_level_to_arg(level, hparams): + # As per Tensorflow models research and UDA impl + # range [4, 0], 'keep 4 down to 0 MSB of original image', + # intensity/severity of augmentation increases with level + return (4 - _posterize_level_to_arg(level, hparams)[0],) + + +def _posterize_original_level_to_arg(level, _hparams): + # As per original AutoAugment paper description + # range [4, 8], 'keep 4 up to 8 MSB of image' + # intensity/severity of augmentation decreases with level + return (int((level / _MAX_LEVEL) * 4) + 4,) + + +def _solarize_level_to_arg(level, _hparams): + # range [0, 256] + # intensity/severity of augmentation decreases with level + return (int((level / _MAX_LEVEL) * 256),) + + +def _solarize_increasing_level_to_arg(level, _hparams): + # range [0, 256] + # intensity/severity of augmentation increases with level + return (256 - _solarize_level_to_arg(level, _hparams)[0],) + + +def _solarize_add_level_to_arg(level, _hparams): + # range [0, 110] + return (int((level / _MAX_LEVEL) * 110),) + + +LEVEL_TO_ARG = { + "AutoContrast": None, + "Equalize": None, + "Invert": None, + "Rotate": _rotate_level_to_arg, + # There are several variations of the posterize level scaling in various Tensorflow/Google repositories/papers + "Posterize": _posterize_level_to_arg, + "PosterizeIncreasing": _posterize_increasing_level_to_arg, + "PosterizeOriginal": _posterize_original_level_to_arg, + "Solarize": _solarize_level_to_arg, + "SolarizeIncreasing": _solarize_increasing_level_to_arg, + "SolarizeAdd": _solarize_add_level_to_arg, + "Color": _enhance_level_to_arg, + "ColorIncreasing": _enhance_increasing_level_to_arg, + "Contrast": _enhance_level_to_arg, + "ContrastIncreasing": _enhance_increasing_level_to_arg, + "Brightness": _enhance_level_to_arg, + "BrightnessIncreasing": _enhance_increasing_level_to_arg, + "Sharpness": _enhance_level_to_arg, + "SharpnessIncreasing": _enhance_increasing_level_to_arg, + "ShearX": _shear_level_to_arg, + "ShearY": _shear_level_to_arg, + "TranslateX": _translate_abs_level_to_arg, + "TranslateY": _translate_abs_level_to_arg, + "TranslateXRel": 
_translate_rel_level_to_arg, + "TranslateYRel": _translate_rel_level_to_arg, +} + + +NAME_TO_OP = { + "AutoContrast": auto_contrast, + "Equalize": equalize, + "Invert": invert, + "Rotate": rotate, + "Posterize": posterize, + "PosterizeIncreasing": posterize, + "PosterizeOriginal": posterize, + "Solarize": solarize, + "SolarizeIncreasing": solarize, + "SolarizeAdd": solarize_add, + "Color": color, + "ColorIncreasing": color, + "Contrast": contrast, + "ContrastIncreasing": contrast, + "Brightness": brightness, + "BrightnessIncreasing": brightness, + "Sharpness": sharpness, + "SharpnessIncreasing": sharpness, + "ShearX": shear_x, + "ShearY": shear_y, + "TranslateX": translate_x_abs, + "TranslateY": translate_y_abs, + "TranslateXRel": translate_x_rel, + "TranslateYRel": translate_y_rel, +} + + +class AugmentOp: + """ + Apply for video. + """ + + def __init__(self, name, prob=0.5, magnitude=10, hparams=None): + hparams = hparams or _HPARAMS_DEFAULT + self.aug_fn = NAME_TO_OP[name] + self.level_fn = LEVEL_TO_ARG[name] + self.prob = prob + self.magnitude = magnitude + self.hparams = hparams.copy() + self.kwargs = { + "fillcolor": hparams["img_mean"] + if "img_mean" in hparams + else _FILL, + "resample": hparams["interpolation"] + if "interpolation" in hparams + else _RANDOM_INTERPOLATION, + } + + # If magnitude_std is > 0, we introduce some randomness + # in the usually fixed policy and sample magnitude from a normal distribution + # with mean `magnitude` and std-dev of `magnitude_std`. + # NOTE This is my own hack, being tested, not in papers or reference impls. + self.magnitude_std = self.hparams.get("magnitude_std", 0) + + def __call__(self, img_list): + if self.prob < 1.0 and random.random() > self.prob: + return img_list + magnitude = self.magnitude + if self.magnitude_std and self.magnitude_std > 0: + magnitude = random.gauss(magnitude, self.magnitude_std) + magnitude = min(_MAX_LEVEL, max(0, magnitude)) # clip to valid range + level_args = ( + self.level_fn(magnitude, self.hparams) + if self.level_fn is not None + else () + ) + + if isinstance(img_list, list): + return [ + self.aug_fn(img, *level_args, **self.kwargs) for img in img_list + ] + else: + return self.aug_fn(img_list, *level_args, **self.kwargs) + + +_RAND_TRANSFORMS = [ + "AutoContrast", + "Equalize", + "Invert", + "Rotate", + "Posterize", + "Solarize", + "SolarizeAdd", + "Color", + "Contrast", + "Brightness", + "Sharpness", + "ShearX", + "ShearY", + "TranslateXRel", + "TranslateYRel", +] + + +_RAND_INCREASING_TRANSFORMS = [ + "AutoContrast", + "Equalize", + "Invert", + "Rotate", + "PosterizeIncreasing", + "SolarizeIncreasing", + "SolarizeAdd", + "ColorIncreasing", + "ContrastIncreasing", + "BrightnessIncreasing", + "SharpnessIncreasing", + "ShearX", + "ShearY", + "TranslateXRel", + "TranslateYRel", +] + + +# These experimental weights are based loosely on the relative improvements mentioned in paper. +# They may not result in increased performance, but could likely be tuned to so. 
+_RAND_CHOICE_WEIGHTS_0 = { + "Rotate": 0.3, + "ShearX": 0.2, + "ShearY": 0.2, + "TranslateXRel": 0.1, + "TranslateYRel": 0.1, + "Color": 0.025, + "Sharpness": 0.025, + "AutoContrast": 0.025, + "Solarize": 0.005, + "SolarizeAdd": 0.005, + "Contrast": 0.005, + "Brightness": 0.005, + "Equalize": 0.005, + "Posterize": 0, + "Invert": 0, +} + + +def _select_rand_weights(weight_idx=0, transforms=None): + transforms = transforms or _RAND_TRANSFORMS + assert weight_idx == 0 # only one set of weights currently + rand_weights = _RAND_CHOICE_WEIGHTS_0 + probs = [rand_weights[k] for k in transforms] + probs /= np.sum(probs) + return probs + + +def rand_augment_ops(magnitude=10, hparams=None, transforms=None): + hparams = hparams or _HPARAMS_DEFAULT + transforms = transforms or _RAND_TRANSFORMS + return [ + AugmentOp(name, prob=0.5, magnitude=magnitude, hparams=hparams) + for name in transforms + ] + + +class RandAugment: + def __init__(self, ops, num_layers=2, choice_weights=None): + self.ops = ops + self.num_layers = num_layers + self.choice_weights = choice_weights + + def __call__(self, img): + # no replacement when using weighted choice + ops = np.random.choice( + self.ops, + self.num_layers, + replace=self.choice_weights is None, + p=self.choice_weights, + ) + for op in ops: + img = op(img) + return img + + +def rand_augment_transform(config_str, hparams): + """ + RandAugment: Practical automated data augmentation... - https://arxiv.org/abs/1909.13719 + + Create a RandAugment transform + :param config_str: String defining configuration of random augmentation. Consists of multiple sections separated by + dashes ('-'). The first section defines the specific variant of rand augment (currently only 'rand'). The remaining + sections, not order sepecific determine + 'm' - integer magnitude of rand augment + 'n' - integer num layers (number of transform ops selected per image) + 'w' - integer probabiliy weight index (index of a set of weights to influence choice of op) + 'mstd' - float std deviation of magnitude noise applied + 'inc' - integer (bool), use augmentations that increase in severity with magnitude (default: 0) + Ex 'rand-m9-n3-mstd0.5' results in RandAugment with magnitude 9, num_layers 3, magnitude_std 0.5 + 'rand-mstd1-w0' results in magnitude_std 1.0, weights 0, default magnitude of 10 and num_layers 2 + :param hparams: Other hparams (kwargs) for the RandAugmentation scheme + :return: A PyTorch compatible Transform + """ + magnitude = _MAX_LEVEL # default to _MAX_LEVEL for magnitude (currently 10) + num_layers = 2 # default to 2 ops per image + weight_idx = None # default to no probability weights for op choice + transforms = _RAND_TRANSFORMS + config = config_str.split("-") + assert config[0] == "rand" + config = config[1:] + for c in config: + cs = re.split(r"(\d.*)", c) + if len(cs) < 2: + continue + key, val = cs[:2] + if key == "mstd": + # noise param injected via hparams for now + hparams.setdefault("magnitude_std", float(val)) + elif key == "inc": + if bool(val): + transforms = _RAND_INCREASING_TRANSFORMS + elif key == "m": + magnitude = int(val) + elif key == "n": + num_layers = int(val) + elif key == "w": + weight_idx = int(val) + else: + assert NotImplementedError + ra_ops = rand_augment_ops( + magnitude=magnitude, hparams=hparams, transforms=transforms + ) + choice_weights = ( + None if weight_idx is None else _select_rand_weights(weight_idx) + ) + return RandAugment(ra_ops, num_layers, choice_weights=choice_weights) diff --git a/build/lib/datasets/utils/video/randerase.py 
b/build/lib/datasets/utils/video/randerase.py new file mode 100644 index 00000000..d1f185c8 --- /dev/null +++ b/build/lib/datasets/utils/video/randerase.py @@ -0,0 +1,180 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. +# + +""" +This implementation is based on +https://github.com/rwightman/pytorch-image-models/blob/master/timm/data/random_erasing.py +pulished under an Apache License 2.0. +""" +import math +import random +import torch + + +def _get_pixels( + per_pixel, rand_color, patch_size, dtype=torch.float32, device="cuda" +): + # NOTE I've seen CUDA illegal memory access errors being caused by the normal_() + # paths, flip the order so normal is run on CPU if this becomes a problem + # Issue has been fixed in master https://github.com/pytorch/pytorch/issues/19508 + if per_pixel: + return torch.empty(patch_size, dtype=dtype, device=device).normal_() + elif rand_color: + return torch.empty( + (patch_size[0], 1, 1), dtype=dtype, device=device + ).normal_() + else: + return torch.zeros((patch_size[0], 1, 1), dtype=dtype, device=device) + + +class RandomErasing: + """Randomly selects a rectangle region in an image and erases its pixels. + 'Random Erasing Data Augmentation' by Zhong et al. + See https://arxiv.org/pdf/1708.04896.pdf + This variant of RandomErasing is intended to be applied to either a batch + or single image tensor after it has been normalized by dataset mean and std. + Args: + probability: Probability that the Random Erasing operation will be performed. + min_area: Minimum percentage of erased area wrt input image area. + max_area: Maximum percentage of erased area wrt input image area. + min_aspect: Minimum aspect ratio of erased area. + mode: pixel color mode, one of 'const', 'rand', or 'pixel' + 'const' - erase block is constant color of 0 for all channels + 'rand' - erase block is same per-channel random (normal) color + 'pixel' - erase block is per-pixel random (normal) color + max_count: maximum number of erasing blocks per image, area per box is scaled by count. + per-image count is randomly chosen between 1 and this value. 
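+    Example (illustrative; assumes a clip tensor in (T, C, H, W) layout that has
+    already been normalized, which is when this transform is intended to run):
+        >>> erase = RandomErasing(probability=1.0, mode="pixel", device="cpu")
+        >>> clip = torch.randn(16, 3, 224, 224)
+        >>> clip = erase(clip)  # same shape; one cuboid erased across all frames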
+ """ + + def __init__( + self, + probability=0.5, + min_area=0.02, + max_area=1 / 3, + min_aspect=0.3, + max_aspect=None, + mode="const", + min_count=1, + max_count=None, + num_splits=0, + device="cuda", + cube=True, + ): + self.probability = probability + self.min_area = min_area + self.max_area = max_area + max_aspect = max_aspect or 1 / min_aspect + self.log_aspect_ratio = (math.log(min_aspect), math.log(max_aspect)) + self.min_count = min_count + self.max_count = max_count or min_count + self.num_splits = num_splits + mode = mode.lower() + self.rand_color = False + self.per_pixel = False + self.cube = cube + if mode == "rand": + self.rand_color = True # per block random normal + elif mode == "pixel": + self.per_pixel = True # per pixel random normal + else: + assert not mode or mode == "const" + self.device = device + + def _erase(self, img, chan, img_h, img_w, dtype): + if random.random() > self.probability: + return + area = img_h * img_w + count = ( + self.min_count + if self.min_count == self.max_count + else random.randint(self.min_count, self.max_count) + ) + for _ in range(count): + for _ in range(10): + target_area = ( + random.uniform(self.min_area, self.max_area) * area / count + ) + aspect_ratio = math.exp(random.uniform(*self.log_aspect_ratio)) + h = int(round(math.sqrt(target_area * aspect_ratio))) + w = int(round(math.sqrt(target_area / aspect_ratio))) + if w < img_w and h < img_h: + top = random.randint(0, img_h - h) + left = random.randint(0, img_w - w) + img[:, top:top + h, left:left + w] = _get_pixels( + self.per_pixel, + self.rand_color, + (chan, h, w), + dtype=dtype, + device=self.device, + ) + break + + def _erase_cube( + self, + img, + batch_start, + batch_size, + chan, + img_h, + img_w, + dtype, + ): + if random.random() > self.probability: + return + area = img_h * img_w + count = ( + self.min_count + if self.min_count == self.max_count + else random.randint(self.min_count, self.max_count) + ) + for _ in range(count): + for _ in range(100): + target_area = ( + random.uniform(self.min_area, self.max_area) * area / count + ) + aspect_ratio = math.exp(random.uniform(*self.log_aspect_ratio)) + h = int(round(math.sqrt(target_area * aspect_ratio))) + w = int(round(math.sqrt(target_area / aspect_ratio))) + if w < img_w and h < img_h: + top = random.randint(0, img_h - h) + left = random.randint(0, img_w - w) + for i in range(batch_start, batch_size): + img_instance = img[i] + img_instance[ + :, top:top + h, left:left + w + ] = _get_pixels( + self.per_pixel, + self.rand_color, + (chan, h, w), + dtype=dtype, + device=self.device, + ) + break + + def __call__(self, input): + if len(input.size()) == 3: + self._erase(input, *input.size(), input.dtype) + else: + batch_size, chan, img_h, img_w = input.size() + # skip first slice of batch if num_splits is set (for clean portion of samples) + batch_start = ( + batch_size // self.num_splits if self.num_splits > 1 else 0 + ) + if self.cube: + self._erase_cube( + input, + batch_start, + batch_size, + chan, + img_h, + img_w, + input.dtype, + ) + else: + for i in range(batch_start, batch_size): + self._erase(input[i], chan, img_h, img_w, input.dtype) + return input diff --git a/build/lib/datasets/utils/video/transforms.py b/build/lib/datasets/utils/video/transforms.py new file mode 100644 index 00000000..ffa8e61d --- /dev/null +++ b/build/lib/datasets/utils/video/transforms.py @@ -0,0 +1,1184 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. 
+# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. +# + +import math +import numpy as np +import random +import numbers +import PIL +from PIL import Image + +import torch +import torchvision +import torchvision.transforms.functional as F +from torchvision import transforms + +import src.datasets.utils.video.functional as FF +from src.datasets.utils.video.randaugment import rand_augment_transform + + +_pil_interpolation_to_str = { + Image.NEAREST: 'PIL.Image.NEAREST', + Image.BILINEAR: 'PIL.Image.BILINEAR', + Image.BICUBIC: 'PIL.Image.BICUBIC', + Image.LANCZOS: 'PIL.Image.LANCZOS', + Image.HAMMING: 'PIL.Image.HAMMING', + Image.BOX: 'PIL.Image.BOX', +} + + +_RANDOM_INTERPOLATION = (Image.BILINEAR, Image.BICUBIC) + + +def _pil_interp(method): + if method == 'bicubic': + return Image.BICUBIC + elif method == 'lanczos': + return Image.LANCZOS + elif method == 'hamming': + return Image.HAMMING + else: + return Image.BILINEAR + + +def random_short_side_scale_jitter( + images, min_size, max_size, boxes=None, inverse_uniform_sampling=False +): + """ + Perform a spatial short scale jittering on the given images and + corresponding boxes. + Args: + images (tensor): images to perform scale jitter. Dimension is + `num frames` x `channel` x `height` x `width`. + min_size (int): the minimal size to scale the frames. + max_size (int): the maximal size to scale the frames. + boxes (ndarray): optional. Corresponding boxes to images. + Dimension is `num boxes` x 4. + inverse_uniform_sampling (bool): if True, sample uniformly in + [1 / max_scale, 1 / min_scale] and take a reciprocal to get the + scale. If False, take a uniform sample from [min_scale, max_scale]. + Returns: + (tensor): the scaled images with dimension of + `num frames` x `channel` x `new height` x `new width`. + (ndarray or None): the scaled boxes with dimension of + `num boxes` x 4. + """ + if inverse_uniform_sampling: + size = int( + round(1.0 / np.random.uniform(1.0 / max_size, 1.0 / min_size)) + ) + else: + size = int(round(np.random.uniform(min_size, max_size))) + + height = images.shape[2] + width = images.shape[3] + if (width <= height and width == size) or ( + height <= width and height == size + ): + return images, boxes + new_width = size + new_height = size + if width < height: + new_height = int(math.floor((float(height) / width) * size)) + if boxes is not None: + boxes = boxes * float(new_height) / height + else: + new_width = int(math.floor((float(width) / height) * size)) + if boxes is not None: + boxes = boxes * float(new_width) / width + + return ( + torch.nn.functional.interpolate( + images, + size=(new_height, new_width), + mode='bilinear', + align_corners=False, + ), + boxes, + ) + + +def crop_boxes(boxes, x_offset, y_offset): + """ + Peform crop on the bounding boxes given the offsets. + Args: + boxes (ndarray or None): bounding boxes to peform crop. The dimension + is `num boxes` x 4. + x_offset (int): cropping offset in the x axis. + y_offset (int): cropping offset in the y axis. + Returns: + cropped_boxes (ndarray or None): the cropped boxes with dimension of + `num boxes` x 4. + """ + cropped_boxes = boxes.copy() + cropped_boxes[:, [0, 2]] = boxes[:, [0, 2]] - x_offset + cropped_boxes[:, [1, 3]] = boxes[:, [1, 3]] - y_offset + + return cropped_boxes + + +def random_crop(images, size, boxes=None): + """ + Perform random spatial crop on the given images and corresponding boxes. + Args: + images (tensor): images to perform random crop. 
The dimension is + `num frames` x `channel` x `height` x `width`. + size (int): the size of height and width to crop on the image. + boxes (ndarray or None): optional. Corresponding boxes to images. + Dimension is `num boxes` x 4. + Returns: + cropped (tensor): cropped images with dimension of + `num frames` x `channel` x `size` x `size`. + cropped_boxes (ndarray or None): the cropped boxes with dimension of + `num boxes` x 4. + """ + if images.shape[2] == size and images.shape[3] == size: + return images + height = images.shape[2] + width = images.shape[3] + y_offset = 0 + if height > size: + y_offset = int(np.random.randint(0, height - size)) + x_offset = 0 + if width > size: + x_offset = int(np.random.randint(0, width - size)) + cropped = images[ + :, :, y_offset:y_offset + size, x_offset:x_offset + size + ] + + cropped_boxes = ( + crop_boxes(boxes, x_offset, y_offset) if boxes is not None else None + ) + + return cropped, cropped_boxes + + +def horizontal_flip(prob, images, boxes=None): + """ + Perform horizontal flip on the given images and corresponding boxes. + Args: + prob (float): probility to flip the images. + images (tensor): images to perform horizontal flip, the dimension is + `num frames` x `channel` x `height` x `width`. + boxes (ndarray or None): optional. Corresponding boxes to images. + Dimension is `num boxes` x 4. + Returns: + images (tensor): images with dimension of + `num frames` x `channel` x `height` x `width`. + flipped_boxes (ndarray or None): the flipped boxes with dimension of + `num boxes` x 4. + """ + if boxes is None: + flipped_boxes = None + else: + flipped_boxes = boxes.copy() + + if np.random.uniform() < prob: + images = images.flip((-1)) + + if len(images.shape) == 3: + width = images.shape[2] + elif len(images.shape) == 4: + width = images.shape[3] + else: + raise NotImplementedError("Dimension does not supported") + if boxes is not None: + flipped_boxes[:, [0, 2]] = width - boxes[:, [2, 0]] - 1 + + return images, flipped_boxes + + +def uniform_crop(images, size, spatial_idx, boxes=None, scale_size=None): + """ + Perform uniform spatial sampling on the images and corresponding boxes. + Args: + images (tensor): images to perform uniform crop. The dimension is + `num frames` x `channel` x `height` x `width`. + size (int): size of height and weight to crop the images. + spatial_idx (int): 0, 1, or 2 for left, center, and right crop if width + is larger than height. Or 0, 1, or 2 for top, center, and bottom + crop if height is larger than width. + boxes (ndarray or None): optional. Corresponding boxes to images. + Dimension is `num boxes` x 4. + scale_size (int): optinal. If not None, resize the images to scale_size before + performing any crop. + Returns: + cropped (tensor): images with dimension of + `num frames` x `channel` x `size` x `size`. + cropped_boxes (ndarray or None): the cropped boxes with dimension of + `num boxes` x 4. 
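+    Example (illustrative; sizes are arbitrary):
+        >>> frames = torch.rand(16, 3, 256, 320)
+        >>> center, _ = uniform_crop(frames, size=224, spatial_idx=1)
+        >>> center.shape
+        torch.Size([16, 3, 224, 224])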
+ """ + assert spatial_idx in [0, 1, 2] + ndim = len(images.shape) + if ndim == 3: + images = images.unsqueeze(0) + height = images.shape[2] + width = images.shape[3] + + if scale_size is not None: + if width <= height: + width, height = scale_size, int(height / width * scale_size) + else: + width, height = int(width / height * scale_size), scale_size + images = torch.nn.functional.interpolate( + images, + size=(height, width), + mode='bilinear', + align_corners=False, + ) + + y_offset = int(math.ceil((height - size) / 2)) + x_offset = int(math.ceil((width - size) / 2)) + + if height > width: + if spatial_idx == 0: + y_offset = 0 + elif spatial_idx == 2: + y_offset = height - size + else: + if spatial_idx == 0: + x_offset = 0 + elif spatial_idx == 2: + x_offset = width - size + cropped = images[ + :, :, y_offset:y_offset + size, x_offset:x_offset + size + ] + cropped_boxes = ( + crop_boxes(boxes, x_offset, y_offset) if boxes is not None else None + ) + if ndim == 3: + cropped = cropped.squeeze(0) + return cropped, cropped_boxes + + +def clip_boxes_to_image(boxes, height, width): + """ + Clip an array of boxes to an image with the given height and width. + Args: + boxes (ndarray): bounding boxes to perform clipping. + Dimension is `num boxes` x 4. + height (int): given image height. + width (int): given image width. + Returns: + clipped_boxes (ndarray): the clipped boxes with dimension of + `num boxes` x 4. + """ + clipped_boxes = boxes.copy() + clipped_boxes[:, [0, 2]] = np.minimum( + width - 1.0, np.maximum(0.0, boxes[:, [0, 2]]) + ) + clipped_boxes[:, [1, 3]] = np.minimum( + height - 1.0, np.maximum(0.0, boxes[:, [1, 3]]) + ) + return clipped_boxes + + +def blend(images1, images2, alpha): + """ + Blend two images with a given weight alpha. + Args: + images1 (tensor): the first images to be blended, the dimension is + `num frames` x `channel` x `height` x `width`. + images2 (tensor): the second images to be blended, the dimension is + `num frames` x `channel` x `height` x `width`. + alpha (float): the blending weight. + Returns: + (tensor): blended images, the dimension is + `num frames` x `channel` x `height` x `width`. + """ + return images1 * alpha + images2 * (1 - alpha) + + +def grayscale(images): + """ + Get the grayscale for the input images. The channels of images should be + in order BGR. + Args: + images (tensor): the input images for getting grayscale. Dimension is + `num frames` x `channel` x `height` x `width`. + Returns: + img_gray (tensor): blended images, the dimension is + `num frames` x `channel` x `height` x `width`. + """ + # R -> 0.299, G -> 0.587, B -> 0.114. + img_gray = torch.tensor(images) + gray_channel = ( + 0.299 * images[:, 2] + 0.587 * images[:, 1] + 0.114 * images[:, 0] + ) + img_gray[:, 0] = gray_channel + img_gray[:, 1] = gray_channel + img_gray[:, 2] = gray_channel + return img_gray + + +def color_jitter(images, img_brightness=0, img_contrast=0, img_saturation=0): + """ + Perfrom a color jittering on the input images. The channels of images + should be in order BGR. + Args: + images (tensor): images to perform color jitter. Dimension is + `num frames` x `channel` x `height` x `width`. + img_brightness (float): jitter ratio for brightness. + img_contrast (float): jitter ratio for contrast. + img_saturation (float): jitter ratio for saturation. + Returns: + images (tensor): the jittered images, the dimension is + `num frames` x `channel` x `height` x `width`. 
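+    Example (illustrative; the jitter ratios of 0.4 are arbitrary):
+        >>> frames = torch.rand(8, 3, 224, 224)
+        >>> frames = color_jitter(frames, img_brightness=0.4, img_contrast=0.4, img_saturation=0.4)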
+ """ + + jitter = [] + if img_brightness != 0: + jitter.append('brightness') + if img_contrast != 0: + jitter.append('contrast') + if img_saturation != 0: + jitter.append('saturation') + + if len(jitter) > 0: + order = np.random.permutation(np.arange(len(jitter))) + for idx in range(0, len(jitter)): + if jitter[order[idx]] == 'brightness': + images = brightness_jitter(img_brightness, images) + elif jitter[order[idx]] == 'contrast': + images = contrast_jitter(img_contrast, images) + elif jitter[order[idx]] == 'saturation': + images = saturation_jitter(img_saturation, images) + return images + + +def brightness_jitter(var, images): + """ + Perfrom brightness jittering on the input images. The channels of images + should be in order BGR. + Args: + var (float): jitter ratio for brightness. + images (tensor): images to perform color jitter. Dimension is + `num frames` x `channel` x `height` x `width`. + Returns: + images (tensor): the jittered images, the dimension is + `num frames` x `channel` x `height` x `width`. + """ + alpha = 1.0 + np.random.uniform(-var, var) + + img_bright = torch.zeros(images.shape) + images = blend(images, img_bright, alpha) + return images + + +def contrast_jitter(var, images): + """ + Perfrom contrast jittering on the input images. The channels of images + should be in order BGR. + Args: + var (float): jitter ratio for contrast. + images (tensor): images to perform color jitter. Dimension is + `num frames` x `channel` x `height` x `width`. + Returns: + images (tensor): the jittered images, the dimension is + `num frames` x `channel` x `height` x `width`. + """ + alpha = 1.0 + np.random.uniform(-var, var) + + img_gray = grayscale(images) + img_gray[:] = torch.mean(img_gray, dim=(1, 2, 3), keepdim=True) + images = blend(images, img_gray, alpha) + return images + + +def saturation_jitter(var, images): + """ + Perfrom saturation jittering on the input images. The channels of images + should be in order BGR. + Args: + var (float): jitter ratio for saturation. + images (tensor): images to perform color jitter. Dimension is + `num frames` x `channel` x `height` x `width`. + Returns: + images (tensor): the jittered images, the dimension is + `num frames` x `channel` x `height` x `width`. + """ + alpha = 1.0 + np.random.uniform(-var, var) + img_gray = grayscale(images) + images = blend(images, img_gray, alpha) + + return images + + +def lighting_jitter(images, alphastd, eigval, eigvec): + """ + Perform AlexNet-style PCA jitter on the given images. + Args: + images (tensor): images to perform lighting jitter. Dimension is + `num frames` x `channel` x `height` x `width`. + alphastd (float): jitter ratio for PCA jitter. + eigval (list): eigenvalues for PCA jitter. + eigvec (list[list]): eigenvectors for PCA jitter. + Returns: + out_images (tensor): the jittered images, the dimension is + `num frames` x `channel` x `height` x `width`. + """ + if alphastd == 0: + return images + # generate alpha1, alpha2, alpha3. 
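+    # Sample one set of PCA coefficients per clip and project them through the
+    # eigenvectors (scaled by the eigenvalues), giving a single RGB offset that
+    # is added to every frame below.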
+ alpha = np.random.normal(0, alphastd, size=(1, 3)) + eig_vec = np.array(eigvec) + eig_val = np.reshape(eigval, (1, 3)) + rgb = np.sum( + eig_vec * np.repeat(alpha, 3, axis=0) * np.repeat(eig_val, 3, axis=0), + axis=1, + ) + out_images = torch.zeros_like(images) + if len(images.shape) == 3: + # C H W + channel_dim = 0 + elif len(images.shape) == 4: + # T C H W + channel_dim = 1 + else: + raise NotImplementedError(f'Unsupported dimension {len(images.shape)}') + + for idx in range(images.shape[channel_dim]): + # C H W + if len(images.shape) == 3: + out_images[idx] = images[idx] + rgb[2 - idx] + # T C H W + elif len(images.shape) == 4: + out_images[:, idx] = images[:, idx] + rgb[2 - idx] + else: + raise NotImplementedError( + f'Unsupported dimension {len(images.shape)}' + ) + + return out_images + + +def color_normalization(images, mean, stddev): + """ + Perform color nomration on the given images. + Args: + images (tensor): images to perform color normalization. Dimension is + `num frames` x `channel` x `height` x `width`. + mean (list): mean values for normalization. + stddev (list): standard deviations for normalization. + + Returns: + out_images (tensor): the noramlized images, the dimension is + `num frames` x `channel` x `height` x `width`. + """ + if len(images.shape) == 3: + assert ( + len(mean) == images.shape[0] + ), 'channel mean not computed properly' + assert ( + len(stddev) == images.shape[0] + ), 'channel stddev not computed properly' + elif len(images.shape) == 4: + assert ( + len(mean) == images.shape[1] + ), 'channel mean not computed properly' + assert ( + len(stddev) == images.shape[1] + ), 'channel stddev not computed properly' + else: + raise NotImplementedError(f'Unsupported dimension {len(images.shape)}') + + out_images = torch.zeros_like(images) + for idx in range(len(mean)): + # C H W + if len(images.shape) == 3: + out_images[idx] = (images[idx] - mean[idx]) / stddev[idx] + elif len(images.shape) == 4: + out_images[:, idx] = (images[:, idx] - mean[idx]) / stddev[idx] + else: + raise NotImplementedError( + f'Unsupported dimension {len(images.shape)}' + ) + return out_images + + +def _get_param_spatial_crop( + scale, ratio, height, width, num_repeat=10, log_scale=True, switch_hw=False +): + """ + Given scale, ratio, height and width, return sampled coordinates of the videos. + """ + for _ in range(num_repeat): + area = height * width + target_area = random.uniform(*scale) * area + if log_scale: + log_ratio = (math.log(ratio[0]), math.log(ratio[1])) + aspect_ratio = math.exp(random.uniform(*log_ratio)) + else: + aspect_ratio = random.uniform(*ratio) + + w = int(round(math.sqrt(target_area * aspect_ratio))) + h = int(round(math.sqrt(target_area / aspect_ratio))) + + if np.random.uniform() < 0.5 and switch_hw: + w, h = h, w + + if 0 < w <= width and 0 < h <= height: + i = random.randint(0, height - h) + j = random.randint(0, width - w) + return i, j, h, w + + # Fallback to central crop + in_ratio = float(width) / float(height) + if in_ratio < min(ratio): + w = width + h = int(round(w / min(ratio))) + elif in_ratio > max(ratio): + h = height + w = int(round(h * max(ratio))) + else: # whole image + w = width + h = height + i = (height - h) // 2 + j = (width - w) // 2 + return i, j, h, w + + +def random_resized_crop( + images, + target_height, + target_width, + scale=(0.8, 1.0), + ratio=(3.0 / 4.0, 4.0 / 3.0), +): + """ + Crop the given images to random size and aspect ratio. 
A crop of random + size (default: of 0.08 to 1.0) of the original size and a random aspect + ratio (default: of 3/4 to 4/3) of the original aspect ratio is made. This + crop is finally resized to given size. This is popularly used to train the + Inception networks. + + Args: + images: Images to perform resizing and cropping. + target_height: Desired height after cropping. + target_width: Desired width after cropping. + scale: Scale range of Inception-style area based random resizing. + ratio: Aspect ratio range of Inception-style area based random resizing. + """ + + height = images.shape[2] + width = images.shape[3] + + i, j, h, w = _get_param_spatial_crop(scale, ratio, height, width) + cropped = images[:, :, i:i + h, j:j + w] + return torch.nn.functional.interpolate( + cropped, + size=(target_height, target_width), + mode='bilinear', + align_corners=False, + ) + + +def random_resized_crop_with_shift( + images, + target_height, + target_width, + scale=(0.8, 1.0), + ratio=(3.0 / 4.0, 4.0 / 3.0), +): + """ + This is similar to random_resized_crop. However, it samples two different + boxes (for cropping) for the first and last frame. It then linearly + interpolates the two boxes for other frames. + + Args: + images: Images to perform resizing and cropping. + target_height: Desired height after cropping. + target_width: Desired width after cropping. + scale: Scale range of Inception-style area based random resizing. + ratio: Aspect ratio range of Inception-style area based random resizing. + """ + t = images.shape[1] + height = images.shape[2] + width = images.shape[3] + + i, j, h, w = _get_param_spatial_crop(scale, ratio, height, width) + i_, j_, h_, w_ = _get_param_spatial_crop(scale, ratio, height, width) + i_s = [int(i) for i in torch.linspace(i, i_, steps=t).tolist()] + j_s = [int(i) for i in torch.linspace(j, j_, steps=t).tolist()] + h_s = [int(i) for i in torch.linspace(h, h_, steps=t).tolist()] + w_s = [int(i) for i in torch.linspace(w, w_, steps=t).tolist()] + out = torch.zeros((3, t, target_height, target_width)) + for ind in range(t): + out[:, ind:ind + 1, :, :] = torch.nn.functional.interpolate( + images[ + :, + ind:ind + 1, + i_s[ind]:i_s[ind] + h_s[ind], + j_s[ind]:j_s[ind] + w_s[ind], + ], + size=(target_height, target_width), + mode='bilinear', + align_corners=False, + ) + return out + + +def create_random_augment( + input_size, + auto_augment=None, + interpolation='bilinear', +): + """ + Get video randaug transform. + + Args: + input_size: The size of the input video in tuple. + auto_augment: Parameters for randaug. An example: + "rand-m7-n4-mstd0.5-inc1" (m is the magnitude and n is the number + of operations to apply). + interpolation: Interpolation method. + """ + if isinstance(input_size, tuple): + img_size = input_size[-2:] + else: + img_size = input_size + + if auto_augment: + assert isinstance(auto_augment, str) + if isinstance(img_size, tuple): + img_size_min = min(img_size) + else: + img_size_min = img_size + aa_params = {'translate_const': int(img_size_min * 0.45)} + if interpolation and interpolation != 'random': + aa_params['interpolation'] = _pil_interp(interpolation) + if auto_augment.startswith('rand'): + return transforms.Compose( + [rand_augment_transform(auto_augment, aa_params)] + ) + raise NotImplementedError + + +def random_sized_crop_img( + im, + size, + jitter_scale=(0.08, 1.0), + jitter_aspect=(3.0 / 4.0, 4.0 / 3.0), + max_iter=10, +): + """ + Performs Inception-style cropping (used for training). 
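+    Example (illustrative; assumes a single image tensor in (C, H, W) layout):
+        >>> img = torch.rand(3, 256, 320)
+        >>> crop = random_sized_crop_img(img, size=224)
+        >>> crop.shape
+        torch.Size([3, 224, 224])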
+ """ + assert ( + len(im.shape) == 3 + ), 'Currently only support image for random_sized_crop' + h, w = im.shape[1:3] + i, j, h, w = _get_param_spatial_crop( + scale=jitter_scale, + ratio=jitter_aspect, + height=h, + width=w, + num_repeat=max_iter, + log_scale=False, + switch_hw=True, + ) + cropped = im[:, i:i + h, j:j + w] + return torch.nn.functional.interpolate( + cropped.unsqueeze(0), + size=(size, size), + mode='bilinear', + align_corners=False, + ).squeeze(0) + + +# The following code are modified based on timm lib, we will replace the following +# contents with dependency from PyTorchVideo. +# https://github.com/facebookresearch/pytorchvideo +class RandomResizedCropAndInterpolation: + """Crop the given PIL Image to random size and aspect ratio with random interpolation. + A crop of random size (default: of 0.08 to 1.0) of the original size and a random + aspect ratio (default: of 3/4 to 4/3) of the original aspect ratio is made. This crop + is finally resized to given size. + This is popularly used to train the Inception networks. + Args: + size: expected output size of each edge + scale: range of size of the origin size cropped + ratio: range of aspect ratio of the origin aspect ratio cropped + interpolation: Default: PIL.Image.BILINEAR + """ + + def __init__( + self, + size, + scale=(0.08, 1.0), + ratio=(3.0 / 4.0, 4.0 / 3.0), + interpolation='bilinear', + ): + if isinstance(size, tuple): + self.size = size + else: + self.size = (size, size) + if (scale[0] > scale[1]) or (ratio[0] > ratio[1]): + print('range should be of kind (min, max)') + + if interpolation == 'random': + self.interpolation = _RANDOM_INTERPOLATION + else: + self.interpolation = _pil_interp(interpolation) + self.scale = scale + self.ratio = ratio + + @staticmethod + def get_params(img, scale, ratio): + """Get parameters for ``crop`` for a random sized crop. + Args: + img (PIL Image): Image to be cropped. + scale (tuple): range of size of the origin size cropped + ratio (tuple): range of aspect ratio of the origin aspect ratio cropped + Returns: + tuple: params (i, j, h, w) to be passed to ``crop`` for a random + sized crop. + """ + area = img.size[0] * img.size[1] + + for _ in range(10): + target_area = random.uniform(*scale) * area + log_ratio = (math.log(ratio[0]), math.log(ratio[1])) + aspect_ratio = math.exp(random.uniform(*log_ratio)) + + w = int(round(math.sqrt(target_area * aspect_ratio))) + h = int(round(math.sqrt(target_area / aspect_ratio))) + + if w <= img.size[0] and h <= img.size[1]: + i = random.randint(0, img.size[1] - h) + j = random.randint(0, img.size[0] - w) + return i, j, h, w + + # Fallback to central crop + in_ratio = img.size[0] / img.size[1] + if in_ratio < min(ratio): + w = img.size[0] + h = int(round(w / min(ratio))) + elif in_ratio > max(ratio): + h = img.size[1] + w = int(round(h * max(ratio))) + else: # whole image + w = img.size[0] + h = img.size[1] + i = (img.size[1] - h) // 2 + j = (img.size[0] - w) // 2 + return i, j, h, w + + def __call__(self, img): + """ + Args: + img (PIL Image): Image to be cropped and resized. + Returns: + PIL Image: Randomly cropped and resized image. 
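+        Example (illustrative; the input image size is arbitrary):
+            >>> rrc = RandomResizedCropAndInterpolation(size=224)
+            >>> out = rrc(Image.new('RGB', (320, 256)))
+            >>> out.size
+            (224, 224)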
+ """ + i, j, h, w = self.get_params(img, self.scale, self.ratio) + if isinstance(self.interpolation, (tuple, list)): + interpolation = random.choice(self.interpolation) + else: + interpolation = self.interpolation + return F.resized_crop(img, i, j, h, w, self.size, interpolation) + + def __repr__(self): + if isinstance(self.interpolation, (tuple, list)): + interpolate_str = ' '.join( + [_pil_interpolation_to_str[x] for x in self.interpolation] + ) + else: + interpolate_str = _pil_interpolation_to_str[self.interpolation] + format_string = self.__class__.__name__ + '(size={0}'.format(self.size) + format_string += ', scale={0}'.format( + tuple(round(s, 4) for s in self.scale) + ) + format_string += ', ratio={0}'.format( + tuple(round(r, 4) for r in self.ratio) + ) + format_string += ', interpolation={0})'.format(interpolate_str) + return format_string + + +class Compose(object): + """Composes several transforms + Args: + transforms (list of ``Transform`` objects): list of transforms + to compose + """ + + def __init__(self, transforms): + self.transforms = transforms + + def __call__(self, clip): + for t in self.transforms: + clip = t(clip) + return clip + + +class RandomHorizontalFlip(object): + """Horizontally flip the list of given images randomly + with a probability 0.5 + """ + + def __call__(self, clip): + """ + Args: + img (PIL.Image or numpy.ndarray): List of images to be cropped + in format (h, w, c) in numpy.ndarray + Returns: + PIL.Image or numpy.ndarray: Randomly flipped clip + """ + if random.random() < 0.5: + if isinstance(clip[0], np.ndarray): + return [np.fliplr(img) for img in clip] + elif isinstance(clip[0], PIL.Image.Image): + return [ + img.transpose(PIL.Image.FLIP_LEFT_RIGHT) for img in clip + ] + else: + raise TypeError('Expected numpy.ndarray or PIL.Image' + + ' but got list of {0}'.format(type(clip[0]))) + return clip + + +class RandomResize(object): + """Resizes a list of (H x W x C) numpy.ndarray to the final size + The larger the original image is, the more times it takes to + interpolate + Args: + interpolation (str): Can be one of 'nearest', 'bilinear' + defaults to nearest + size (tuple): (widht, height) + """ + + def __init__(self, ratio=(3. / 4., 4. 
/ 3.), interpolation='nearest'): + self.ratio = ratio + self.interpolation = interpolation + + def __call__(self, clip): + scaling_factor = random.uniform(self.ratio[0], self.ratio[1]) + + if isinstance(clip[0], np.ndarray): + im_h, im_w, im_c = clip[0].shape + elif isinstance(clip[0], PIL.Image.Image): + im_w, im_h = clip[0].size + + new_w = int(im_w * scaling_factor) + new_h = int(im_h * scaling_factor) + new_size = (new_w, new_h) + resized = FF.resize_clip( + clip, new_size, interpolation=self.interpolation) + return resized + + +class Resize(object): + """Resizes a list of (H x W x C) numpy.ndarray to the final size + The larger the original image is, the more times it takes to + interpolate + Args: + interpolation (str): Can be one of 'nearest', 'bilinear' + defaults to nearest + size (tuple): (widht, height) + """ + + def __init__(self, size, interpolation='nearest'): + self.size = size + self.interpolation = interpolation + + def __call__(self, clip): + resized = FF.resize_clip( + clip, self.size, interpolation=self.interpolation) + return resized + + +class RandomCrop(object): + """Extract random crop at the same location for a list of images + Args: + size (sequence or int): Desired output size for the + crop in format (h, w) + """ + + def __init__(self, size): + if isinstance(size, numbers.Number): + size = (size, size) + + self.size = size + + def __call__(self, clip): + """ + Args: + img (PIL.Image or numpy.ndarray): List of images to be cropped + in format (h, w, c) in numpy.ndarray + Returns: + PIL.Image or numpy.ndarray: Cropped list of images + """ + h, w = self.size + if isinstance(clip[0], np.ndarray): + im_h, im_w, im_c = clip[0].shape + elif isinstance(clip[0], PIL.Image.Image): + im_w, im_h = clip[0].size + else: + raise TypeError('Expected numpy.ndarray or PIL.Image' + + 'but got list of {0}'.format(type(clip[0]))) + if w > im_w or h > im_h: + error_msg = ( + 'Initial image size should be larger then ' + 'cropped size but got cropped sizes : ({w}, {h}) while ' + 'initial image is ({im_w}, {im_h})'.format( + im_w=im_w, im_h=im_h, w=w, h=h)) + raise ValueError(error_msg) + + x1 = random.randint(0, im_w - w) + y1 = random.randint(0, im_h - h) + cropped = FF.crop_clip(clip, y1, x1, h, w) + + return cropped + + +class ThreeCrop(object): + """Extract random crop at the same location for a list of images + Args: + size (sequence or int): Desired output size for the + crop in format (h, w) + """ + + def __init__(self, size): + if isinstance(size, numbers.Number): + size = (size, size) + + self.size = size + + def __call__(self, clip): + """ + Args: + img (PIL.Image or numpy.ndarray): List of images to be cropped + in format (h, w, c) in numpy.ndarray + Returns: + PIL.Image or numpy.ndarray: Cropped list of images + """ + h, w = self.size + if isinstance(clip[0], np.ndarray): + im_h, im_w, im_c = clip[0].shape + elif isinstance(clip[0], PIL.Image.Image): + im_w, im_h = clip[0].size + else: + raise TypeError('Expected numpy.ndarray or PIL.Image' + + 'but got list of {0}'.format(type(clip[0]))) + if w != im_w and h != im_h: + clip = FF.resize_clip(clip, self.size, interpolation="bilinear") + im_h, im_w, im_c = clip[0].shape + + step = np.max((np.max((im_w, im_h)) - self.size[0]) // 2, 0) + cropped = [] + for i in range(3): + if (im_h > self.size[0]): + x1 = 0 + y1 = i * step + cropped.extend(FF.crop_clip(clip, y1, x1, h, w)) + else: + x1 = i * step + y1 = 0 + cropped.extend(FF.crop_clip(clip, y1, x1, h, w)) + return cropped + + +class RandomRotation(object): + """Rotate entire 
clip randomly by a random angle within + given bounds + Args: + degrees (sequence or int): Range of degrees to select from + If degrees is a number instead of sequence like (min, max), + the range of degrees, will be (-degrees, +degrees). + """ + + def __init__(self, degrees): + if isinstance(degrees, numbers.Number): + if degrees < 0: + raise ValueError('If degrees is a single number,' + 'must be positive') + degrees = (-degrees, degrees) + else: + if len(degrees) != 2: + raise ValueError('If degrees is a sequence,' + 'it must be of len 2.') + + self.degrees = degrees + + def __call__(self, clip): + """ + Args: + img (PIL.Image or numpy.ndarray): List of images to be cropped + in format (h, w, c) in numpy.ndarray + Returns: + PIL.Image or numpy.ndarray: Cropped list of images + """ + import skimage + angle = random.uniform(self.degrees[0], self.degrees[1]) + if isinstance(clip[0], np.ndarray): + rotated = [skimage.transform.rotate(img, angle) for img in clip] + elif isinstance(clip[0], PIL.Image.Image): + rotated = [img.rotate(angle) for img in clip] + else: + raise TypeError('Expected numpy.ndarray or PIL.Image' + + 'but got list of {0}'.format(type(clip[0]))) + + return rotated + + +class CenterCrop(object): + """Extract center crop at the same location for a list of images + Args: + size (sequence or int): Desired output size for the + crop in format (h, w) + """ + + def __init__(self, size): + if isinstance(size, numbers.Number): + size = (size, size) + + self.size = size + + def __call__(self, clip): + """ + Args: + img (PIL.Image or numpy.ndarray): List of images to be cropped + in format (h, w, c) in numpy.ndarray + Returns: + PIL.Image or numpy.ndarray: Cropped list of images + """ + h, w = self.size + if isinstance(clip[0], np.ndarray): + im_h, im_w, im_c = clip[0].shape + elif isinstance(clip[0], PIL.Image.Image): + im_w, im_h = clip[0].size + else: + raise TypeError('Expected numpy.ndarray or PIL.Image' + + 'but got list of {0}'.format(type(clip[0]))) + if w > im_w or h > im_h: + error_msg = ( + 'Initial image size should be larger then ' + 'cropped size but got cropped sizes : ({w}, {h}) while ' + 'initial image is ({im_w}, {im_h})'.format( + im_w=im_w, im_h=im_h, w=w, h=h)) + raise ValueError(error_msg) + + x1 = int(round((im_w - w) / 2.)) + y1 = int(round((im_h - h) / 2.)) + cropped = FF.crop_clip(clip, y1, x1, h, w) + + return cropped + + +class ColorJitter(object): + """ + Randomly change the brightness, contrast and saturation and hue of the clip + + Args: + brightness (float): How much to jitter brightness. brightness_factor + is chosen uniformly from [max(0, 1 - brightness), 1 + brightness]. + contrast (float): How much to jitter contrast. contrast_factor + is chosen uniformly from [max(0, 1 - contrast), 1 + contrast]. + saturation (float): How much to jitter saturation. saturation_factor + is chosen uniformly from [max(0, 1 - saturation), 1 + saturation]. + hue(float): How much to jitter hue. hue_factor is chosen uniformly from + [-hue, hue]. Should be >=0 and <= 0.5. 
+ """ + + def __init__(self, brightness=0, contrast=0, saturation=0, hue=0): + self.brightness = brightness + self.contrast = contrast + self.saturation = saturation + self.hue = hue + + def get_params(self, brightness, contrast, saturation, hue): + if brightness > 0: + brightness_factor = random.uniform( + max(0, 1 - brightness), 1 + brightness) + else: + brightness_factor = None + + if contrast > 0: + contrast_factor = random.uniform( + max(0, 1 - contrast), 1 + contrast) + else: + contrast_factor = None + + if saturation > 0: + saturation_factor = random.uniform( + max(0, 1 - saturation), 1 + saturation) + else: + saturation_factor = None + + if hue > 0: + hue_factor = random.uniform(-hue, hue) + else: + hue_factor = None + return brightness_factor, contrast_factor, saturation_factor, hue_factor + + def __call__(self, clip): + """ + Args: + clip (list): list of PIL.Image + Returns: + list PIL.Image : list of transformed PIL.Image + """ + if isinstance(clip[0], np.ndarray): + raise TypeError( + 'Color jitter not yet implemented for numpy arrays') + elif isinstance(clip[0], PIL.Image.Image): + brightness, contrast, saturation, hue = self.get_params( + self.brightness, self.contrast, self.saturation, self.hue) + + # Create img transform function sequence + img_transforms = [] + if brightness is not None: + img_transforms.append(lambda img: torchvision.transforms.functional.adjust_brightness(img, brightness)) + if saturation is not None: + img_transforms.append(lambda img: torchvision.transforms.functional.adjust_saturation(img, saturation)) + if hue is not None: + img_transforms.append(lambda img: torchvision.transforms.functional.adjust_hue(img, hue)) + if contrast is not None: + img_transforms.append(lambda img: torchvision.transforms.functional.adjust_contrast(img, contrast)) + random.shuffle(img_transforms) + + # Apply to all images + jittered_clip = [] + for img in clip: + for func in img_transforms: + jittered_img = func(img) + jittered_clip.append(jittered_img) + + else: + raise TypeError('Expected numpy.ndarray or PIL.Image' + + 'but got list of {0}'.format(type(clip[0]))) + return jittered_clip + + +class Normalize(object): + """Normalize a clip with mean and standard deviation. + Given mean: ``(M1,...,Mn)`` and std: ``(S1,..,Sn)`` for ``n`` channels, this transform + will normalize each channel of the input ``torch.*Tensor`` i.e. + ``input[channel] = (input[channel] - mean[channel]) / std[channel]`` + .. note:: + This transform acts out of place, i.e., it does not mutates the input tensor. + Args: + mean (sequence): Sequence of means for each channel. + std (sequence): Sequence of standard deviations for each channel. + """ + + def __init__(self, mean, std): + self.mean = mean + self.std = std + + def __call__(self, clip): + """ + Args: + clip (Tensor): Tensor clip of size (T, C, H, W) to be normalized. + Returns: + Tensor: Normalized Tensor clip. + """ + return FF.normalize(clip, self.mean, self.std) + + def __repr__(self): + return self.__class__.__name__ + '(mean={0}, std={1})'.format(self.mean, self.std) diff --git a/build/lib/datasets/utils/video/volume_transforms.py b/build/lib/datasets/utils/video/volume_transforms.py new file mode 100644 index 00000000..0a01bb36 --- /dev/null +++ b/build/lib/datasets/utils/video/volume_transforms.py @@ -0,0 +1,151 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. 
+# + +import numpy as np +from PIL import Image + +import torch + + +def convert_img(img): + """Converts (H, W, C) numpy.ndarray to (C, W, H) format""" + if len(img.shape) == 3: + img = img.transpose(2, 0, 1) + if len(img.shape) == 2: + img = np.expand_dims(img, 0) + return img + + +class ClipToTensor(object): + """Convert a list of m (H x W x C) numpy.ndarrays in the range [0, 255] + to a torch.FloatTensor of shape (C x m x H x W) in the range [0, 1.0] + """ + + def __init__(self, channel_nb=3, div_255=True, numpy=False): + self.channel_nb = channel_nb + self.div_255 = div_255 + self.numpy = numpy + + def __call__(self, clip): + """ + Args: clip (list of numpy.ndarray): clip (list of images) + to be converted to tensor. + """ + # Retrieve shape + if isinstance(clip[0], np.ndarray): + h, w, ch = clip[0].shape + assert ch == self.channel_nb, "Got {0} instead of 3 channels".format(ch) + elif isinstance(clip[0], Image.Image): + w, h = clip[0].size + else: + raise TypeError( + "Expected numpy.ndarray or PIL.Image\ + but got list of {0}".format( + type(clip[0]) + ) + ) + + np_clip = np.zeros([self.channel_nb, len(clip), int(h), int(w)]) + + # Convert + for img_idx, img in enumerate(clip): + if isinstance(img, np.ndarray): + pass + elif isinstance(img, Image.Image): + img = np.array(img, copy=False) + else: + raise TypeError( + "Expected numpy.ndarray or PIL.Image\ + but got list of {0}".format( + type(clip[0]) + ) + ) + img = convert_img(img) + np_clip[:, img_idx, :, :] = img + if self.numpy: + if self.div_255: + np_clip = np_clip / 255.0 + return np_clip + + else: + tensor_clip = torch.from_numpy(np_clip) + + if not isinstance(tensor_clip, torch.FloatTensor): + tensor_clip = tensor_clip.float() + if self.div_255: + tensor_clip = torch.div(tensor_clip, 255) + return tensor_clip + + +# Note this norms data to -1/1 +class ClipToTensor_K(object): + """Convert a list of m (H x W x C) numpy.ndarrays in the range [0, 255] + to a torch.FloatTensor of shape (C x m x H x W) in the range [0, 1.0] + """ + + def __init__(self, channel_nb=3, div_255=True, numpy=False): + self.channel_nb = channel_nb + self.div_255 = div_255 + self.numpy = numpy + + def __call__(self, clip): + """ + Args: clip (list of numpy.ndarray): clip (list of images) + to be converted to tensor. 
+ """ + # Retrieve shape + if isinstance(clip[0], np.ndarray): + h, w, ch = clip[0].shape + assert ch == self.channel_nb, "Got {0} instead of 3 channels".format(ch) + elif isinstance(clip[0], Image.Image): + w, h = clip[0].size + else: + raise TypeError( + "Expected numpy.ndarray or PIL.Image\ + but got list of {0}".format( + type(clip[0]) + ) + ) + + np_clip = np.zeros([self.channel_nb, len(clip), int(h), int(w)]) + + # Convert + for img_idx, img in enumerate(clip): + if isinstance(img, np.ndarray): + pass + elif isinstance(img, Image.Image): + img = np.array(img, copy=False) + else: + raise TypeError( + "Expected numpy.ndarray or PIL.Image\ + but got list of {0}".format( + type(clip[0]) + ) + ) + img = convert_img(img) + np_clip[:, img_idx, :, :] = img + if self.numpy: + if self.div_255: + np_clip = (np_clip - 127.5) / 127.5 + return np_clip + + else: + tensor_clip = torch.from_numpy(np_clip) + + if not isinstance(tensor_clip, torch.FloatTensor): + tensor_clip = tensor_clip.float() + if self.div_255: + tensor_clip = torch.div(torch.sub(tensor_clip, 127.5), 127.5) + return tensor_clip + + +class ToTensor(object): + """Converts numpy array to tensor""" + + def __call__(self, array): + tensor = torch.from_numpy(array) + return tensor diff --git a/build/lib/datasets/utils/weighted_sampler.py b/build/lib/datasets/utils/weighted_sampler.py new file mode 100644 index 00000000..fd40825e --- /dev/null +++ b/build/lib/datasets/utils/weighted_sampler.py @@ -0,0 +1,97 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. +# + +from typing import Iterator, Optional +from operator import itemgetter +import numpy as np + +import torch +from torch.utils.data import ( + Dataset, + Sampler, + DistributedSampler, + WeightedRandomSampler +) + + +class DatasetFromSampler(Dataset): + + def __init__(self, sampler: Sampler): + self.sampler = sampler + self.sampler_list = None + + def __getitem__(self, index: int): + if self.sampler_list is None: + self.sampler_list = list(self.sampler) + return self.sampler_list[index] + + def __len__(self) -> int: + return len(self.sampler) + + +class DistributedSamplerWrapper(DistributedSampler): + """ Convert any Pytorch Sampler to a DistributedSampler """ + + def __init__( + self, + sampler, + num_replicas: Optional[int] = None, + rank: Optional[int] = None, + shuffle: bool = True, + ): + super(DistributedSamplerWrapper, self).__init__( + DatasetFromSampler(sampler), + num_replicas=num_replicas, + rank=rank, + shuffle=shuffle, + ) + self.sampler = sampler + + def __iter__(self) -> Iterator[int]: + self.dataset = DatasetFromSampler(self.sampler) + indexes_of_indexes = super().__iter__() + subsampler_indexes = self.dataset + return iter(itemgetter(*indexes_of_indexes)(subsampler_indexes)) + + +class CustomWeightedRandomSampler(WeightedRandomSampler): + """ Generalized WeightedRandomSampler to allow for more than 2^24 samples """ + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + def __iter__(self): + rand_tensor = np.random.choice( + range(0, len(self.weights)), + size=self.num_samples, + p=self.weights.numpy() / torch.sum(self.weights).numpy(), + replace=self.replacement + ) + rand_tensor = torch.from_numpy(rand_tensor) + return iter(rand_tensor.tolist()) + + +class DistributedWeightedSampler(DistributedSamplerWrapper): + + def __init__( + self, + weights, + num_replicas: Optional[int] = None, + 
rank: Optional[int] = None, + shuffle: bool = True, + ): + weighted_sampler = CustomWeightedRandomSampler( + weights=weights, + num_samples=len(weights), + replacement=False) + + super(DistributedWeightedSampler, self).__init__( + sampler=weighted_sampler, + num_replicas=num_replicas, + rank=rank, + shuffle=shuffle, + ) diff --git a/build/lib/datasets/video_dataset.py b/build/lib/datasets/video_dataset.py new file mode 100644 index 00000000..b05cc701 --- /dev/null +++ b/build/lib/datasets/video_dataset.py @@ -0,0 +1,272 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. +# + +import os +import pathlib +import warnings + +from logging import getLogger + +import numpy as np +import pandas as pd + +from decord import VideoReader, cpu + +import torch + +from src.datasets.utils.weighted_sampler import DistributedWeightedSampler + +_GLOBAL_SEED = 0 +logger = getLogger() + + +def make_videodataset( + data_paths, + batch_size, + frames_per_clip=8, + frame_step=4, + num_clips=1, + random_clip_sampling=True, + allow_clip_overlap=False, + filter_short_videos=False, + filter_long_videos=int(10**9), + transform=None, + shared_transform=None, + rank=0, + world_size=1, + datasets_weights=None, + collator=None, + drop_last=True, + num_workers=10, + pin_mem=True, + duration=None, + log_dir=None, +): + dataset = VideoDataset( + data_paths=data_paths, + datasets_weights=datasets_weights, + frames_per_clip=frames_per_clip, + frame_step=frame_step, + num_clips=num_clips, + random_clip_sampling=random_clip_sampling, + allow_clip_overlap=allow_clip_overlap, + filter_short_videos=filter_short_videos, + filter_long_videos=filter_long_videos, + duration=duration, + shared_transform=shared_transform, + transform=transform) + + logger.info('VideoDataset dataset created') + if datasets_weights is not None: + dist_sampler = DistributedWeightedSampler( + dataset.sample_weights, + num_replicas=world_size, + rank=rank, + shuffle=True) + else: + dist_sampler = torch.utils.data.distributed.DistributedSampler( + dataset, + num_replicas=world_size, + rank=rank, + shuffle=True) + + data_loader = torch.utils.data.DataLoader( + dataset, + collate_fn=collator, + sampler=dist_sampler, + batch_size=batch_size, + drop_last=drop_last, + pin_memory=pin_mem, + num_workers=num_workers, + persistent_workers=num_workers > 0) + logger.info('VideoDataset unsupervised data loader created') + + return dataset, data_loader, dist_sampler + + +class VideoDataset(torch.utils.data.Dataset): + """ Video classification dataset. 
""" + + def __init__( + self, + data_paths, + datasets_weights=None, + frames_per_clip=16, + frame_step=4, + num_clips=1, + transform=None, + shared_transform=None, + random_clip_sampling=True, + allow_clip_overlap=False, + filter_short_videos=False, + filter_long_videos=int(10**9), + duration=None, # duration in seconds + ): + self.data_paths = data_paths + self.datasets_weights = datasets_weights + self.frames_per_clip = frames_per_clip + self.frame_step = frame_step + self.num_clips = num_clips + self.transform = transform + self.shared_transform = shared_transform + self.random_clip_sampling = random_clip_sampling + self.allow_clip_overlap = allow_clip_overlap + self.filter_short_videos = filter_short_videos + self.filter_long_videos = filter_long_videos + self.duration = duration + + if VideoReader is None: + raise ImportError('Unable to import "decord" which is required to read videos.') + + # Load video paths and labels + samples, labels = [], [] + self.num_samples_per_dataset = [] + for data_path in self.data_paths: + + if data_path[-4:] == '.csv': + data = pd.read_csv(data_path, header=None, delimiter=" ") + samples += list(data.values[:, 0]) + labels += list(data.values[:, 1]) + num_samples = len(data) + self.num_samples_per_dataset.append(num_samples) + + elif data_path[-4:] == '.npy': + data = np.load(data_path, allow_pickle=True) + data = list(map(lambda x: repr(x)[1:-1], data)) + samples += data + labels += [0] * len(data) + num_samples = len(data) + self.num_samples_per_dataset.append(len(data)) + + # [Optional] Weights for each sample to be used by downstream + # weighted video sampler + self.sample_weights = None + if self.datasets_weights is not None: + self.sample_weights = [] + for dw, ns in zip(self.datasets_weights, self.num_samples_per_dataset): + self.sample_weights += [dw / ns] * ns + + self.samples = samples + self.labels = labels + + def __getitem__(self, index): + sample = self.samples[index] + + # Keep trying to load videos until you find a valid sample + loaded_video = False + while not loaded_video: + buffer, clip_indices = self.loadvideo_decord(sample) # [T H W 3] + loaded_video = len(buffer) > 0 + if not loaded_video: + index = np.random.randint(self.__len__()) + sample = self.samples[index] + + # Label/annotations for video + label = self.labels[index] + + def split_into_clips(video): + """ Split video into a list of clips """ + fpc = self.frames_per_clip + nc = self.num_clips + return [video[i*fpc:(i+1)*fpc] for i in range(nc)] + + # Parse video into frames & apply data augmentations + if self.shared_transform is not None: + buffer = self.shared_transform(buffer) + buffer = split_into_clips(buffer) + if self.transform is not None: + buffer = [self.transform(clip) for clip in buffer] + + return buffer, label, clip_indices + + def loadvideo_decord(self, sample): + """ Load video content using Decord """ + + fname = sample + if not os.path.exists(fname): + warnings.warn(f'video path not found {fname=}') + return [], None + + _fsize = os.path.getsize(fname) + if _fsize < 1 * 1024: # avoid hanging issue + warnings.warn(f'video too short {fname=}') + return [], None + if _fsize > self.filter_long_videos: + warnings.warn(f'skipping long video of size {_fsize=} (bytes)') + return [], None + + try: + vr = VideoReader(fname, num_threads=-1, ctx=cpu(0)) + except Exception: + return [], None + + fpc = self.frames_per_clip + fstp = self.frame_step + if self.duration is not None: + try: + fps = vr.get_avg_fps() + fstp = int(self.duration * fps / fpc) + except Exception 
as e: + warnings.warn(e) + clip_len = int(fpc * fstp) + + if self.filter_short_videos and len(vr) < clip_len: + warnings.warn(f'skipping video of length {len(vr)}') + return [], None + + vr.seek(0) # Go to start of video before sampling frames + + # Partition video into equal sized segments and sample each clip + # from a different segment + partition_len = len(vr) // self.num_clips + + all_indices, clip_indices = [], [] + for i in range(self.num_clips): + + if partition_len > clip_len: + # If partition_len > clip len, then sample a random window of + # clip_len frames within the segment + end_indx = clip_len + if self.random_clip_sampling: + end_indx = np.random.randint(clip_len, partition_len) + start_indx = end_indx - clip_len + indices = np.linspace(start_indx, end_indx, num=fpc) + indices = np.clip(indices, start_indx, end_indx-1).astype(np.int64) + # -- + indices = indices + i * partition_len + else: + # If partition overlap not allowed and partition_len < clip_len + # then repeatedly append the last frame in the segment until + # we reach the desired clip length + if not self.allow_clip_overlap: + indices = np.linspace(0, partition_len, num=partition_len // fstp) + indices = np.concatenate((indices, np.ones(fpc - partition_len // fstp) * partition_len,)) + indices = np.clip(indices, 0, partition_len-1).astype(np.int64) + # -- + indices = indices + i * partition_len + + # If partition overlap is allowed and partition_len < clip_len + # then start_indx of segment i+1 will lie within segment i + else: + sample_len = min(clip_len, len(vr)) - 1 + indices = np.linspace(0, sample_len, num=sample_len // fstp) + indices = np.concatenate((indices, np.ones(fpc - sample_len // fstp) * sample_len,)) + indices = np.clip(indices, 0, sample_len-1).astype(np.int64) + # -- + clip_step = 0 + if len(vr) > clip_len: + clip_step = (len(vr) - clip_len) // (self.num_clips - 1) + indices = indices + i * clip_step + + clip_indices.append(indices) + all_indices.extend(list(indices)) + + buffer = vr.get_batch(all_indices).asnumpy() + return buffer, clip_indices + + def __len__(self): + return len(self.samples) diff --git a/build/lib/masks/default.py b/build/lib/masks/default.py new file mode 100644 index 00000000..2810c0a1 --- /dev/null +++ b/build/lib/masks/default.py @@ -0,0 +1,20 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. +# + +from logging import getLogger + +import torch + +_GLOBAL_SEED = 0 +logger = getLogger() + + +class DefaultCollator(object): + + def __call__(self, batch): + collated_batch = torch.utils.data.default_collate(batch) + return collated_batch, None, None diff --git a/build/lib/masks/multiblock3d.py b/build/lib/masks/multiblock3d.py new file mode 100644 index 00000000..a7bbc3e1 --- /dev/null +++ b/build/lib/masks/multiblock3d.py @@ -0,0 +1,203 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. 
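VideoDataset resolves each CSV/NPY entry lazily with decord and retries with a random index whenever a clip fails to load, so a plain DataLoader is enough to exercise it outside of distributed training. A sketch under stated assumptions: train_paths.csv is a hypothetical annotation file with one space-separated "<video_path> <label>" pair per line (the format parsed above), decord is installed, and the import paths follow the src.* layout used in this patch.

import torch

from src.datasets.video_dataset import VideoDataset   # assumed import path
from src.masks.default import DefaultCollator

dataset = VideoDataset(
    data_paths=['train_paths.csv'],    # hypothetical "<video_path> <label>" listing
    frames_per_clip=16,
    frame_step=4,
    num_clips=1,
)

loader = torch.utils.data.DataLoader(
    dataset,
    batch_size=4,
    collate_fn=DefaultCollator(),      # returns (batch, None, None), mirroring the mask collators
    num_workers=2,
)

# Each sample is (clips, label, clip_indices); with no transforms, clips is a list of
# num_clips arrays shaped roughly [frames_per_clip, H, W, 3] straight from decord.
(clips, labels, clip_indices), _, _ = next(iter(loader))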
+# + +import math + +from multiprocessing import Value + +from logging import getLogger + +import torch + +_GLOBAL_SEED = 0 +logger = getLogger() + + +class MaskCollator(object): + + def __init__( + self, + cfgs_mask, + crop_size=(224, 224), + num_frames=16, + patch_size=(16, 16), + tubelet_size=2, + ): + super(MaskCollator, self).__init__() + + self.mask_generators = [] + for m in cfgs_mask: + mask_generator = _MaskGenerator( + crop_size=crop_size, + num_frames=num_frames, + spatial_patch_size=patch_size, + temporal_patch_size=tubelet_size, + spatial_pred_mask_scale=m.get('spatial_scale'), + temporal_pred_mask_scale=m.get('temporal_scale'), + aspect_ratio=m.get('aspect_ratio'), + npred=m.get('num_blocks'), + max_context_frames_ratio=m.get('max_temporal_keep', 1.0), + max_keep=m.get('max_keep', None), + ) + self.mask_generators.append(mask_generator) + + def step(self): + for mask_generator in self.mask_generators: + mask_generator.step() + + def __call__(self, batch): + + batch_size = len(batch) + collated_batch = torch.utils.data.default_collate(batch) + + collated_masks_pred, collated_masks_enc = [], [] + for i, mask_generator in enumerate(self.mask_generators): + masks_enc, masks_pred = mask_generator(batch_size) + collated_masks_enc.append(masks_enc) + collated_masks_pred.append(masks_pred) + + return collated_batch, collated_masks_enc, collated_masks_pred + + +class _MaskGenerator(object): + + def __init__( + self, + crop_size=(224, 224), + num_frames=16, + spatial_patch_size=(16, 16), + temporal_patch_size=2, + spatial_pred_mask_scale=(0.2, 0.8), + temporal_pred_mask_scale=(1.0, 1.0), + aspect_ratio=(0.3, 3.0), + npred=1, + max_context_frames_ratio=1.0, + max_keep=None, + ): + super(_MaskGenerator, self).__init__() + if not isinstance(crop_size, tuple): + crop_size = (crop_size, ) * 2 + self.crop_size = crop_size + self.height, self.width = crop_size[0] // spatial_patch_size, crop_size[1] // spatial_patch_size + self.duration = num_frames // temporal_patch_size + + self.spatial_patch_size = spatial_patch_size + self.temporal_patch_size = temporal_patch_size + + self.aspect_ratio = aspect_ratio + self.spatial_pred_mask_scale = spatial_pred_mask_scale + self.temporal_pred_mask_scale = temporal_pred_mask_scale + self.npred = npred + self.max_context_duration = max(1, int(self.duration * max_context_frames_ratio)) # maximum number of time-steps (frames) spanned by context mask + self.max_keep = max_keep # maximum number of patches to keep in context + self._itr_counter = Value('i', -1) # collator is shared across worker processes + + def step(self): + i = self._itr_counter + with i.get_lock(): + i.value += 1 + v = i.value + return v + + def _sample_block_size( + self, + generator, + temporal_scale, + spatial_scale, + aspect_ratio_scale + ): + # -- Sample temporal block mask scale + _rand = torch.rand(1, generator=generator).item() + min_t, max_t = temporal_scale + temporal_mask_scale = min_t + _rand * (max_t - min_t) + t = max(1, int(self.duration * temporal_mask_scale)) + + # -- Sample spatial block mask scale + _rand = torch.rand(1, generator=generator).item() + min_s, max_s = spatial_scale + spatial_mask_scale = min_s + _rand * (max_s - min_s) + spatial_num_keep = int(self.height * self.width * spatial_mask_scale) + + # -- Sample block aspect-ratio + _rand = torch.rand(1, generator=generator).item() + min_ar, max_ar = aspect_ratio_scale + aspect_ratio = min_ar + _rand * (max_ar - min_ar) + + # -- Compute block height and width (given scale and aspect-ratio) + h = 
int(round(math.sqrt(spatial_num_keep * aspect_ratio))) + w = int(round(math.sqrt(spatial_num_keep / aspect_ratio))) + h = min(h, self.height) + w = min(w, self.width) + + return (t, h, w) + + def _sample_block_mask(self, b_size): + t, h, w = b_size + top = torch.randint(0, self.height - h + 1, (1,)) + left = torch.randint(0, self.width - w + 1, (1,)) + start = torch.randint(0, self.duration - t + 1, (1,)) + + mask = torch.ones((self.duration, self.height, self.width), dtype=torch.int32) + mask[start:start+t, top:top+h, left:left+w] = 0 + + # Context mask will only span the first X frames + # (X=self.max_context_frames) + if self.max_context_duration < self.duration: + mask[self.max_context_duration:, :, :] = 0 + + # -- + return mask + + def __call__(self, batch_size): + """ + Create encoder and predictor masks when collating imgs into a batch + # 1. sample pred block size using seed + # 2. sample several pred block locations for each image (w/o seed) + # 3. return pred masks and complement (enc mask) + """ + seed = self.step() + g = torch.Generator() + g.manual_seed(seed) + p_size = self._sample_block_size( + generator=g, + temporal_scale=self.temporal_pred_mask_scale, + spatial_scale=self.spatial_pred_mask_scale, + aspect_ratio_scale=self.aspect_ratio, + ) + + collated_masks_pred, collated_masks_enc = [], [] + min_keep_enc = min_keep_pred = self.duration * self.height * self.width + for _ in range(batch_size): + + empty_context = True + while empty_context: + + mask_e = torch.ones((self.duration, self.height, self.width), dtype=torch.int32) + for _ in range(self.npred): + mask_e *= self._sample_block_mask(p_size) + mask_e = mask_e.flatten() + + mask_p = torch.argwhere(mask_e == 0).squeeze() + mask_e = torch.nonzero(mask_e).squeeze() + + empty_context = len(mask_e) == 0 + if not empty_context: + min_keep_pred = min(min_keep_pred, len(mask_p)) + min_keep_enc = min(min_keep_enc, len(mask_e)) + collated_masks_pred.append(mask_p) + collated_masks_enc.append(mask_e) + + if self.max_keep is not None: + min_keep_enc = min(min_keep_enc, self.max_keep) + + collated_masks_pred = [cm[:min_keep_pred] for cm in collated_masks_pred] + collated_masks_pred = torch.utils.data.default_collate(collated_masks_pred) + # -- + collated_masks_enc = [cm[:min_keep_enc] for cm in collated_masks_enc] + collated_masks_enc = torch.utils.data.default_collate(collated_masks_enc) + + return collated_masks_enc, collated_masks_pred diff --git a/build/lib/masks/random_tube.py b/build/lib/masks/random_tube.py new file mode 100644 index 00000000..84c06402 --- /dev/null +++ b/build/lib/masks/random_tube.py @@ -0,0 +1,117 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. 
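The multiblock3d collator samples one 3D block size per iteration (shared across the batch via the seeded generator) and then carves several such blocks out of each sample to form the prediction targets; the complement becomes the context. A sketch with a hand-written mask config follows, assuming only that each config entry supports .get() (a plain dict does) and that the module is importable as src.masks.multiblock3d. Note that _MaskGenerator divides crop_size by the patch size directly, so an integer patch_size is what the collator expects from its callers.

import torch

from src.masks.multiblock3d import MaskCollator   # assumed import path

cfgs_mask = [{
    'spatial_scale': (0.15, 0.15),   # each block covers ~15% of the 14x14 spatial grid
    'temporal_scale': (1.0, 1.0),    # blocks span the full 8-step temporal grid
    'aspect_ratio': (0.75, 1.5),
    'num_blocks': 8,
    'max_temporal_keep': 1.0,
}]

collator = MaskCollator(
    cfgs_mask,
    crop_size=(224, 224),
    num_frames=16,
    patch_size=16,        # int, not (16, 16): crop_size is divided by it directly
    tubelet_size=2,
)

# Simulate dataset output: (clip, label, indices) tuples for a batch of 4.
fake_batch = [(torch.zeros(3, 16, 224, 224), 0, torch.arange(16)) for _ in range(4)]
batch, masks_enc, masks_pred = collator(fake_batch)

# One entry per mask generator; each holds [B, N_keep] indices into the 8x14x14 token grid,
# truncated to the smallest keep-count in the batch so they collate into a single tensor.
print(masks_enc[0].shape, masks_pred[0].shape)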
+# + +from multiprocessing import Value + +from logging import getLogger + +import torch +import numpy as np + +_GLOBAL_SEED = 0 +logger = getLogger() + + +class MaskCollator(object): + + def __init__( + self, + cfgs_mask, + crop_size=(224, 224), + num_frames=16, + patch_size=(16, 16), + tubelet_size=2, + ): + super(MaskCollator, self).__init__() + + self.mask_generators = [] + for m in cfgs_mask: + mask_generator = _MaskGenerator( + crop_size=crop_size, + num_frames=num_frames, + spatial_patch_size=patch_size, + temporal_patch_size=tubelet_size, + ratio=m.get('ratio'), + ) + self.mask_generators.append(mask_generator) + + def step(self): + for mask_generator in self.mask_generators: + mask_generator.step() + + def __call__(self, batch): + + batch_size = len(batch) + collated_batch = torch.utils.data.default_collate(batch) + + collated_masks_pred, collated_masks_enc = [], [] + for i, mask_generator in enumerate(self.mask_generators): + masks_enc, masks_pred = mask_generator(batch_size) + collated_masks_enc.append(masks_enc) + collated_masks_pred.append(masks_pred) + + return collated_batch, collated_masks_enc, collated_masks_pred + + +class _MaskGenerator(object): + + def __init__( + self, + crop_size=(224, 224), + num_frames=16, + spatial_patch_size=(16, 16), + temporal_patch_size=2, + ratio=0.9, + ): + super(_MaskGenerator, self).__init__() + if not isinstance(crop_size, tuple): + crop_size = (crop_size, ) * 2 + self.crop_size = crop_size + self.height, self.width = crop_size[0] // spatial_patch_size, crop_size[1] // spatial_patch_size + self.duration = num_frames // temporal_patch_size + + self.spatial_patch_size = spatial_patch_size + self.temporal_patch_size = temporal_patch_size + self.num_patches_spatial = self.height*self.width + + self.ratio = ratio + + self.num_keep_spatial = int(self.num_patches_spatial*(1.-self.ratio)) + self.num_keep = self.num_keep_spatial * self.duration + + self._itr_counter = Value('i', -1) # collator is shared across worker processes + + def step(self): + i = self._itr_counter + with i.get_lock(): + i.value += 1 + v = i.value + return v + + def __call__(self, batch_size): + def sample_mask(): + mask = np.hstack([ + np.zeros(self.num_patches_spatial - self.num_keep_spatial), + np.ones(self.num_keep_spatial), + ]) + np.random.shuffle(mask) + mask = torch.tensor(np.tile(mask, (self.duration, 1))) + mask = mask.flatten() + mask_p = torch.argwhere(mask == 0).squeeze() + mask_e = torch.nonzero(mask).squeeze() + return mask_e, mask_p + + collated_masks_pred, collated_masks_enc = [], [] + for _ in range(batch_size): + mask_e, mask_p = sample_mask() + collated_masks_enc.append(mask_e) + collated_masks_pred.append(mask_p) + + collated_masks_enc = torch.utils.data.default_collate(collated_masks_enc) + collated_masks_pred = torch.utils.data.default_collate(collated_masks_pred) + + return collated_masks_enc, collated_masks_pred diff --git a/build/lib/masks/utils.py b/build/lib/masks/utils.py new file mode 100644 index 00000000..ca04af1f --- /dev/null +++ b/build/lib/masks/utils.py @@ -0,0 +1,23 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. 
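Unlike the multiblock masks, the random-tube generator above drops the same spatial patches in every tubelet, so the mask is constant through time. A sketch along the same lines (the import path and config dict are assumptions):

import torch

from src.masks.random_tube import MaskCollator as TubeMaskCollator   # assumed import path

collator = TubeMaskCollator(
    cfgs_mask=[{'ratio': 0.9}],   # keep 10% of the 14x14 spatial patches, in every tubelet
    crop_size=(224, 224),
    num_frames=16,
    patch_size=16,                # int expected, as with the multiblock collator
    tubelet_size=2,
)

fake_batch = [(torch.zeros(3, 16, 224, 224), 0) for _ in range(2)]
_, masks_enc, masks_pred = collator(fake_batch)

print(masks_enc[0].shape)    # [2, 8 * 19]  kept (context) indices
print(masks_pred[0].shape)   # [2, 8 * 177] masked (prediction) indices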
+# + +import torch + + +def apply_masks(x, masks, concat=True): + """ + :param x: tensor of shape [B (batch-size), N (num-patches), D (feature-dim)] + :param masks: list of tensors of shape [B, K] containing indices of K patches in [N] to keep + """ + all_x = [] + for m in masks: + mask_keep = m.unsqueeze(-1).repeat(1, 1, x.size(-1)) + all_x += [torch.gather(x, dim=1, index=mask_keep)] + if not concat: + return all_x + + return torch.cat(all_x, dim=0) diff --git a/build/lib/models/attentive_pooler.py b/build/lib/models/attentive_pooler.py new file mode 100644 index 00000000..ecd9986a --- /dev/null +++ b/build/lib/models/attentive_pooler.py @@ -0,0 +1,136 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. +# + +import math + +import torch +import torch.nn as nn + +from src.models.utils.modules import ( + Block, + CrossAttention, + CrossAttentionBlock +) +from src.utils.tensors import trunc_normal_ + + +class AttentivePooler(nn.Module): + """ Attentive Pooler """ + def __init__( + self, + num_queries=1, + embed_dim=768, + num_heads=12, + mlp_ratio=4.0, + depth=1, + norm_layer=nn.LayerNorm, + init_std=0.02, + qkv_bias=True, + complete_block=True + ): + super().__init__() + self.query_tokens = nn.Parameter(torch.zeros(1, num_queries, embed_dim)) + + self.complete_block = complete_block + if complete_block: + self.cross_attention_block = CrossAttentionBlock( + dim=embed_dim, + num_heads=num_heads, + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + norm_layer=norm_layer) + else: + self.cross_attention_block = CrossAttention( + dim=embed_dim, + num_heads=num_heads, + qkv_bias=qkv_bias) + + self.blocks = None + if depth > 1: + self.blocks = nn.ModuleList([ + Block( + dim=embed_dim, + num_heads=num_heads, + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + qk_scale=False, + norm_layer=norm_layer) + for i in range(depth-1)]) + + self.init_std = init_std + trunc_normal_(self.query_tokens, std=self.init_std) + self.apply(self._init_weights) + self._rescale_blocks() + + def _rescale_blocks(self): + def rescale(param, layer_id): + param.div_(math.sqrt(2.0 * layer_id)) + + if self.complete_block: + rescale(self.cross_attention_block.xattn.proj.weight.data, 1) + rescale(self.cross_attention_block.mlp.fc2.weight.data, 1) + else: + rescale(self.cross_attention_block.proj.weight.data, 1) + if self.blocks is not None: + for layer_id, layer in enumerate(self.blocks, 1): + rescale(layer.attn.proj.weight.data, layer_id + 1) + rescale(layer.mlp.fc2.weight.data, layer_id + 1) + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight, std=self.init_std) + if isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.LayerNorm): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) + elif isinstance(m, nn.Conv2d): + trunc_normal_(m.weight, std=self.init_std) + if m.bias is not None: + nn.init.constant_(m.bias, 0) + + def forward(self, x): + q = self.query_tokens.repeat(len(x), 1, 1) + q = self.cross_attention_block(q, x) + if self.blocks is not None: + for blk in self.blocks: + q = blk(q) + return q + + +class AttentiveClassifier(nn.Module): + """ Attentive Classifier """ + def __init__( + self, + embed_dim=768, + num_heads=12, + mlp_ratio=4.0, + depth=1, + norm_layer=nn.LayerNorm, + init_std=0.02, + qkv_bias=True, + num_classes=1000, + complete_block=True, + ): + super().__init__() + 
self.pooler = AttentivePooler( + num_queries=1, + embed_dim=embed_dim, + num_heads=num_heads, + mlp_ratio=mlp_ratio, + depth=depth, + norm_layer=norm_layer, + init_std=init_std, + qkv_bias=qkv_bias, + complete_block=complete_block, + ) + self.linear = nn.Linear(embed_dim, num_classes, bias=True) + + def forward(self, x): + x = self.pooler(x).squeeze(1) + x = self.linear(x) + return x diff --git a/build/lib/models/predictor.py b/build/lib/models/predictor.py new file mode 100644 index 00000000..2dd9a38b --- /dev/null +++ b/build/lib/models/predictor.py @@ -0,0 +1,246 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. +# + +import math +from functools import partial + +import torch +import torch.nn as nn + +from src.models.utils.modules import Block +from src.models.utils.pos_embs import get_2d_sincos_pos_embed, get_3d_sincos_pos_embed +from src.utils.tensors import ( + trunc_normal_, + repeat_interleave_batch +) +from src.masks.utils import apply_masks + + +class VisionTransformerPredictor(nn.Module): + """ Vision Transformer """ + def __init__( + self, + img_size=224, + patch_size=16, + num_frames=1, + tubelet_size=2, + embed_dim=768, + predictor_embed_dim=384, + depth=6, + num_heads=12, + mlp_ratio=4.0, + qkv_bias=True, + qk_scale=None, + drop_rate=0.0, + attn_drop_rate=0.0, + norm_layer=nn.LayerNorm, + init_std=0.02, + uniform_power=False, + use_mask_tokens=False, + num_mask_tokens=2, + zero_init_mask_tokens=True, + **kwargs + ): + super().__init__() + # Map input to predictor dimension + self.predictor_embed = nn.Linear(embed_dim, predictor_embed_dim, bias=True) + + # Mask tokens + self.mask_tokens = None + self.num_mask_tokens = 0 + if use_mask_tokens: + self.num_mask_tokens = num_mask_tokens + self.mask_tokens = nn.ParameterList([ + nn.Parameter(torch.zeros(1, 1, predictor_embed_dim)) + for i in range(num_mask_tokens) + ]) + + # Determine positional embedding + self.input_size = img_size + self.patch_size = patch_size + # -- + self.num_frames = num_frames + self.tubelet_size = tubelet_size + self.is_video = num_frames > 1 + + grid_size = self.input_size // self.patch_size + grid_depth = self.num_frames // self.tubelet_size + + if self.is_video: + self.num_patches = num_patches = ( + (num_frames // tubelet_size) + * (img_size // patch_size) + * (img_size // patch_size) + ) + else: + self.num_patches = num_patches = ( + (img_size // patch_size) + * (img_size // patch_size) + ) + # Position embedding + self.uniform_power = uniform_power + self.predictor_pos_embed = None + self.predictor_pos_embed = nn.Parameter( + torch.zeros(1, num_patches, predictor_embed_dim), + requires_grad=False) + + # Attention Blocks + self.predictor_blocks = nn.ModuleList([ + Block( + dim=predictor_embed_dim, + num_heads=num_heads, + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + drop=drop_rate, + act_layer=nn.GELU, + attn_drop=attn_drop_rate, + grid_size=grid_size, + grid_depth=grid_depth, + norm_layer=norm_layer) + for i in range(depth)]) + + # Normalize & project back to input dimension + self.predictor_norm = norm_layer(predictor_embed_dim) + self.predictor_proj = nn.Linear(predictor_embed_dim, embed_dim, bias=True) + + # ------ initialize weights + if self.predictor_pos_embed is not None: + self._init_pos_embed(self.predictor_pos_embed.data) # sincos pos-embed + self.init_std = init_std + if not zero_init_mask_tokens: + for mt in 
self.mask_tokens: + trunc_normal_(mt, std=init_std) + self.apply(self._init_weights) + self._rescale_blocks() + + def _init_pos_embed(self, pos_embed): + embed_dim = pos_embed.size(-1) + grid_size = self.input_size // self.patch_size + if self.is_video: + grid_depth = self.num_frames // self.tubelet_size + sincos = get_3d_sincos_pos_embed( + embed_dim, + grid_size, + grid_depth, + cls_token=False, + uniform_power=self.uniform_power + ) + else: + sincos = get_2d_sincos_pos_embed(embed_dim, grid_size, cls_token=False) + pos_embed.copy_(torch.from_numpy(sincos).float().unsqueeze(0)) + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight, std=self.init_std) + if isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.LayerNorm): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) + + def _rescale_blocks(self): + def rescale(param, layer_id): + param.div_(math.sqrt(2.0 * layer_id)) + + for layer_id, layer in enumerate(self.predictor_blocks): + rescale(layer.attn.proj.weight.data, layer_id + 1) + rescale(layer.mlp.fc2.weight.data, layer_id + 1) + + def diffusion(self, x, noise_beta=(0.5, 1.0), steps=1000): + + # Prepare diffusion noise schedule + b1, b2 = noise_beta + beta_scheduler = (b1 + i*(b2-b1)/steps for i in range(steps)) + alpha_scheduler = [] + _alpha = 1.0 + for _beta in beta_scheduler: + _alpha *= 1.-_beta + alpha_scheduler += [_alpha] + + # Sample diffusion time step + T = torch.randint(0, steps, (len(x),)) + alpha = torch.tensor(alpha_scheduler, device=x.device)[T].unsqueeze(-1).unsqueeze(-1) + + # Normalize features and apply noise + x = torch.nn.functional.layer_norm(x, (x.size(-1),)) + x = alpha**0.5 * x + (1.-alpha)**0.5 * torch.randn(x.shape, device=x.device) + return x + + def forward(self, ctxt, tgt, masks_ctxt, masks_tgt, mask_index=1): + """ + :param ctxt: context tokens + :param tgt: target tokens + :param masks_ctxt: indices of context tokens in input + :params masks_tgt: indices of target tokens in input + """ + + assert (masks_ctxt is not None) and (masks_tgt is not None), 'Cannot run predictor without mask indices' + + if not isinstance(masks_ctxt, list): + masks_ctxt = [masks_ctxt] + + if not isinstance(masks_tgt, list): + masks_tgt = [masks_tgt] + + # Batch Size + B = len(ctxt) // len(masks_ctxt) + + # Map context tokens to pedictor dimensions + x = self.predictor_embed(ctxt) + _, N_ctxt, D = x.shape + + # Add positional embedding to ctxt tokens + if self.predictor_pos_embed is not None: + ctxt_pos_embed = self.predictor_pos_embed.repeat(B, 1, 1) + x += apply_masks(ctxt_pos_embed, masks_ctxt) + + # Map target tokens to predictor dimensions & add noise (fwd diffusion) + if self.mask_tokens is None: + pred_tokens = self.predictor_embed(tgt) + pred_tokens = self.diffusion(pred_tokens) + else: + mask_index = mask_index % self.num_mask_tokens + pred_tokens = self.mask_tokens[mask_index] + pred_tokens = pred_tokens.repeat(B, self.num_patches, 1) + pred_tokens = apply_masks(pred_tokens, masks_tgt) + + # Add positional embedding to target tokens + if self.predictor_pos_embed is not None: + pos_embs = self.predictor_pos_embed.repeat(B, 1, 1) + pos_embs = apply_masks(pos_embs, masks_tgt) + pos_embs = repeat_interleave_batch(pos_embs, B, repeat=len(masks_ctxt)) + pred_tokens += pos_embs + + # Concatenate context & target tokens + x = x.repeat(len(masks_tgt), 1, 1) + x = torch.cat([x, pred_tokens], dim=1) + + # FIXME: this implementation currently assumes masks_ctxt and masks_tgt + # 
are alligned 1:1 (ok with MultiMask wrapper on predictor but + # otherwise will break) + masks_ctxt = torch.cat(masks_ctxt, dim=0) + masks_tgt = torch.cat(masks_tgt, dim=0) + masks = torch.cat([masks_ctxt, masks_tgt], dim=1) + + # Fwd prop + for blk in self.predictor_blocks: + x = blk(x, mask=masks) + x = self.predictor_norm(x) + + # Return output corresponding to target tokens + x = x[:, N_ctxt:] + x = self.predictor_proj(x) + + return x + + +def vit_predictor(**kwargs): + model = VisionTransformerPredictor( + mlp_ratio=4, qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), + **kwargs) + return model diff --git a/build/lib/models/utils/modules.py b/build/lib/models/utils/modules.py new file mode 100644 index 00000000..dc470d9b --- /dev/null +++ b/build/lib/models/utils/modules.py @@ -0,0 +1,183 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. +# + +import torch +import torch.nn as nn +import torch.nn.functional as F + + +class MLP(nn.Module): + def __init__( + self, + in_features, + hidden_features=None, + out_features=None, + act_layer=nn.GELU, + drop=0. + ): + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + self.fc1 = nn.Linear(in_features, hidden_features) + self.act = act_layer() + self.fc2 = nn.Linear(hidden_features, out_features) + self.drop = nn.Dropout(drop) + + def forward(self, x): + x = self.fc1(x) + x = self.act(x) + x = self.drop(x) + x = self.fc2(x) + x = self.drop(x) + return x + + +class Attention(nn.Module): + def __init__( + self, + dim, + num_heads=8, + qkv_bias=False, + qk_scale=None, + attn_drop=0., + proj_drop=0., + use_sdpa=True + ): + super().__init__() + self.num_heads = num_heads + head_dim = dim // num_heads + self.scale = qk_scale or head_dim ** -0.5 + self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(dim, dim) + self.proj_drop_prob = proj_drop + self.proj_drop = nn.Dropout(proj_drop) + self.use_sdpa = use_sdpa + + def forward(self, x, mask=None): + B, N, C = x.shape + qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) + q, k, v = qkv[0], qkv[1], qkv[2] # [B, num_heads, N, D] + + if self.use_sdpa: + with torch.backends.cuda.sdp_kernel(): + x = F.scaled_dot_product_attention(q, k, v, dropout_p=self.proj_drop_prob) + attn = None + else: + attn = (q @ k.transpose(-2, -1)) * self.scale # [B, num_heads, D, D] + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + x = (attn @ v) + x = x.transpose(1, 2).reshape(B, N, C) + x = self.proj(x) + x = self.proj_drop(x) + return x, attn + + +class Block(nn.Module): + def __init__( + self, + dim, + num_heads, + mlp_ratio=4., + qkv_bias=False, + qk_scale=None, + drop=0., + attn_drop=0., + act_layer=nn.GELU, + norm_layer=nn.LayerNorm, + grid_size=None, + grid_depth=None, + ): + super().__init__() + self.norm1 = norm_layer(dim) + self.attn = Attention( + dim, + num_heads=num_heads, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + attn_drop=attn_drop, + proj_drop=drop) + + self.norm2 = norm_layer(dim) + mlp_hidden_dim = int(dim * mlp_ratio) + self.mlp = MLP( + in_features=dim, + hidden_features=mlp_hidden_dim, + act_layer=act_layer, + drop=drop) + + def forward(self, x, return_attention=False, mask=None): + y, attn = self.attn(self.norm1(x), mask=mask) + if return_attention: + return attn + x 
= x + y + x = x + self.mlp(self.norm2(x)) + return x + + +class CrossAttention(nn.Module): + def __init__( + self, + dim, + num_heads=12, + qkv_bias=False, + use_sdpa=True + ): + super().__init__() + self.num_heads = num_heads + head_dim = dim // num_heads + self.scale = head_dim ** -0.5 + self.q = nn.Linear(dim, dim, bias=qkv_bias) + self.kv = nn.Linear(dim, int(dim*2), bias=qkv_bias) + self.proj = nn.Linear(dim, dim) + self.use_sdpa = use_sdpa + + def forward(self, q, x): + B, n, C = q.shape + q = self.q(q).reshape(B, n, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3) + + B, N, C = x.shape + kv = self.kv(x).reshape(B, N, 2, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) + k, v = kv[0], kv[1] # (batch_size, num_heads, seq_len, feature_dim_per_head) + + if self.use_sdpa: + with torch.backends.cuda.sdp_kernel(): + q = F.scaled_dot_product_attention(q, k, v) + else: + xattn = (q @ k.transpose(-2, -1)) * self.scale + xattn = xattn.softmax(dim=-1) # (batch_size, num_heads, query_len, seq_len) + q = (xattn @ v) + + q = q.transpose(1, 2).reshape(B, n, C) + q = self.proj(q) + + return q + + +class CrossAttentionBlock(nn.Module): + def __init__( + self, + dim, + num_heads, + mlp_ratio=4., + qkv_bias=False, + act_layer=nn.GELU, + norm_layer=nn.LayerNorm + ): + super().__init__() + self.norm1 = norm_layer(dim) + self.xattn = CrossAttention(dim, num_heads=num_heads, qkv_bias=qkv_bias) + self.norm2 = norm_layer(dim) + mlp_hidden_dim = int(dim * mlp_ratio) + self.mlp = MLP(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer) + + def forward(self, q, x): + y = self.xattn(q, self.norm1(x)) + q = q + y + q = q + self.mlp(self.norm2(q)) + return q diff --git a/build/lib/models/utils/multimask.py b/build/lib/models/utils/multimask.py new file mode 100644 index 00000000..d4800869 --- /dev/null +++ b/build/lib/models/utils/multimask.py @@ -0,0 +1,48 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. +# + +import torch.nn as nn + + +class MultiMaskWrapper(nn.Module): + + def __init__(self, backbone): + super().__init__() + self.backbone = backbone + + def forward(self, x, masks=None): + if masks is None: + return self.backbone(x) + + if (masks is not None) and not isinstance(masks, list): + masks = [masks] + outs = [] + for m in masks: + outs += [self.backbone(x, masks=m)] + return outs + + +class PredictorMultiMaskWrapper(nn.Module): + + def __init__(self, backbone): + super().__init__() + self.backbone = backbone + + def forward(self, ctxt, tgt, masks_ctxt, masks_tgt): + if type(ctxt) is not list: + ctxt = [ctxt] + if type(tgt) is not list: + tgt = [tgt] + if type(masks_ctxt) is not list: + masks_ctxt = [masks_ctxt] + if type(masks_tgt) is not list: + masks_tgt = [masks_tgt] + + outs = [] + for i, (zi, hi, mc, mt) in enumerate(zip(ctxt, tgt, masks_ctxt, masks_tgt)): + outs += [self.backbone(zi, hi, mc, mt, mask_index=i)] + return outs diff --git a/build/lib/models/utils/patch_embed.py b/build/lib/models/utils/patch_embed.py new file mode 100644 index 00000000..4ff4de51 --- /dev/null +++ b/build/lib/models/utils/patch_embed.py @@ -0,0 +1,57 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. 
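The wrappers above only change the calling convention: MultiMaskWrapper fans a list of mask tensors out into one backbone call per mask, and PredictorMultiMaskWrapper zips aligned context/target/mask lists. A sketch of the list-in/list-out contract together with apply_masks, using a stub backbone that is purely illustrative and not part of this patch:

import torch
import torch.nn as nn

from src.masks.utils import apply_masks                  # assumed import paths
from src.models.utils.multimask import MultiMaskWrapper


class StubEncoder(nn.Module):
    """Toy stand-in for the ViT backbone: embed tokens, then keep only masked-in indices."""

    def __init__(self, in_dim=4, dim=8):
        super().__init__()
        self.embed = nn.Linear(in_dim, dim)

    def forward(self, x, masks=None):
        x = self.embed(x)                    # [B, N, dim]
        if masks is not None:
            x = apply_masks(x, [masks])      # gather the K token indices listed in `masks`
        return x


encoder = MultiMaskWrapper(StubEncoder())

x = torch.randn(2, 16, 4)                    # batch of 2 samples with 16 "patch" tokens each
masks = [torch.arange(0, 4).repeat(2, 1),    # first mask: keep tokens 0..3
         torch.arange(8, 12).repeat(2, 1)]   # second mask: keep tokens 8..11

outs = encoder(x, masks=masks)               # list with one output per mask
print([o.shape for o in outs])               # two tensors of shape [2, 4, 8]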
+# + +import torch.nn as nn + + +class PatchEmbed(nn.Module): + """ + Image to Patch Embedding + """ + def __init__( + self, + patch_size=16, + in_chans=3, + embed_dim=768 + ): + super().__init__() + self.patch_size = patch_size + self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size) + + def forward(self, x): + B, C, H, W = x.shape + x = self.proj(x).flatten(2).transpose(1, 2) + return x + + +class PatchEmbed3D(nn.Module): + """ + Image to Patch Embedding + """ + + def __init__( + self, + patch_size=16, + tubelet_size=2, + in_chans=3, + embed_dim=768, + ): + super().__init__() + self.patch_size = patch_size + self.tubelet_size = tubelet_size + + self.proj = nn.Conv3d( + in_channels=in_chans, + out_channels=embed_dim, + kernel_size=(tubelet_size, patch_size, patch_size), + stride=(tubelet_size, patch_size, patch_size), + ) + + def forward(self, x, **kwargs): + B, C, T, H, W = x.shape + x = self.proj(x).flatten(2).transpose(1, 2) + return x diff --git a/build/lib/models/utils/pos_embs.py b/build/lib/models/utils/pos_embs.py new file mode 100644 index 00000000..d1d82e21 --- /dev/null +++ b/build/lib/models/utils/pos_embs.py @@ -0,0 +1,99 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. +# + +import numpy as np + + +def get_3d_sincos_pos_embed( + embed_dim, + grid_size, + grid_depth, + cls_token=False, + uniform_power=False +): + """ + grid_size: int of the grid height and width + grid_depth: int of the grid depth + returns: + pos_embed: [grid_depth*grid_size*grid_size, embed_dim] (w/o cls_token) + or [1+grid_depth*grid_size*grid_size, embed_dim] (w/ cls_token) + """ + grid_d = np.arange(grid_depth, dtype=float) + grid_h = np.arange(grid_size, dtype=float) + grid_w = np.arange(grid_size, dtype=float) + grid_h, grid_d, grid_w = np.meshgrid(grid_h, grid_d, grid_w) # order of meshgrid is very important for indexing as [d,h,w] + + if not uniform_power: + h_embed_dim = embed_dim // 4 + w_embed_dim = embed_dim // 4 + d_embed_dim = embed_dim // 2 + else: + h_embed_dim = w_embed_dim = d_embed_dim = int(np.ceil(embed_dim/6)*2) + + emb_h = get_1d_sincos_pos_embed_from_grid(h_embed_dim, grid_h) # (T*H*W, D1) + emb_w = get_1d_sincos_pos_embed_from_grid(w_embed_dim, grid_w) # (T*H*W, D2) + emb_d = get_1d_sincos_pos_embed_from_grid(d_embed_dim, grid_d) # (T*H*W, D3) + pos_embed = np.concatenate([emb_d, emb_h, emb_w], axis=1) + pos_embed = pos_embed[:, :embed_dim] + if cls_token: + pos_embed = np.concatenate([np.zeros([1, embed_dim]), pos_embed], axis=0) + return pos_embed + + +def get_2d_sincos_pos_embed(embed_dim, grid_size, cls_token=False): + """ + grid_size: int of the grid height and width + returns: + pos_embed: [grid_size*grid_size, embed_dim] (w/o cls_token) + or [1+grid_size*grid_size, embed_dim] (w/ cls_token) + """ + grid_h = np.arange(grid_size, dtype=float) + grid_w = np.arange(grid_size, dtype=float) + grid_w, grid_h = np.meshgrid(grid_w, grid_h) # order of meshgrid is very important for indexing as [h, w] + + emb_h = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid_h) # (H*W, D/2) + emb_w = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid_w) # (H*W, D/2) + pos_embed = np.concatenate([emb_h, emb_w], axis=1) # (H*W, D) + if cls_token: + pos_embed = np.concatenate([np.zeros([1, embed_dim]), pos_embed], axis=0) + return pos_embed + + +def get_1d_sincos_pos_embed(embed_dim, grid_size, 
cls_token=False): + """ + embed_dim: output dimension for each position + grid_size: int of the grid length + returns: + pos_embed: [grid_size, embed_dim] (w/o cls_token) + or [1+grid_size, embed_dim] (w/ cls_token) + """ + grid = np.arange(grid_size, dtype=float) + pos_embed = get_1d_sincos_pos_embed_from_grid(embed_dim, grid) + if cls_token: + pos_embed = np.concatenate([np.zeros([1, embed_dim]), pos_embed], axis=0) + return pos_embed + + +def get_1d_sincos_pos_embed_from_grid(embed_dim, pos): + """ + embed_dim: output dimension for each position + pos: a list of positions to be encoded: size (M,) + returns: (M, D) + """ + assert embed_dim % 2 == 0 + omega = np.arange(embed_dim // 2, dtype=float) + omega /= embed_dim / 2. + omega = 1. / 10000**omega # (D/2,) + + pos = pos.reshape(-1) # (M,) + out = np.einsum('m,d->md', pos, omega) # (M, D/2), outer product + + emb_sin = np.sin(out) # (M, D/2) + emb_cos = np.cos(out) # (M, D/2) + + emb = np.concatenate([emb_sin, emb_cos], axis=1) # (M, D) + return emb diff --git a/build/lib/models/vision_transformer.py b/build/lib/models/vision_transformer.py new file mode 100644 index 00000000..a8748dfd --- /dev/null +++ b/build/lib/models/vision_transformer.py @@ -0,0 +1,307 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. +# + +import math +from functools import partial + +import torch +import torch.nn as nn + +from src.models.utils.patch_embed import PatchEmbed, PatchEmbed3D +from src.models.utils.modules import Block +from src.models.utils.pos_embs import get_2d_sincos_pos_embed, get_3d_sincos_pos_embed +from src.utils.tensors import trunc_normal_ +from src.masks.utils import apply_masks + + +class VisionTransformer(nn.Module): + """ Vision Transformer """ + def __init__( + self, + img_size=224, + patch_size=16, + num_frames=1, + tubelet_size=2, + in_chans=3, + embed_dim=768, + depth=12, + num_heads=12, + mlp_ratio=4.0, + qkv_bias=True, + qk_scale=None, + drop_rate=0.0, + attn_drop_rate=0.0, + norm_layer=nn.LayerNorm, + init_std=0.02, + out_layers=None, + uniform_power=False, + **kwargs + ): + super().__init__() + self.num_features = self.embed_dim = embed_dim + self.num_heads = num_heads + self.out_layers = out_layers + + self.input_size = img_size + self.patch_size = patch_size + + self.num_frames = num_frames + self.tubelet_size = tubelet_size + self.is_video = num_frames > 1 + + grid_size = self.input_size // self.patch_size + grid_depth = self.num_frames // self.tubelet_size + + # Tokenize pixels with convolution + if self.is_video: + self.patch_embed = PatchEmbed3D( + patch_size=patch_size, + tubelet_size=tubelet_size, + in_chans=in_chans, + embed_dim=embed_dim) + self.num_patches = ( + (num_frames // tubelet_size) + * (img_size // patch_size) + * (img_size // patch_size) + ) + else: + self.patch_embed = PatchEmbed( + patch_size=patch_size, + in_chans=in_chans, + embed_dim=embed_dim) + self.num_patches = ( + (img_size // patch_size) + * (img_size // patch_size) + ) + + # Position embedding + self.uniform_power = uniform_power + self.pos_embed = None + self.pos_embed = nn.Parameter( + torch.zeros(1, self.num_patches, embed_dim), + requires_grad=False) + + # Attention Blocks + self.blocks = nn.ModuleList([ + Block( + dim=embed_dim, + num_heads=num_heads, + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + drop=drop_rate, + act_layer=nn.GELU, + grid_size=grid_size, + 
grid_depth=grid_depth, + attn_drop=attn_drop_rate, + norm_layer=norm_layer) + for i in range(depth)]) + self.norm = norm_layer(embed_dim) + + # ------ initialize weights + if self.pos_embed is not None: + self._init_pos_embed(self.pos_embed.data) # sincos pos-embed + self.init_std = init_std + self.apply(self._init_weights) + self._rescale_blocks() + + def _init_pos_embed(self, pos_embed): + embed_dim = pos_embed.size(-1) + grid_size = self.input_size // self.patch_size + if self.is_video: + grid_depth = self.num_frames // self.tubelet_size + sincos = get_3d_sincos_pos_embed( + embed_dim, + grid_size, + grid_depth, + cls_token=False, + uniform_power=self.uniform_power + ) + else: + sincos = get_2d_sincos_pos_embed(embed_dim, grid_size, cls_token=False) + pos_embed.copy_(torch.from_numpy(sincos).float().unsqueeze(0)) + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight, std=self.init_std) + if isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.LayerNorm): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) + elif isinstance(m, nn.Conv2d): + trunc_normal_(m.weight, std=self.init_std) + if m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.Conv3d): + trunc_normal_(m.weight, std=self.init_std) + if m.bias is not None: + nn.init.constant_(m.bias, 0) + + def _rescale_blocks(self): + def rescale(param, layer_id): + param.div_(math.sqrt(2.0 * layer_id)) + + for layer_id, layer in enumerate(self.blocks): + rescale(layer.attn.proj.weight.data, layer_id + 1) + rescale(layer.mlp.fc2.weight.data, layer_id + 1) + + def get_num_layers(self): + return len(self.blocks) + + def no_weight_decay(self): + return {} + + def forward(self, x, masks=None): + """ + :param x: input image/video + :param masks: indices of patch tokens to mask (remove) + """ + + if masks is not None and not isinstance(masks, list): + masks = [masks] + + # Tokenize input + pos_embed = self.pos_embed + if pos_embed is not None: + pos_embed = self.interpolate_pos_encoding(x, pos_embed) + x = self.patch_embed(x) + if pos_embed is not None: + x += pos_embed + B, N, D = x.shape + + # Mask away unwanted tokens (if masks provided) + if masks is not None: + x = apply_masks(x, masks) + masks = torch.cat(masks, dim=0) + + # Fwd prop + outs = [] + for i, blk in enumerate(self.blocks): + x = blk(x, mask=masks) + if self.out_layers is not None and i in self.out_layers: + outs.append(self.norm(x)) + + if self.out_layers is not None: + return outs + + if self.norm is not None: + x = self.norm(x) + + return x + + def interpolate_pos_encoding(self, x, pos_embed): + + _, N, dim = pos_embed.shape + + if self.is_video: + + # If pos_embed already corret size, just return + _, _, T, H, W = x.shape + if H == self.input_size and W == self.input_size and T == self.num_frames: + return pos_embed + + # Convert depth, height, width of input to be measured in patches + # instead of pixels/frames + T = T // self.tubelet_size + H = H // self.patch_size + W = W // self.patch_size + + # Compute the initialized shape of the positional embedding measured + # in patches + N_t = self.num_frames // self.tubelet_size + N_h = N_w = self.input_size // self.patch_size + assert N_h * N_w * N_t == N, 'Positional embedding initialized incorrectly' + + # Compute scale factor for spatio-temporal interpolation + scale_factor = (T/N_t, H/N_h, W/N_w) + + pos_embed = nn.functional.interpolate( + pos_embed.reshape(1, N_t, N_h, N_w, dim).permute(0, 4, 1, 2, 3), + 
+def vit_tiny(patch_size=16, **kwargs):
+    model = VisionTransformer(
+        patch_size=patch_size, embed_dim=192, depth=12, num_heads=3, mlp_ratio=4,
+        qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
+    return model
+
+
+def vit_small(patch_size=16, **kwargs):
+    model = VisionTransformer(
+        patch_size=patch_size, embed_dim=384, depth=12, num_heads=6, mlp_ratio=4,
+        qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
+    return model
+
+
+def vit_base(patch_size=16, **kwargs):
+    model = VisionTransformer(
+        patch_size=patch_size, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4,
+        qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
+    return model
+
+
+def vit_large(patch_size=16, **kwargs):
+    model = VisionTransformer(
+        patch_size=patch_size, embed_dim=1024, depth=24, num_heads=16, mlp_ratio=4,
+        qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
+    return model
+
+
+def vit_huge(patch_size=16, **kwargs):
+    model = VisionTransformer(
+        patch_size=patch_size, embed_dim=1280, depth=32, num_heads=16, mlp_ratio=4,
+        qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
+    return model
+
+
+def vit_giant(patch_size=16, **kwargs):
+    model = VisionTransformer(
+        patch_size=patch_size, embed_dim=1408, depth=40, num_heads=16, mlp_ratio=48/11,
+        qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
+    return model
+
+
+def vit_gigantic(patch_size=14, **kwargs):
+    model = VisionTransformer(
+        patch_size=patch_size, embed_dim=1664, depth=48, num_heads=16, mlp_ratio=64/13,
+        qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs
+    )
+    return model
+
+
+VIT_EMBED_DIMS = {
+    'vit_tiny': 192,
+    'vit_small': 384,
+    'vit_base': 768,
+    'vit_large': 1024,
+    'vit_huge': 1280,
+    'vit_giant': 1408,
+    'vit_gigantic': 1664,
+}
diff --git a/build/lib/utils/distributed.py b/build/lib/utils/distributed.py
new file mode 100644
index 00000000..cfba444d
--- /dev/null
+++ b/build/lib/utils/distributed.py
@@ -0,0 +1,113 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
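+#
+# Usage sketch (illustrative only, not part of this patch): under SLURM the
+# world size and rank are read from SLURM_NTASKS / SLURM_PROCID; otherwise the
+# call falls back to a single-process setup:
+#
+#     world_size, rank = init_distributed(port=37123)
+#     # -> (1, 0) when no distributed environment is available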
+# + +import os + +import torch +import torch.distributed as dist + +from logging import getLogger + +logger = getLogger() + + +def init_distributed(port=37123, rank_and_world_size=(None, None)): + + if dist.is_available() and dist.is_initialized(): + return dist.get_world_size(), dist.get_rank() + + rank, world_size = rank_and_world_size + os.environ['MASTER_ADDR'] = 'localhost' + + if (rank is None) or (world_size is None): + try: + world_size = int(os.environ['SLURM_NTASKS']) + rank = int(os.environ['SLURM_PROCID']) + os.environ['MASTER_ADDR'] = os.environ['HOSTNAME'] + except Exception: + logger.info('SLURM vars not set (distributed training not available)') + world_size, rank = 1, 0 + return world_size, rank + + try: + os.environ['MASTER_PORT'] = str(port) + torch.distributed.init_process_group( + backend='nccl', + world_size=world_size, + rank=rank + ) + except Exception as e: + world_size, rank = 1, 0 + logger.info(f'Rank: {rank}. Distributed training not available {e}') + + return world_size, rank + + +class AllGather(torch.autograd.Function): + + @staticmethod + def forward(ctx, x): + if ( + dist.is_available() + and dist.is_initialized() + and (dist.get_world_size() > 1) + ): + x = x.contiguous() + outputs = [torch.zeros_like(x) for _ in range(dist.get_world_size())] + dist.all_gather(outputs, x) + return torch.cat(outputs, 0) + return x + + @staticmethod + def backward(ctx, grads): + if ( + dist.is_available() + and dist.is_initialized() + and (dist.get_world_size() > 1) + ): + s = (grads.shape[0] // dist.get_world_size()) * dist.get_rank() + e = (grads.shape[0] // dist.get_world_size()) * (dist.get_rank() + 1) + grads = grads.contiguous() + dist.all_reduce(grads) + return grads[s:e] + return grads + + +class AllReduceSum(torch.autograd.Function): + + @staticmethod + def forward(ctx, x): + if ( + dist.is_available() + and dist.is_initialized() + and (dist.get_world_size() > 1) + ): + x = x.contiguous() + dist.all_reduce(x) + return x + + @staticmethod + def backward(ctx, grads): + return grads + + +class AllReduce(torch.autograd.Function): + + @staticmethod + def forward(ctx, x): + if ( + dist.is_available() + and dist.is_initialized() + and (dist.get_world_size() > 1) + ): + x = x.contiguous() / dist.get_world_size() + dist.all_reduce(x) + return x + + @staticmethod + def backward(ctx, grads): + return grads diff --git a/build/lib/utils/logging.py b/build/lib/utils/logging.py new file mode 100644 index 00000000..fcdd3faf --- /dev/null +++ b/build/lib/utils/logging.py @@ -0,0 +1,118 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. +# + +import logging +import sys + +import torch + + +def gpu_timer(closure, log_timings=True): + """ Helper to time gpu-time to execute closure() """ + log_timings = log_timings and torch.cuda.is_available() + + elapsed_time = -1. 
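+    # elapsed_time keeps the -1. sentinel when CUDA timing is disabled or unavailable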
+ if log_timings: + start = torch.cuda.Event(enable_timing=True) + end = torch.cuda.Event(enable_timing=True) + start.record() + + result = closure() + + if log_timings: + end.record() + torch.cuda.synchronize() + elapsed_time = start.elapsed_time(end) + + return result, elapsed_time + + +LOG_FORMAT = "[%(levelname)-8s][%(asctime)s][%(funcName)-25s] %(message)s" +DATE_FORMAT = "%Y-%m-%d %H:%M:%S" + + +def get_logger(name=None, force=False): + logging.basicConfig(stream=sys.stdout, level=logging.INFO, + format=LOG_FORMAT, datefmt=DATE_FORMAT, force=force) + return logging.getLogger(name=name) + + +class CSVLogger(object): + + def __init__(self, fname, *argv): + self.fname = fname + self.types = [] + # -- print headers + with open(self.fname, '+a') as f: + for i, v in enumerate(argv, 1): + self.types.append(v[0]) + if i < len(argv): + print(v[1], end=',', file=f) + else: + print(v[1], end='\n', file=f) + + def log(self, *argv): + with open(self.fname, '+a') as f: + for i, tv in enumerate(zip(self.types, argv), 1): + end = ',' if i < len(argv) else '\n' + print(tv[0] % tv[1], end=end, file=f) + + +class AverageMeter(object): + """computes and stores the average and current value""" + + def __init__(self): + self.reset() + + def reset(self): + self.val = 0 + self.avg = 0 + self.max = float('-inf') + self.min = float('inf') + self.sum = 0 + self.count = 0 + + def update(self, val, n=1): + self.val = val + try: + self.max = max(val, self.max) + self.min = min(val, self.min) + except Exception: + pass + self.sum += val * n + self.count += n + self.avg = self.sum / self.count + + +def grad_logger(named_params): + stats = AverageMeter() + stats.first_layer = None + stats.last_layer = None + for n, p in named_params: + if (p.grad is not None) and not (n.endswith('.bias') or len(p.shape) == 1): + grad_norm = float(torch.norm(p.grad.data)) + stats.update(grad_norm) + if 'qkv' in n: + stats.last_layer = grad_norm + if stats.first_layer is None: + stats.first_layer = grad_norm + if stats.first_layer is None or stats.last_layer is None: + stats.first_layer = stats.last_layer = 0. + return stats + + +def adamw_logger(optimizer): + """ logging magnitude of first and second momentum buffers in adamw """ + # TODO: assert that optimizer is instance of torch.optim.AdamW + state = optimizer.state_dict().get('state') + exp_avg_stats = AverageMeter() + exp_avg_sq_stats = AverageMeter() + for key in state: + s = state.get(key) + exp_avg_stats.update(float(s.get('exp_avg').abs().mean())) + exp_avg_sq_stats.update(float(s.get('exp_avg_sq').abs().mean())) + return {'exp_avg': exp_avg_stats, 'exp_avg_sq': exp_avg_sq_stats} diff --git a/build/lib/utils/monitoring.py b/build/lib/utils/monitoring.py new file mode 100644 index 00000000..95a7845a --- /dev/null +++ b/build/lib/utils/monitoring.py @@ -0,0 +1,175 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. 
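+#
+# Usage sketch (illustrative only, not part of this patch): sample the current
+# process every few seconds and forward each ResourceStatsSample to a callback:
+#
+#     def log_stats(sample):
+#         print(sample.cpu_percent, sample.num_threads)
+#
+#     monitor = ResourceMonitoringThread(refresh_interval=2, stats_callback_fn=log_stats)
+#     monitor.start()
+#     ...
+#     monitor.stop()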
+#
+
+import dataclasses
+import threading
+import time
+from typing import Tuple
+
+import psutil
+
+
+@dataclasses.dataclass
+class ResourceStatsSample:
+    timestamp: float
+    cpu_percent: float
+    read_count: int
+    write_count: int
+    read_bytes: int
+    write_bytes: int
+    read_chars: int
+    write_chars: int
+    cpu_times_user: float
+    cpu_times_system: float
+    cpu_times_children_user: float
+    cpu_times_children_system: float
+    cpu_times_iowait: float
+    cpu_affinity: str
+    cpu_num: int
+    num_threads: int
+    num_voluntary_ctx_switches: int
+    num_involuntary_ctx_switches: int
+
+    def as_tuple(self) -> Tuple:
+        """Return values mirroring fields."""
+        return dataclasses.astuple(self)
+
+    def fields(self) -> Tuple[dataclasses.Field, ...]:
+        """Return fields in this dataclass."""
+        return dataclasses.fields(self.__class__)
+
+
+class ResourceMonitoringThread(threading.Thread):
+    def __init__(self, pid=None, refresh_interval=None, stats_callback_fn=None):
+        """Starts a thread to monitor pid every refresh_interval seconds.
+
+        Passes a ResourceStatsSample object to the callback."""
+        super(ResourceMonitoringThread, self).__init__()
+        if refresh_interval is None:
+            refresh_interval = 5
+        self.is_running_event = threading.Event()
+        self.p = psutil.Process(pid)
+        self.refresh_interval = refresh_interval
+        if stats_callback_fn is None:
+            # Default callback: just print the sampled stats
+            def stats_callback_fn(resource_sample: ResourceStatsSample):
+                print(
+                    f"PID {self.p.pid} Stats: {resource_sample}")
+        elif not callable(stats_callback_fn):
+            raise ValueError("Callback needs to be callable, got {}".format(
+                type(stats_callback_fn)))
+        self.stats_callback_fn = stats_callback_fn
+
+    def stop(self) -> None:
+        self.is_running_event.set()
+
+    def run(self) -> None:
+        while not self.is_running_event.is_set():
+            self.sample_counters()
+            self.is_running_event.wait(self.refresh_interval)
+
+    def log_sample(self, resource_sample: ResourceStatsSample) -> None:
+        self.stats_callback_fn(resource_sample)
+
+    def sample_counters(self) -> None:
+        if not self.p.is_running():
+            self.stop()
+            return
+
+        with self.p.oneshot():
+            cpu_percent = self.p.cpu_percent()
+            cpu_times = self.p.cpu_times()
+            io_counters = self.p.io_counters()
+            cpu_affinity = self.p.cpu_affinity()
+            cpu_num = self.p.cpu_num()
+            num_threads = self.p.num_threads()
+            num_ctx_switches = self.p.num_ctx_switches()
+            timestamp = time.time()
+
+        # read_chars/write_chars are reported by psutil on Linux only
+        read_count = io_counters.read_count
+        write_count = io_counters.write_count
+        read_bytes = io_counters.read_bytes
+        write_bytes = io_counters.write_bytes
+        read_chars = io_counters.read_chars
+        write_chars = io_counters.write_chars
+
+        def compress_cpu_affinity(cpu_affinity):
+            """Change list representation to interval/range representation."""
+            if not cpu_affinity:
+                return ""
+            cpu_affinity_compressed = []
+            min_x = None
+            max_x = None
+            last_x = None
+
+            # Find contiguous ranges
+            for x in cpu_affinity:
+                if last_x is None:
+                    # Start interval
+                    min_x = x
+                    max_x = x
+                    last_x = x
+                    continue
+                elif x == (last_x + 1):
+                    # Move interval up
+                    max_x = x
+                elif max_x is not None:
+                    # Interval ended, start again
+                    if min_x == max_x:
+                        cpu_affinity_compressed.append("{}".format(min_x))
+                    else:
+                        cpu_affinity_compressed.append(
+                            "{}-{}".format(min_x, max_x))
+                    min_x = x
+                    max_x = x
+                    last_x = x
+            # Terminate last range
+            if max_x is not None:
+                if min_x == max_x:
+                    cpu_affinity_compressed.append("{}".format(min_x))
+                else:
+                    cpu_affinity_compressed.append(
+                        "{}-{}".format(min_x, max_x))
+
+            # Concat
+            cpu_affinity_compressed = 
",".join(cpu_affinity_compressed) + + return cpu_affinity_compressed + + cpu_affinity = compress_cpu_affinity(cpu_affinity) + + resource_sample = ResourceStatsSample( + timestamp=timestamp, + cpu_percent=cpu_percent, + read_count=read_count, + write_count=write_count, + read_bytes=read_bytes, + write_bytes=write_bytes, + read_chars=read_chars, + write_chars=write_chars, + cpu_times_user=cpu_times.user, + cpu_times_system=cpu_times.system, + cpu_times_children_user=cpu_times.children_user, + cpu_times_children_system=cpu_times.children_system, + cpu_times_iowait=cpu_times.iowait, + cpu_affinity=cpu_affinity, + cpu_num=cpu_num, + num_threads=num_threads, + num_voluntary_ctx_switches=num_ctx_switches.voluntary, + num_involuntary_ctx_switches=num_ctx_switches.involuntary, + ) + self.log_sample(resource_sample) + + +if __name__ == "__main__": + import multiprocessing + import time + pid = multiprocessing.current_process().pid + monitor_thread = ResourceMonitoringThread(pid, 1) + monitor_thread.start() + time.sleep(5) + print("Shutdown") + monitor_thread.stop() diff --git a/build/lib/utils/schedulers.py b/build/lib/utils/schedulers.py new file mode 100644 index 00000000..df02e2b0 --- /dev/null +++ b/build/lib/utils/schedulers.py @@ -0,0 +1,76 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. +# + +import math + + +class WarmupCosineSchedule(object): + + def __init__( + self, + optimizer, + warmup_steps, + start_lr, + ref_lr, + T_max, + last_epoch=-1, + final_lr=0. + ): + self.optimizer = optimizer + self.start_lr = start_lr + self.ref_lr = ref_lr + self.final_lr = final_lr + self.warmup_steps = warmup_steps + self.T_max = T_max - warmup_steps + self._step = 0. + + def step(self): + self._step += 1 + if self._step < self.warmup_steps: + progress = float(self._step) / float(max(1, self.warmup_steps)) + new_lr = self.start_lr + progress * (self.ref_lr - self.start_lr) + else: + # -- progress after warmup + progress = float(self._step - self.warmup_steps) / float(max(1, self.T_max)) + new_lr = max(self.final_lr, + self.final_lr + (self.ref_lr - self.final_lr) * 0.5 * (1. + math.cos(math.pi * progress))) + + for group in self.optimizer.param_groups: + group['lr'] = new_lr + + return new_lr + + +class CosineWDSchedule(object): + + def __init__( + self, + optimizer, + ref_wd, + T_max, + final_wd=0. + ): + self.optimizer = optimizer + self.ref_wd = ref_wd + self.final_wd = final_wd + self.T_max = T_max + self._step = 0. + + def step(self): + self._step += 1 + progress = self._step / self.T_max + new_wd = self.final_wd + (self.ref_wd - self.final_wd) * 0.5 * (1. + math.cos(math.pi * progress)) + + if self.final_wd <= self.ref_wd: + new_wd = max(self.final_wd, new_wd) + else: + new_wd = min(self.final_wd, new_wd) + + for group in self.optimizer.param_groups: + if ('WD_exclude' not in group) or not group['WD_exclude']: + group['weight_decay'] = new_wd + return new_wd diff --git a/build/lib/utils/tensors.py b/build/lib/utils/tensors.py new file mode 100644 index 00000000..6ae28509 --- /dev/null +++ b/build/lib/utils/tensors.py @@ -0,0 +1,71 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. 
+#
+
+import math
+
+import torch
+
+from logging import getLogger
+
+logger = getLogger()
+
+
+def _no_grad_trunc_normal_(tensor, mean, std, a, b):
+    # Cut & paste from PyTorch official master until it's in a few official releases - RW
+    # Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
+    def norm_cdf(x):
+        # Computes standard normal cumulative distribution function
+        return (1. + math.erf(x / math.sqrt(2.))) / 2.
+
+    with torch.no_grad():
+        # Values are generated by using a truncated uniform distribution and
+        # then using the inverse CDF for the normal distribution.
+        # Get upper and lower cdf values
+        l = norm_cdf((a - mean) / std)
+        u = norm_cdf((b - mean) / std)
+
+        # Uniformly fill tensor with values from [l, u], then translate to
+        # [2l-1, 2u-1].
+        tensor.uniform_(2 * l - 1, 2 * u - 1)
+
+        # Use inverse cdf transform for normal distribution to get truncated
+        # standard normal
+        tensor.erfinv_()
+
+        # Transform to proper mean, std
+        tensor.mul_(std * math.sqrt(2.))
+        tensor.add_(mean)
+
+        # Clamp to ensure it's in the proper range
+        tensor.clamp_(min=a, max=b)
+        return tensor
+
+
+def trunc_normal_(tensor, mean=0., std=1., a=-2., b=2.):
+    # type: (Tensor, float, float, float, float) -> Tensor
+    return _no_grad_trunc_normal_(tensor, mean, std, a, b)
+
+
+def apply_masks(x, masks):
+    """
+    :param x: tensor of shape [B (batch-size), N (num-patches), D (feature-dim)]
+    :param masks: list of tensors containing indices of patches [0,N) to keep
+    """
+    all_x = []
+    for m in masks:
+        mask_keep = m.unsqueeze(-1).repeat(1, 1, x.size(-1))
+        all_x += [torch.gather(x, dim=1, index=mask_keep)]
+    return torch.cat(all_x, dim=0)
+
+
+def repeat_interleave_batch(x, B, repeat):
+    N = len(x) // B
+    x = torch.cat([
+        torch.cat([x[i*B:(i+1)*B] for _ in range(repeat)], dim=0)
+        for i in range(N)
+    ], dim=0)
+    return x
diff --git a/dist/jepa-0.0.1-py3.9.egg b/dist/jepa-0.0.1-py3.9.egg
new file mode 100644
index 0000000000000000000000000000000000000000..f0c3d8b02c8f414978df190496eca8b7309c7e09
GIT binary patch
literal 105636
(binary payload omitted)
zya9^bz=RRSn5VPpiq%cgkJ&2?F62%|%f8*|Qb_OPm`UvJ(#6!>X~LeB46b_!Yuax! zPc~ixoN^M>?0q?!3+~g2*5&zgC*^k%z4Hp5T-}ny-jOm0k%K6%Mvl9v7D}fQUyX=d zb7_52KyS>(5PH|B(vch&C>ba45)I!Te18)g>*7|&u(`Rx9U5Ww(k<}08XbRe8m|7^PJo8^k`J< zW|ogRSv;E#vFkA4l9$Y>ks)R$J4+BuE5O6haZ{*cM(CuK@;UIPIirDH*|;%blxejB z9H3KL_C?pOP=LzLK$&Ghh~VU&ql+Bdix^dwEN62qa<`8$aHxbcNks?;89cna%vL53 zmSQi^4o#OP!{~fp_pWdqj@7g(=U4kXhEio6SFa(pJ)CaXVx~G`P5nkV3&+@DDYAO| z{M!B8KdY_S%)(?(G2t64o_R+~mIVCSAYQ1kW+P=OJa7Gr*0B)5#vM|!Dp9;t5EP*};SMT~~oVW|W9dIPM;hW%6Ne1;`7 zye|(BlhPSb17I&E4D22kj-UWNq&JWN`iB@%p#y>k8fo~s(4|pyxZM+3Qyu92Sg|8o zn%7jm*A#PZyH#us<1aY{=cC2fC%<2~&Ua8*OJUj#z7_mKCS<1w8{IgG9O{lQ`)D#L z{eYWFTay;nwTD`2Ct1AE-<}TL!HN@>rpOH|N?BCH@$OWunE6Smrs#opNmx@O2m13_ zj8p#t+X}+mP>wH?6Qf{BXo_3WS3Ms1;z7TS<0Re~+Lh{|D9i`HKqxmu++fCd^m=3>JMm=< zK0uQr1Ef}3zaT97W_$Y(;0T<;tMjg~udT8MbKWLS@PvAMBALFUu~yf?aj1%A-gtSm zyX}jmHZZe??68Alkp3L~)gLU4K8@bUR0yE6i{;jrWHga+6Qs7ym zxSrZq47iU#RL6j0o`t*WN5vQsI|?yEC{MB;wk3h=N%8P&S9{$M!QQzt{V@nOW~tHO zmEfoo;LVxuX@_@)?qP)e!^$~veR`L+?|j4Z!_aNTR!IH>W5m7UuCUGs9VuweZN|nI z+a2ZiKEZ6xai?U}3LRXiB;fJ82j9q{R;)upBWxRoAN^;gk>>k%au0fBF*u?5>35xZ zE6cfO8twc|??%iCXYm87f8Q=0?XKte`!j&PgzCmNcaw6|{vl;p6;Z9(bfI71UKxdL zp-P};N?w6`@E$MX>+<6+PSNNy5UNf?q^N@VJPcXp(c&(f4BM89b9I~i zM&`8>p~Vi~^C)v2=oRxCy^S?U4{pfcb4B<@XSe+K3M&6KPL|~t%#GxR!wDD!uRo&p zR&TgV#J4YF_vC3XEUkqvoM>17|kb^GlV>XM=_=^xXW`;Y&soo zx2Qc72NA+9@$KCCbr{S=*HI>VHIJEUYUy0{ZxhoYt1(;NhS|h*XBJjVntdZylcGk- z;g*qNz@5YxThg|eRHK4O_Z~TQdndCgjoRqPWWTXV-gk!1%%)~48nqHCrc%iQ#nm$H z20tQz#;nuq)yPc=jc1_FZGA2ncw}TTft3(>Dy>Z+b#UK^#Gq2<=a^)xQz9CxlX=iD zBHHE@Sv84dht9;c+2nP0$99?i9y4?SIe${Z>3flL`vjbqpb)zq%u~XdZ_iF^Wm+;R zz-6v6mDT3`r_a^B3umGx>$!VrtSUz*MZR`o-R0MTLb13HdA%2gFhIN=wigySd;W*N zJh8RD~Q&|7}6n4fY z)=qQ=&dw&b&K9mFdiHj9)+UZWjQ*mU)X&%uq32u;COc}Oaf}~= z$IfduXTnW6R%yu{uws}%?JA^bSq%wOw9rM>nJiY90I)SF1AvDQ)$#|zr@z=W(H>?r zlGMrCMIJ{4SO;u@rwbID(5=5yEr=kEbx8yeib2`HGzsHgBr1ShC`jK=U?aCYWz^Kv zfZ>^?V^83jCI}^hQCKCgIya5#NB4cn&lx!u%0V+XT?vdl;xrN?9G~yydZjy_N167z z@ybmUNHdeFI`LOc3Cz@2Kh?{fXenQi&v0KI&e7b2u`s$ShD|$R*P{>c@$izo`X;ZM zY8?p5DyyNI(>J*MKMVM*3T}IrPy^5!@IuUifSq#rNL6{{f9Z>9{NZ(<QW;AmiyF z9AexUHp<@v%%y-TMJf|bn4mPUXUWTf5ruEm#v<+dTcmp+=^2u=j^`7pV`tjMjz_dw zmnto!|Gt=55hvTGuD}ZA-HRhAFcezn`rUi$y}Q6YwS|$!zMyyZ z_eB*0`>R9VkEj&g-K`<)s)M4Z%0AmrN_rf6<8z9#m^|8x7yWlw4=aK8$g;L?-%Qcl z0PC9-URbROLiuK+S$gw=y}}JcyDC!xqZNEtWn|fr;XHp&;rHO=3ewviM_U#Qi-A>< zU2hZd2;Z4pY{?-6^|FWIjXMcZRO#!!oCXN2JUWNO4C(fT7g9xCBW_JS*=KQP%A3^(Rv3%Pc0(~F;OI# zeh7BjW_(=j}3*|KF5j?`UFdVdQN0Pp)afvWi=8 zzwPt`O~`kf8?IXMT7J;y2c_-MMkMHj$ZV#lSBGd8u3JqZ4Jmfbd4J3yB<&&LvWUbQ z6(l+48OR{aI6n9}Q;PWETPLnqdXPye$-AI@sp$|ebyrKtB$yRBvl+XbO-0r;+c2Wql(dy$Rz1lDHUT+ zKCCvG0khxkIB3$TS&Km-L{t^h2ky%?dbTwa`?iB)fArl|Rpo73hc#xsK2o!7K7<*P zp9G9~Uz)1)ny{$P$2;7w<%$f!zv}6J?#<#YQsVrP>vf-?UaIiHYuIGc1XPRrD7bJNs=VkeWn1!ff*XWeON+x zqx%7>6|m+(o$;k}4|*3CpcKs~y zJyEZ%rlpT7J+PHPpQ{44q@nKolnS)IuP+sW{X&}jEK(22r5at9z|18>GCd026PZoKx8OjoxWqPv>ezz0TPzJusOXOOGwey3wHck zhKb!^u&9L@(HqIxAn0MSf>&#Is$ly|zJ35Vwoh(4FSZQh&Bj)6lQb|n&c-NH#RRi3 z8YhPcSyg!jg{BQr!xbI@ey0IPG%muG`Z2 zy~uNK-I0JX2KsT?RO~#82N|WW5^H*GN((b>TpYwc9k;1;)n;3ov)+0Jar5>Pmx&=D zF8J|z<7E$u(0o+cK2Y|Iz>5W#MQfE-<&N?k+nv)i#^4ZDj_?{nzEC!9*GnLTKd}Qb zs*Pl-zdh@8aY5iL+!%~RlU$$-w&@Df!(+@O;efLXXYUTYd_23l;Jm`9ZW>eLoetw) zOKyA7HE-lGCI6zcbL&Gb&b2FZlsM`@7zBwhnW@ARP5B$DM438MR6h`1+F~qcf@H0#+Y1#l&ShFn98%R%L7unQBIy9I2wso z>bfaHttu7ow(|yexCRf9wiX>iOU?F zUk_9h0h|H%%_QQ?C`8UP{yqUGX$2Qf*?It0L5|1kXT&_tEp}9(-BGR8pl`uqVma3Q zV(if6#w;to5{a2CV+=aOR9Yxb-)X{gzxw9mx{O~LnD9?8%ed$a1PGzU?mw4f6&8hx zGqQB6`-YRyUd#{ub*k)CA0>F03c@O=18yW^TfN^R2^v%1_kaT7p4JdQ^86bfjX9?{ 
z=b6RL<+xZ?rjbx#NNh)Ehq%~3A|f01(#{|}gf|E9eQGq)A@6O<2*cDa((Af;xO6r5 z?23>>OIrxz%TTx~U#QIe0TZzc%l(O5fchgE0^);{0iSH$5H(;_H}!>A<|Od+!Ewq) z%cG$wC^2<$xdcA;^dm(LS3#_#?)I_Qb`UI6+#sI^7ke&K&}>o8M|O??9X{4}UkgK# zb(4Sb%<5-#%x*4QSmx!DL_ldo(nCC5Sy^w7iuF!jYb!qJ*3`CFvi+b{ zrWpAOf|72vk#GF1ea4J(gll0EBr>FIGia4=S}A zf!^O2I8rGtOs@{;L}-E{OblL2QFHmD2hgsL%_^}O{x?#>d5@}g?=m-}J)*N!jb2S_ zw_K@;7g;X72KVi6GOfOoASI@9VOhOi--6OAi~*EP3<2~qj+dGArkwFE8N%*9hJKc` zl`GqGa4&M?JV{ms#wkRsEY^c0E7O8FGuyR%fk%^KW!7W2+Sumx zN%d7$x8w9Z0ZRR|-#wtM-f`}WMtUV9%R>c#Gt;;H_Y+*L_aR;bI-+!KUna*ldJu^_ z-6istT(zPKnZGIJq=VwO7CUdCQ2x-5B#Zp>fLtA z9*iSlf)1aHfg%|h896?@uYXVVrlWF|qER&Y#FVxb$7j2-hx5DiAdEV$>G z=1`FqEowU$;U@P?BngxtyHW?!$7t7elrZi>%|=v@WRWKtS4@b8POVhrj(3EU?+itX-W@0Isgx`jjjTL0ryWPl zsDn;dI7(y|LPd?7-^d5*a6a`B77-{X5n-s55Kj=LMj%R?)pU5#QAnXhZv72tkQt!{ z!6`Jl;=%XU9s=ZD?PTiinU{8Y**YW{A%(dCsxRf`anT1hX6tT6vcZyE%h}${~CN9!89)g=`5)ujAO?Jm6q!h=E$c(ZCbC6V+Tl1TzWcZfCl@H6SVwER{4YTVhln`YJ^u=sducZ@ovjsmOF zqD#=qm)L2*KNn1J=0p?d0{*^?^ffFcL|5kmoWR4krt#s=9RiYH1DO0{L6_b8`MC~@3S9;mQ`UI6gvud^fteYa zqeyIvC$@;S2O43WrpN&>;pLa1#+sY<3x9A7kmJet!1y*_YuOG!qtf`5VyPBj&^Hl#fIq}-}t&R{MMg^}7a@!Za^MGo-)W0=h9rA%o;E1bZZ#F9Lpif5f4E^;n zH=Y?jj;fqFzN%8Oc)Ir)w?@163iKF2UR%G&eH+rzXBhJB@IyF!L}yh|24v!z+u&Y< zB9y@99p(9r|IW>*Hyh|LE0Bzu5L&2#+saSjqaOItmR50c-JXf{TTPwOd__p3ejL%} zkw5(~>CM-`Y?>L(dui;Y1(xEfWH7qx%(!BI{RJnqlD*xbo>H8_NO8w~>Vm_Ftv2N*DEZ7o2ij$C+- z1o(eK*6T2z7eY@Hy3s!>t%VKs1X^Ra_z?on=6VdkzYZu0B12o8bVrdeI7_KDvSxNZ#5Q2`RG?D%>HFk~E)sw$vYL%+ICV!RBJTzF{h=f?c0#Q6q-_s9F$ z6-A?j_UELz_2?~y@B!>}qtg&&l^h{Zc=lQ!io@VOIdAeqHO$@Ys=f`Nl)z!hJEhRk z3o~bOlx1pBS+8^{^gtQ44X-?zKe1o_1|Yqp@nTf}LL|JuS6F~th zreL_qOvZIf(m@@K7sKQWey-3MeW0HA^IrnyrY0N`P>$Nswq^z{h7F*ZWby-)H(b=L zUS|`DCOQ>K$F{K_PjyUMN%((Twit7v(9ZH?UuM|2gaAV9yKEM@sgh#2jDU`6?%vv! zpqnDo5)>Cd7>^bQ5#Q3bxCjUnrR8 z(!~~sxMRu_o+sV6osn@_NWBy=N)W7+cR25n(;b6x$_~4V;`-e5Z-Rz{M25^Y7*Be7 z%J|OH>w{@)(n|CDH@D8`WOCZ@=GUDf?t!X2s-_2hFTR2z0Y0Jjc4`Y@U_FNduIGY2 z?;^Lhkn?kZhGTm2lcgHpA*Qs%9VDpfCHe)VJCxM6 zbqrjk8_u-pa9GrFIT!yX!M-=*XS}7F{lkXIq|_>|zP6^{A<;8@K-TSJ10X;L*m+2Y zXHZMqorPRn6?SIT9<&gbKSG~URACXR2eWK&FtZNKy>vE2*Et$*^~ay?`q$dfp}|57 z&NG{XjjIW=guwqmg`9YsUMkRH=1~&?-rYbEtpOAX0vO7csn|GpdpgE(Bklzah=WFW2m{p zaOyqTfzVEep~+yW4s@}-L!y! 
zF=1g&8ujm>Q)--bAv2Nn{H{U6QWBpRA!n}-qu?t^uF}A95#EybR+$>hHW@Z~>yev0 zrTov|eO<*h`pk$0>({!l5eHNN??3^~ny>v^Y(=MQffJN^9;Y*8)I3O>3NZF2{tcTM zSB5lvuDDAwF_I2G5EH$6{`@<(e|D$I9#SuHWKO-9Y5g-t!Q>z(mqN85Vj_=KJ!w0U zDhh%GFjQqK1WFB<$c7b?;zn&`V*7lnkDc(^@qvd$O^5b)N!_colRF*$3ZzJf!J6cn zkwsr|tP>oA&u47r32@h>p2OXT7%xGCRU8i#>2@{GA4j$w{e)yf;S=VJ!-YgUr0%cX zb?5m?&~@kS_ra9xx4`rZDlGqrv-0zWH1FCy#q!NL9+Q9GMG7THLs0 zMNRBV1_OKh)zy@N*TLj4^(7kYPuHcxj-U+|`2e&4WHzAJHuU8PR_H+J<5_~B6XHIX|UMm6E93llThFuVA} zwdwugn=8qmY&osbIJ0kvQFI)v^<{)lccYI__d4N+`To-f)RZ*LA>PeoV@tstwH#}6*fc6Yxg##eXWTWE-vlA$$go%l_O z+jsUa_V*_;f3y!?_644sJisi}`jr&GqS zlGUjatU9Y{7Hm+9L()*x-6S|7uwwdXUo_jVcfbDnb$-X~;@k6Ey)(n@w*aAw|5PLq z`S3w>4ziS!+mQ451e%M8{3s`X2{95EF}#WCK^b?emz3NqC2yl#y1}&VjMu5t6?P@* zi*qcNpn#e?an#>2-j*U6*0sUn*)Ei>+OF9F(mqkH#E%e0kmerXL!l;4>jb83I92*e@ zGVd5Y3=ug}iw98fQj3Z-0T%P=_lAd--%-se#~Yr4U9X2z1abpM zvElU{$pfc5qtiJX9f(`*e#%#uwr*ZRWUFQ@Q{grCO!DTQ2&p*MIsG*Kn$A&9ypr`nxt7_I2^ChkZB>@;S3=%x%AD5UWP1S`mZp~D%@sS zpkKqhj1mXH4&v)nQqP5MlXp9bb+dD^Z=VNcRp@#?bo-s3JoD0d31m=%g_`rrYGjGJ zx`RoA+eYbSi>)t~+9SSQ`XC43`~IbWF;L4p8EnWAU$ChrkQzhxWD(SFN~?|07QNes zaI7Zrp1(h&INz2UanI&hm%rn#xn0tiP8>d6^U}uUcxX4yb#+dsN3vCF%nu+C76{Ld zAg&wwxjKRh`{NJ=L#9quirZX@7N*wXW2r~{%lQv;FoT}(ULQWU>xkV3dhu2oYVZLY zbqNnQym{_byk61;OB*-MpnIHlJj1K1g8(=ZvvL>y9IoJ&T5r3d1K zZCwkz@qvDoItl#V9M+ky>UG9|cEuES@W6r5r4;@`tTN5Y?YBmUbLXQkrqn-!_ASdO zr8Wud@dd)n08`U{L1nTGd2=uMvAwO0fF`$#Nj#(vJridMd*CoT(}N?t)IhKx zFd|t?H`nh$9xm)d&n?SpJ!0n`9zZP026wZ%Jw0GY0XFt2Xn1m58bsCamP+S?suxl0 z?>)M&UY{&~+NY7L=aE`h>NJJe6U-Wuz=Tua$0t_P4t=HtQQT?tI!fkQ*aGHxwk-t< zaBfcJ>!Qn;7!aGkGGg)NkXvR&j=>^ujj2lWBJ|<%J{A%&TQ#S{bKd!!A;<+TVmBxD zlmQ%h$iin70L2eD%G$+{Lz1_E76%x@0zk~cn!YJ@=xk_ITuy*Ds4tk@K9jdgJfF8obdQUbKw1cnuSx86ATWI( zNFIudvFIsCqsUOfw5{D=zd1ta^#);<0NRuee`#O_(ZM7(L!zEne5g{CKOdx z3~^i^L)PeK`2jle{R$OG*+aG>?@li}*d2;T=L(=X4w$C+V()p|&h_PqT8M!*D@tc% zc*QFfWf5FKY#QMg3iJtPKAiZ?FTqbJ_GwhK-?<5bsRViHt#M<4v40Am*yiN2!@YKz~JL#uB)oBP6eHUH4^;7c2fg z-^BcKkgu|w)v~lwgI;1@v_t(gvHm?w08L|FY|a{!cDfMAK2uZUZ)?h%jM|y1>|#zP zK4HZ?C1*L*sVdVR;=sVO&y>2fQm(sC9rp$Q?`n(%EYFwvA7NrPIsgFm|9(^dCnNUn zS9FZVdK|VoCjaq-kGJM6><&3!$9a@E#4GXYv+^z{xctXY7^(vn68JVvq3sbvMl)qB(YBSgGs|jm zu;;o1jHa4!y)CEOx|@yWa-UH93CcQY3CX#qhTSv=bCM3h#?Fy!pRB&MDjVuCKy!;v zB8RHnHd$kzF3+q}jG5RnvgA&P!h#+w_v2sI6kNNv2(N^8M}$T0+VzlO`|1~-Yxk=! 
zxHM%c5&H6WMe&BtnMpY4?sKmQY>^0hq3!mgc4|E({SaFyy=Lq!hjl$~)~ajsxr-BA zec(OhFsD5Rxd6&&YMn!@1n}&Wx|{qKK=!*q z(%t*q9G8Ki4Wq4Aj>UHOXJL-3O(>+uU_i)lpo4aW?!U&`h!V`*x`!Kwhet1CllGIH ze!KA4{JigCK6a7FVY|JQpa(;?8l;4oCKA6ONFl8$%h(t=4sEn0BmO!8sqAYk%>(Z# zX$>m%g$|_vZUA&`vLG{8+RfdYBwi`tWD_+ifi1{6nh~l7pecu(tJkVXNsb2&>k%~P zcB}fAK$xb@QboWU#I^|-pf(8{k=dilD_!A7!yVGTug^num>+mhJH!D?x^j#^KJ4_Y zC@R#g(_CXt*~Zhh4K^jwx=OFlHAmArk6Wz}ynfIPE$G6*`D15la0Pc@VwAD%P8xF9 zh9u&3$_{E%Von)%x|)P3B}@r?3vqd64xP`GXOmy6zK10goaVWNuCE&;G@oLHaSrD` z6LJQ@#1(*W2oh^`!JJi8f#}gFoVjw5@+|;Z7tY+~qxeW|WnRyN1VbzfQ#c+DkFw0Py;&98zT_is8)9mEk`(}6@T}ufo7#oY!LR)q z>Hed63QEXd+ZV*42^Cb@{Ppk_Mc+ctB{3?sY`U zL+44AUF(Hi!nX>SqEmB8;J8^dzr)(Wz*pbU9@#O9&3cv;+`R-HjNdN1d;5;T!fZC9 zYz)R53xkCL?2`*u5x|)XXw8M5d`zRz@6-lB!a8gXgI&_!+rsioc&HP2Q^tID*uCc!32R@ zHR#5*2`CQnaZj!iJ(NN`fs0NUv%KXENlG}?9bltSF1O1CJ9h{A+Z{*p<}EeG-@S^C z^}xD}s$Zfd$&&{%tFVSt&q(mGVU<1n0acvyCK6*(uRG_yGdi;VoOBd){*Yr8Tr zpEbK_g6)*mlsUb|d{GA^RmQRx3Q`>Ng;<9~{Cx3EN$=+I-e&ozCGp;X*WB|_pO*3A zCAE(PT7JGAmanJk)n56SQOAz#OLA8;6Hc3+CxeU>ef!9b<$!{uQPm?eM)l?S%D4-X zC8ouGHs=b)D@OH1v`m^^!?|Ib^{|><=2oxKM5g$eYetgaBULL6|-= z_8+7sIGz<+mZo)2WbSnTanw5xd9VGRh ziXBV$pRnDaynpK@Z!^XYFC7An*-wlb_D%3V*byfzXcrE6hDSp#$EPD z!6l8WO)n6kQWk3!kOFszldIelBkm&-=5%1PjDBaGoeRaq_Nuw$4Ps=}O}QDUJSc}!VSJ&_XtCp0_0b$Go8Ds)o@XD|S5Z9^NV|r;XmlMlvlJl7i zueV@&67|sb>=6>2CQZdtLK`6Ccvck)gu_&|rmsB_JeA#MNc1o?(1`_4Bq`3ozVNA; z4vk2DfjlRK88@ zDe`M77tsv1OKiofQOI}b*O>==V8mIC6=X=ElDr#muX^op+Z0CP=b%yyxBG$VD5#gT z*vGZRxf6cTI{nc#Y#yP_AYKb4614}KGUN=s{3|;L{;;*IjJjZuXJ5!=;9;E8A(AUA z{Yb_OO1zX(2(t(%jJ#0@+g9)}2@-?IgPT?WcCyMN6w0;SyDhoaJky|cGy?>hFO0oz zfOkH>us1{_=;Pm6;StD+q3xd>xxaWs02>gW#*{Js+{?aT{t$dypftF%;{pH;=RXB} zfL~L<@Tu!RZY@KFtvdxu7?8MElWe}r2?;=U+kTrQrCE~cdrK_Bh7y;Q#Xn?6z zZ{fDV%=;mmi>F|l$+WR#VeCgY&w8RM}2}&R8%5!GhedC^;Iaa|eC?+8+~wE`0;0gmP&C1_KxAOT-LFKr>_p z;9|^J_i+&Uk}ez8$RSFpX$>effGh!Mo)gw!;G|oLLp-26@=h2XMv7`wr;zZ z`EoSdZzDxhjVcxuLXh%y2MnN*rs;EqP-6?*sORziF(oNd;|zJse3=BQuvh@{Xsn>D zD~`3Y+5-2+ilghNG&7;&0>&(lPiYOX7NEoGqopp3zlZIB_~Zko23s=_C(#cDAL_KI z#qMrK7w=9jLf5Y#KOY^BLV!m0mgv-~k9ZBpM(nWZ*>g6D$xZT|pZiv8-^WM?{?D zjtEIM(!>!TSvapF-D9ip3bgmQDX zZ!?M0I2=p1?2?pxbLInxWrJG>^2LXB>%n5}oU`ixysJnBWgre(>xD^?lujGpQAw}_ z=uJ;-qDl=sR!!wGjbj3d9mm&`9?b^g{>aJ`Lj0NoT@>tL1p--cZq{_(WNHU~n15dN z*x|5X+(LJ~AYN9{A|i$v9o&hOIm(bTPSVGnA|@%n2``-1G#;=_9)Qn3w8P$racHij zx|*r^Jx!BY*PYaDsU|bda>?1+mjj=I0jwy@z%`I&Lt04as4H~KjN)6fVW#uWNkZRU z$X{3>03`{YA2L39+5KhqQoZft0Oq0;0HC2<>?9%s1vs~p5*QN{cL3smy6IRvO3(Qo zqT3dl=1UXNz2E!$8sti)wS?)ZwkrI*Wvs$odI%nKiYuF8syUuBin5!7R#Xx1?#NhC zf2==6iZ+(C|q+*I4-B^OA2Az$eeuJum;-?5MSAD8>`A75b0A9|VPf5$%m z3)}qfAgGc<+XiRZ^?Az64BtCEfkG-}xhc#2XTmobmc&t4#2u~DO?M?46a#Y`>hJ77 zq%k*7{bfcOku4gnz_1FB=7bZ%g|@=Ev1e84!LsCsM4_9(TI}j)!R6t-3 z%`lZ_y{iYwnb4Dn?UVd}Uc*wqtUurNeQFtb$wH8Cc9M{!_PA)$>R{P+xTEW7MU64< zQQQ}3k#kdIffktIW=3kZCsh2^iUAv#Rc}yjI5Dj;Z_=DE=y%#pdyR#$9kgRV^t1w7 zwTkU2g;vErm@4uo88y_?jhWx=k5u=>=cx>ctJ*4&=wf2`O_Tx1L7K?t%a&X`jTngi zhty1^R|#e9s0UYe4#?IIGRKPB%52Xvhl*P*ZP%pXtlp}UbH*lMeUoi(a1U-CQfiBA z46M1YgSx^>!(gWh9zcLAwV>;O0xPXs=S79V z|LVfuIjw2K5HT3P)%C4d(**fCXTq!^8@5+TsU&0xIw18#xn8N6)T}|9{&tr=jRM1; zCtB+nP{K2EWKUVTtS<2mr_?`Tc&L@j$qUNkW*IgH2uyi(-(Twp1+0;?ED<1F{a`m*WXTS7ZwJVH^m1WjvZkWKCqwqUjC4$KQVd;1DQek$ker z2o463l5S~H1ja7h)T^Z7U?yt|nVbRGM5B2Wp@&J(1aMimE*F(_(_jST`w*zaf4OiqIWmR$HQ*>y=uVdXU;alT;AS)Yjg&OxL z>S;#}o;+j;u#5=qjwI%9=q9%RXQ?V zTw#yW*93d;tDqPX9~%y3y!dP^QobI>iVZwfX}yatTBaKM=2jJ_?QaI8uAbi9{*5)3 z){K3VcH<38X$?5?0zeglyA{T<+&2PVTN^j4mYfP`1hbSUF3o5qT!9*HE+n$}?x`b7 zdl6DmV*A%8{0f-c6|6{RdpGC~4qFAJfuT-Ogc;3^KXf~4r`C;Fs-vMEX@F#$E7dQW 
zslP>~I`B%t_)jmM(t0Aet9h&nK|Hp)y-V9Ko>vbWZt1%Dryjjt%*UeA{gSH9sKiA* zgbHw1VP8;lU7XV{2+)(lD#ZnmJD?|>73Dp202f6S3;1YsDVn(VJ9^r`W6SuQVjtkN zul{H?l`#!B-7w7NVu)_S@#@N~`ym^k+o6$mHW$Dcj0~A0@VUb{8V{8h0BRp!2-9FZ z9_}vJ+Q?Lf!@@S;HQ!%1vE8Pr=3)0z5|h{baSzpkjK}3WgN4V2uD5>rm8pb$0WXOQ zaoB*O2+J*aLjm+fqOoc%oUecWDzeQgv{Y@L%v3ClU!G%}q6s`y-8RZ~xJja1mx$^T zRvvfwXte5s-N0Gx@K82SkiSy$$zB6-d0jt-U6o@Ii6s*-~rbRS5Wfw$W+!|2J41n_~+ z#hZMA!M^7=t{B*&@u~RMHN%2Gn{$;1Qb|d5{VbsbAQ|}kzg^1|= zImej5i4pV6yjs&uVM9Zq)uRk7H7%cL$LQ0prS7J6hh_Ih^Hi=N*K($`ucS-;@vI86 zJo*?h)9>j6*W)FTCyJtV#&YtgfzB&&42XM|XCWCTx0_$sj(vfr51?6w2trTtj`t%h zCHYH*$Hw4{#`#h`ETYCOH70mKoOFXoYN16)q5QSFWv9XoN9Vn2LWKS__Q_K?e5dc;>cS2et#B_ywVGK;$kJBGk zuCH?R*R&dAw% zdd$jiKLGq8sfM9;%p7bfYZ$GgT$p!t2fv5l#b3`gU%brrxk}UzkK4pFg-{)$`f*un zH_te)qL)w3tWZW_3!{V}DEKV5tShv|nv7g4$iU5#43ubqVYv80^NTcWZKJ-WBlIEFjYm zof~&coB;V})ylj_VJY$hHAc)GsLe6(?Ay6!^=$g}M_z@ds@t%V;v*{JiRr|s#fgA4 z5RwG<>{o6cdZI1u1ayrBfA*03v>S6(Raqm8nZ30fTfpK9D*kX&c4(04(n01a+YE9pY(fKTJ{aeCM0KJP0N*aGxvn#mOm~*=ioh>9**BZ|CONim zISWiB(hzfy?uyaIW&{oCk2rS)_>N@r#jMn$qz>g8opzQ5Hs6g6EgGrtgo=efKD98g zIBqFcf2CPoz|;%*dNusU(f^)MTyE1pC>7?*>_Ik#31gcc$DOvG=Y(A|oz}j5opl&^ z;F-1TVFU-I&^PKghD#n*kyE@%-WKqG4F7lvr{$N&> zAv4!TTzWSf`cC;KrH5&?qrdoqrsHO15<3NKc`dla|GwWj{(g!}-Tf#5+dRn8Ohe12 z&M_~D3i(7P#Us471dSLQfshP+4rOQa69LSk!+;CZw1$L#39+6DP@0gDJliawjI}=XgTu5MydGH z#s8R2nz?nQh5`&<(VvbbRIzTZ61$7$OP|QC@;bYu@4b=bWsJLNeH=<#8~QkX4Llofv3H6v ztfQfRk(qM^)uWN~3j>Yid@{$E$nAcC{ZH)o&-T~wECC1*C;-6Hk5}Cf_4R+%#s6A8 zrvKSKb+s}5$3JpXUDke`1EKR>9p-mblIp_1B~TP7h)rgkXaEUC&89LUM6}4F)_9Vr zQrV_{x3CtHXr$X-K47^p4Ximw*6{G|IHbeMOi=lw&C?)?=?Ht9Cvn(KAHB+(lT+RlX5}bp; zCo&4F2EoxJ!~%PMJ-zL3XV|nzH?@=Y~- zMnnD(1`~}#qT@`OOVKK0yJl>erl^QOS9biM9Ny~3wxa#~=Kk_>&=T_r$xO1qwu*7N zs)(@qq|(q*CQ;!S`}q^jR$vM58H&tW%}(10qb6GMw1<#Yhu7!pev=AQ=dMn8=ht>V zo~)M{cLdR4uAy5zzgPaS;#Yfb_c4w&UO=kk4v4x1Ahz7CCn)v>F|u8W(p$Jf-0TndaGySZjR2=>;Ofxy2{AzGXrK^w-bpUT{lOlg=EGgn7OeHw{q+(K- zPp4R9Vks_Ztb6~~UDk-og8G_4^2b9uZl@Z|Orv70?`!i0wOfe>4t#N&-JXZf6H6D^ z^YM@Z55Bjg#K8^k+fJN-0PPSZm}``>BSt`Y(fG)zb$G~Z)4**5)nZEqk01BX-a4@u zOen9f+gtC2%=$H)=X14JoEQi-QA!3>0qqAuuMbEnM=u!~bX~Er(j&;Q`oXX0VPy}H zb_|fYEfZE)yL?+1hpGZPB6ssMi=S?LQ`r1`I{qqL=ttkS{R-Rz>D?P$ zz>@ZHW@y&tZWv@3sXX1RMAwFW5N{$q=7eZ&D`(^)o0`O|1)7JOkLTKA?=~Tvv#4~b zy=4?d+^>11hIe{Z<+;Ljd*RBFuH^`6u8z$Zt;o~-M8s8{8_;clDJ_?#u^b3y9=^XS zib0et9%c+Rmkc(_T<{hlsyl+ZO3_BfkvToh2locD;tNG&JB0eMN71ss8?2`ebGC zlboTiY60B@?#}rZPjdxsLE}yJ3yHiSD)TDUsdYv1)$01_iMz6j*J(w~?13|s6f_Mf zW{@}$GiV=OjXl8`vPufHEmz#Z-zdr_w>QZ}>i0&cre~RFC6kIkW7uuCl10Kbth9&g zxDL31O-4$|pxDIeG*_%)EXegN%Ep$(X6!p|?Pl`L@2Vtow?xOPB+}ihQ^~p5aq@p; zSR=^K|BiyGQ}UF+c}E&tFJ3o{*-kUT#&bC@4cg7u*3unq?X{<6o(^tIeEqkalN(77 zler)B2CARdiTOX;=07PN|0JLM(>PURtv49{)i@7~l=tx!R)irrLD~psgy2mXBUz>* zSBQyY);GW2ewhAty9~QH)RnreiOgzpZ{X%Ue(`T_lM!1$FB9On%3ZPnx?~6WCLm!r znfaF`>>(9X_M1FgLlV;PGzMQ`@)`_2e4jv+PzyvG2M8ydPEn~71A;KbmvD553<3;y zC509IYx`?FzRiHbKFkmdprCt1+A#Of8Hl!{o$%6h=Iuv9rtmmkH4y_O1XE2j?L?%D zu$0fO!c_X%2i~Eq(lv1Q291tsrr(D(0c}qQtyTBIw#t2pwe<1<3!q2IV5I1adzuww zyDI2V7idpddSY*zBJAS)$up9Imd`b^yP1&7qh*$jCy^tAcDkj}?_aX93ZHylj zc{$#9Nh`kxNB^QwBB8I7Ap3#q56ATzK?BF#L`q1{lqnS)vyGj6%5C^jKh6D8uW&qS zN~*JX!kG0y{p3fPzKf9FG0({?BR6X4@I2gJZTj-nG8kznptwl-q zpHlFCuGXNVcR~o=5Y&S5wWtQ?wUEjU;KLHt`m;Ea5Pz_6T3vn7uHnQFBpIO>EmqH6 zzr`6;d5TxT!-5Afy$i(~WIgC@YjaL8408N}QY6pCKO z_y8z_vsk%DaZs7pT}D45aCA^MfE@{((~P$mP4%+gcdRxxQ-m)KCL?zCedvPiR;c|w zZMR<~d|(!*lQAKDG!#VA5LT6>^-+z7#EcVGoqt`5c}z~^ukC1wu312}PjfpYGvbI; z5o$Wj9B)SUWMXx_~siWl-=`wm6;jXwd!1Pd>0^Z^_~8zPU=1$vn00DGSQm(ZY|ndv|jx9>R%_``{`1D1A9zyV>#HsR+*MrZR)?IH-+!_G z{b!uQ(z(D)00sbf|9MWw_#fq9@BB|O{Kq_H8@2zhNFYSY>zuW+s=$7$FwK 
z;2jl(Oay0jp8g_1rGW$nX}yS(yQth2bV_CLs6I6xKgqlPc+Z{&9nqUNr7e#ikwJw0 zV+X!jXm17w{1k58q#=Bqi-GGph)zLc@*uNIvYQ(l(T2?~6#H@hQ&6033+piKS?M1% zS1tj}-1H&EYC`V{Pp4B&AwKZ6C;89zj(#^@5ShsracHg%m&B+S%lvxa?y>A+AVIjv z=INy(IE*+Xm}wrR4ymY2jEdKPbG#Lb#2sh#AUws*!r6jl(vi0E-W+iyGR!CK3@Mt?YPRy zkj`4!m&2Y0K3f|GiX)EBE9DlJQj6?>;C??Vv8as*OfjhPO$j-tgoaN~IP5+2KzK+x z*dXQnsCn_ZWen9{Rt#Iw30{#w$Yk6r7B@jBtb-)NChocm-_+WcE<>TDEXkE``88|w z2jhEjWB9lCu4i*j)z#MXIz|)k=-Sh%I~y@DbFbwGYVyLaFK?j@%Jbi~$qiB9&4Zz(n8xcum%a?IM}Op?gAI@QwgS8lar#t>dk zwM=VFo~YFcL%zWl%0)1O+2@|(bQy2t%r;i--;HHfULI7Zb%R9?$wMTQmtZY~Evo6E(i-ELrj|&ubX>+{LHn&p@zEzh^#m+Qlr6Suh~~ASkRz^Qvx` zVfQ^dvYf$MTb?dl5zan*_rTEM^7nwbakn_v*9{mmQ5|s}lcPRLs}*>`*rX7*JSNzj zb9L>HH(wP{@zDBlsS4U&w7jA z2>h?-_#X@Jbe}Z{$e*Q4lpjY7`u~28|Eqle|3ajHXTxGNCKETrP=Bz1wfXS*2)oAh#^h=XPBufCd$I6a7kJeU1#Ncnv>ZgKwhy~W8pYT z+LPKyJaA8&Q-L_;hnAWBG13!FD$f*>?p~}$T^;rM#$~BndS6_b5gxj9a*|Y@g|;>n z+fV4FBc;SQ;$h}O!b$N7=5GIC9_TGA)3rxye70}Bu1eFXjx`#+>fQ591Ei+WrdN=k zlOIY=iEmwoffu45A8t!~Se6!^^y*n|&-B7ts$PionBibrG!!hno^k=UzB?J3hy z^Dy!BDYqc$&5x{@l)lWRh*2tI-8^NcH6vByN>{(79JFhYWYQKa)<5yUanV>`BzMr< zPk>p*Fa@`9DS;`+-GRl!fa|tn>PzpALDdMO-_XxI{H)zc5bNU?#>?mDwQxGh+-yoN zx8#*=@vzefZm)AgpT&>7)#_Z#|J#tBk6DV#VAp5?djD}(I3%{bAaRCa{g4gEm5beK zXZ!JPU(0ByGvDFT)cX!7Ey=b%N&W>xr(yDMN~zYGvX4JZ2%*)>^v`DyvG$deY+r#m8f)&(jWnkt$Gm-QGW=#Vk*7WhIq(VDPOyBw_NlURQU6U6 z9~Y5uD$IIf=Gw5ZD}3&Dq_UjO8!616(x1C~5I$>CQmxmM0!#nk2)S~1f7%r_ZwR6i zWY^pH@-lFSGiK4~t{tsA;o2(6>6o_MlVQNF_P0gGStK(8GJl26dEn?bamP5{{Lq>P zKDKF!0GmY_U2m!`E$a-PIkK^~I3E}p2wV<^F*9qCTXewUXrwWMMeoqk3Lf`ObY7Rt zMAc$rb2jAr=ie++J9J{>5)Q4zqZB@{hL}z5o=+~EOf2)~Zm#h?{th^Z-u-0(NB=b_c%I-7}V0JfMXQSbrCNA~FEA zLh8sS(3md@$on3wQGv!Qv!}~kSNdoGJ5Otppb5~zmuDf=kj`lRQ8|R47>OW|08;Uu z8bM68M<_!^ML`sJaK}=;Ld!it?o)7hCz4K`zOq`86Xmc7nKwai`u$Xz^_-;jos&l{IE)q2xQjS) zz*P-R!roz8SMJ3w2-ZEn&~CG1Rz{+wI|a}6dP%9#qvEU<&B%1@m~8Rr(h0i8!I`yHsLLh&e*B-h+YF7Wr{&m(Lqqxs$(P;HgMzLmEz zhab@69Z2dK+*3wK@kYFH^<2v{pRy1G?4uR%J>J3fdy%%rXO>yGzTcYsqR$ul>Qe(byx$p4emYmolf*lRZ1b-fZXg9(5{RAWwnUJOAb zrpl~!8axJit)gYQT`_pCa2c**7n-KBIS%w{IYT1q;Z&}$!c@5u1WUFP2Dwt%Jo*k2 zooXI_cz2Z4BQct-`4JwL@;9u#Mn1Mf@7UID>wa=G*e zpTks99HsO4%7k8tS?qIP88iOmzEWne&vvNpfZbNHnZO;`} z+27W%Hm$;{nj#91yahEzSxtxej=$f50;1|97hTe=jKiK35ebPDjlPpbQ^R%B-=t ziAIf4MZN&%L0<)qoC%63%#70of)kb!W{xgdBwWdnX^cNCfXctIpVWTOWr4_lLFafg z%PwoJCI+NKZZCWIT)*>0EH42C#(jJ-Z{9$>3aa77o+DPB(b)w;j7S0!0(eM)pk9PS zr1WufnGy)NZqzq7MhFs+FUPb z(6v5_s8&q#P@m6V&VO8*zg?+vVOoC?PB>mttzp`P)v^dkvBmdJrv2VxTGbee0;TWm z(2_JQO4$Xibp~A~ba6TM_~^9B;;MkU_T!M+^#PvO6=obBJd=v6WGS>1G<^;@gW+3V z{K`3MZ3fwOH7rNUEM`ww!w`37L6vhG;%HNfs?TvJo1m(RNy$EwB>Zf{N-sG#8xXO_ zdak#6)uh1Ju2lTCwMQ8*KMMX~#q5@)T!)@lq&fU%|AP&1%(~AXZcZg}P8Mqc$qoto z0)Z4l{O6m!Zbd)qWA8kMGbk_Jr!|?gNT!|O-YLn{l}Tt3%g|@unx}FcnPS=H1{0ut zMN;+ezmJ?SebFckz5T36W}~u+S>U(bT~%ATUFv)8@ZHZN2ugOBR#8Xff{pEMr(%&2 z1Wx?%!@nDds#(Ey23jbDb+Xm8oI%;Wi023Hm)K;FNUG2t`Qm6f#@O#R-GxcZ)O;3l z?~-OH=7=u`oi#pN^8vPBLvoAexV?*dLI)Y@(F`Am@Cy30eTH3WL`#!WT@ohMOG0Cs zK*cRSlZ}`TBd{ytD*`fi>WIjlkFP1Fa)sGpPdS+HX>&yl%?kjz3W=@y$7=4RE~v|e zLfqOR6HGnswdYV{kKY{EFwtrDUAGv~s;6EBai@uGStb?~wac-fI|*mL4apa?ZO8oS zegudgqN8-@55A!P9APg(qIf;2WF@($%5?jpt)b49KeRR$0U+|k=I`EJ0o6w3Hcwp5&#)pz5p&gGWOHRCw zxG(mTEUx?6?{!Shcfo+wFI&HAkjO{Gm{*i@aKSaz{@siaO|qOMg4|+%YT3zA%7pO6 z1lBs3sgp$V2A5BQW7TTok=`OF$A_Iq;r8Lu6>&LksJYJ5WyvWsS81aAg8RrD68F0g z8kq6uHca49h~^z-cqR`q{VBE3M6hi5jip~d+4gW4JJl&HgjWCwQm7lQm+N`NLJ3vy z1by;R>2ilMV_N`LTykpyl1Ijh(CQ=9fn}_RNybf4<@PvdNi+g*|4(rx-%NAKSwFCW4V?$-hjZ;(S$ zm4mp^=eWiPs~YPByi2PWZVYNNnKx`l_xq!r*>6+f%yvZ*-U{VKCT(QS0{Bf8>`*tzHyI#mm;mhdmJRG=+C zbd=|vr287~#}^1_KECY;2<5C=@m1gFRis|mp%mzNM(JMIk~DIlykQfs0K@Qb!8XvE 
za4D~%f6Z4v0LkMPdgqgk3L!%iQq4fB)qh33@9g<0P#Ot>cAmbi15wJTWL3C_MTHa$;SX|G3b%V zGb;|505=mNJkLYg6Zo6=1fO&(dOpY}*{Sk@=R@gyT&%!wf$d&XRpC&VQk>X-q97x8~vz zIm_J(%0>Qw=VaNQUqOwXAzB;5&vNKjP?>ms;Hy_Lzi@5>&UC*ef&*#0FP~h{B1$s+Jv& zC|d56uD6_okaS|JCK3v3af@btWnrbV|Es@9No1Q%{E<7^qoUVJrp{CZt5{%j+ZTK6v*g5B(f9#>p>NrM%ykqpj1Klgj_n46liG?CPR{;ZiZ; zP*finS=%qRbt2QA$zuj-og{##Mn8{XItiU#H8QHKrYUo1>O+Y35-JT%>3-9!OnBzN zkldw zpu%)q!a4I4SFZgQge`YJE{A^nb!Nra#+2hYKGewq$U zRjD)PGtm8$cUW!pkN@FQH2@Mv~L?ZNv@k#6$V zriQS?Br3Lfaw*t_(L=}>s3s`|+F8Y;DMyR-7)l4*FN;7>@Hgl{UKs1&_h?qp)Tu$Y z7tn#M5d9dxPJpzYi+&WZ$nngXU_vVV1V-guGjB!iCJe}2DG?G6t$`)&_Y3&g3Ap@D z$fmEBW0IXl*qTQ8NSl3~C-{+t`pao}O#7n)#(vA+iYN5*Yw}2y zUIIY{FveLc6wr5Iw$io3(jF%NVkAef?_Iwi zWFKT4^<$onnr9~C2L)P613Vd2d_QT_KY-{O`1i0nx9}VeeAT{dz-{utR$o)Kk6`nj zax+04__7}XDNh1ZfwD8;C&+TraJCfHTC*x5WIgJ1ll8k4`ghF=YJlt%Pa6#*!~yMB zgIGy7gN=EC$GNwP#fYcydaIWlpFF#+UO&c?O$|HkFl-u+l0dUhyx>l41)Z1eBs?{! zX%A*QLX+0PTw^T5QvW^n`X`5yS-|lm4RBw!VE-kW{4Xquv4xYfqlKZ1^Z(@1j#Q-o z;8FgtpU(xAtI8)m$`krySdnbo%ysK`mGvWGT9Tj^NCp)vl8b(NNlvzs@GH{P_tVMa zTRU*X+4W|8-RbYp14P*wWCYPH8U|-;}6H%lj0jKHytgfS*6=zREZkKOQ z4OE>J8enCrx@Rt5klesjC4L=8)}u*j&Hw)RqoT>f4j}WZK22bG-Y*a8iz8a13X4=( z*coFpTg6g`O2Zc2jHO(V&pS(xnp8*x0p*sn5$d4gI5RKX2v${4*PL_t zb<+Bxkc+!ZvRCAm+AoylTZHH=m2&WwA{zU;ap#79$7`G$w0ZAom=Kz!Z!t8N(X~*a zmqeTZHHo8$5~RUGb?_uf5`;X}rG*53689EBgWXy(tbO+#8eqDg;pn-Feev7oKb>#uY zitehJE3OhN$zGs32%Z%x*0uz26*AY3TYJbAc7wzOH;b6}`MG(x-2IBwk91R3R577I zjVvow^s01}M`VlM-_*>FXsk^xC|tJ=-n`HAtb;^f{Bdc`Hsvmx)v1A_;P|Cjv;=2_ zxXMCOet9Ct(!D(sIk%~a*>fstol7@isbNz32@&c&qAW(RnpOr$6TIx_o`U1O$&b_Q zh#zD|4RcnlMuqxu;P+;-85q@KquFtDy18SqG>!!2E^~bA3}ETc{I)U7?koY|){&0l>?F#BqGU}^)4dSrS@!k!{BP%xyJ ztKB5@)Uy1=?tQ#jf}T1f>RZ0TUbT$;cmtcp5+UJ<{Q_)XS4VG5C{GSwYfOeDf}nVE zLB!nxN$yh*^A+B!y226Jtr1TF+9QHOfTFeir;umGkm6PaJKm2?lGFR6>EZ7nvdXuL=mg$2dyhAJKzx7 zk!f`sL?vXDdby-90?Cl)XAp4daNxiJ@IwSKXZ%{R3SJJhCTy`reYelouecKhz+fN# zKN1Hdn|~h=0S&$g8*343h(wJ~_mL0?J1$Q41;6NcatvAQa&!f8@CJOQFgqnS*A*fl zoJNeVrOz00`ygFX$k9nP0!JsWQ^ZWOPaF6^Vfg7b>uBg+>@I~GmAs@_I4~6bsH|pe z->|4=^t-9kVXszkJZx5gAx?0uHgC^4|DoceIp#{7*+1neosI&|)OhonKJJ>u*kCNz z${HD&q2GYUq!&yM!Vw{;A6S>=7@cYOi7k$wZDYwz*hv-OhdgoUy>8cyh^l=Vhq!i! 
zeck`{q1j$&p(C!1p0c zD3GhQcI`ZM*k^(&tTyygjnIfCypFE@bJ- zLi!ADlmZ<+H@ghBw5kaBKpldPp3iTcR$CsG*)=;3*xYh1HYrMEl|)8vs-lTw)DPiD zsqNwJ_;zwkM7{3V?@kvGor;F&6uno?Dod|~*U1z&+DljFb*mtQ#ofl3sRrg%1Xk_F z8zogzQ_RYjh6yKY$t(v*-c4l(t3Ds6uEF%NZ}?A#w9G84O##r*ea2@u8!+}4&mata z2oF|jPxIDmh>uHCV0o;SXCH-`B0} z{BtUO`x#!%2nfNb0C2T`gcbh6)Y@7AR2BX;o7SjL+x`)8xmH5D6^*i0zK@Rr{z8ij z$uI{dW_^zW23AwznjuvzpxBBbxZT5CtXfDh=@4`^kZQ^|!_P^q8&AEI)69WOJ5fdZ z(oaj=2H(`OP_veRqwytpg4e)>7JGn(edIXseT`+9mPy2E=z(|#6bti7+#p<`r8A2h|hgk}wPn!4&fq8z(C z389Kpd>UuoRSGm-LzVSArtJ)g*JAc%2#>X8ji#X-9#2xhi>$J%_|;qOUQzVSy(&xA z^l_YkCL^ESrrvFY7wQ#F({XGI;hv-Lu9@gm4EP{v1f5CrOA0Jr(?_sV;^Wee z!JdYLqdYY1lozLom{sDH7aJ46?ge~prn273@WZjEs5KYNta5{@;`j!^-q1(-Ub?8J zz+XAFpYf5o712i0k~ww#cZBP;MpO73$>p`>&&rbn4HV}6o zQCriB=#ve}C|!iyic#NgixRyd jV*6X|1M5u*aW>_f4Zq#>SNfUL;`wmjp|ItI zHif}wWvPp5+M!I6zb6bSAWxBps!0jSvK5tUov@BeUYHi*_YM{WHU7fo!eNQZKQ&M~ zu~GomV+a_*47hBUUzENhl>2#lSU#%|Of|6~$y+LkS?NV7NDbas0zRA^V^s|1xfO{( z_&c83wxzP2kyC^ggcNP8crHtHdErVs-mTlz$7sxbW>uXYRx@M7=+5or{sYRHznCSQ{jcS>P!+d7XGc=GOiCRoRvTM|Izq#WfBc?Yu` zIR-H-F|;~!qAwnqFBJ1zcGo{W-Wwm{cyM-c806;-fPsZboq-oXPMGc=<}~R2Z5$ds zHi-~_;zoNn?~d8JU9C)QfJmSP`5XkIt(`?ctHe%vd zPGAHMB@TOO<;qL_p?(AyCEQ+-6qw8Bv++Y&{Yf6c;dZzuV60dN^n@3I%Ub4*k&yA?$XXV%6-ogXM zMk57kyxs`~d8QD7zXfw0u)Wvl?S1Ext+$h;*!>blaN`|sIj8Y>zB!4z{jff%x_AjZ z_`ci!^ppgp>8jRYp1>m|FtycepA+0*Ufkf|)h*|ZhQ?-6s+t;AX zhIO1}9XC$p4&NdA)$1oprvv;`#bfRtxnuLS1V=(ES_tklbM{ZZ2Xh>e7&>*C!yH$ z?xXwk!~@0wr%C*aCcts?m_gu4$=weZ1Jt5>=-)#luqDW5 z-~9W+VU|4xQGDszn7lOx^%-cwKAlV%Oi~B#P?CEKsSYBVFdd|kt)8@{zXCg=p!$eI z5J*kMWybV(;ZJDFQs%~CmC!n~OELS!!nak?dcQ}_=yC6sSoaUsce!;BQ5qR5JYYAP zp9)O2J?~usn0;rFCiI)=uPXFM5X1t9D+gT;4Lsd`bXss+-?ruM)1x+a8QDFZNt#j< zZMLoaEok`1SqEA*9*hCV$7w?SeT8u{GB^2Cx8e8)Xjr52P3{l=_(Uyw*dVD9XH}S< z`cWPjvbePp3?&Rklq{pZHk+d{hwgsG?ZSK26u2c&d)fDn=gySpfH%--jhFIwJ#&02 zmbdWGZkT>)YUR9iyX6$Fx)yMi6&(k2`Q0H@FBI@q{2zp&T{RTh&~&WTT%#JP5G6I( zI*hJiRYPEJjeNk`9+)64H0ZWwysTdjSKqs@4>TH#I2r(8NL5?UqR1Oi4gJ6vE!3QV zy6vopWuCr9Dyaxl%DTz4bidWH*gbOY7^Xtckn+?wuF@)rl3zE79U0iRi6(l6^k#wf z;J7_8a})Y0cvR4Hr)?gT^H|O%lOY7g6XMd`M~VITv#odmZFR!2O{a!e`z{rltBrRW zPs~u*XcUS&qo|u?-H~a8f)fp~?)}I& z=Xb-~B+jD!?BhI}h-FTPOR$rY#R@IwY&a!HEQ>o~qAxrYYi^GP;bTPC>4x9-s%LAr z4EQmGXs;;BzIKpcs{N>P4IMKxb{DXv=X->1SXx!9Z>Hp~;jan&xLk5Jrs{wEC|86w_LU&Gf|MTjms0`QwFtR;w)DX~fQV}#` zeAxB@WL>oAs}NCJ)?N%C0jXkaIFXBq%7)hkJKN%tsn$kbx^v{D-WJwJD6jc<$-#6I$tB8)?yjeYnZ7EK>xAN|cpGYT`lDfGInIYN zMoy)hXfoP&VT>?;`L({!vL1QYAC!JLl^tfl=jQ_hM%X!E()z{pnLk0w}sJ8aZ+A)-S-w1U^bONt{RK`HWoDW41_GrCELx@u?9=qCdBUgz z+!RqxvxD8jbthKo2X0|gcNU5FBtzF16%|&|tQQz2+c%-A3szdkyXE=K1=9)r&Oy;2 zc2Jb~o0-gQ(_S{IaoOTIC0lgMPk9PN znREE#7CWOYc7|y*=it?4DBbs4pzBV)bTkAMfYNGm`_D}hG$X47Z?Vf>kgSCq(p3dU z1Eq+Xf%CxoOK5AMhB8?%Pr=FCqYofZooIGDqlqQ}_VTg~_{rsG{-9+Liab}x`RVxB zOV%$;TKN0%a%W!<;-oIrvYs8goNGS}?sv@Q&gg71YL49_mz)E}Ou=EsKqm++brtUy z$C4+ZK8M6*e(04uWo0k^;-4Mp)mK&{{j-73Or^yP8XJc)Z# zf&-$&_8M4c7r}AQDlc)_ZNdnb>}60pFNHFB@^Ll(9D$^~9ftWzKeQsm1U+z1)1a^2 z4*t$9w)BppMZ0Jf$_wJt2U`OCjpG*K0G)6vp)Ca6;Y)#oewz>y)$xaA|4(Rwl!a0K zYZ04Vx}R4Z5ePg<*l7rKDQYw8%b|x)lAkSk--#^&#T&qL)?!aJwDJVN%SDOe8@{x5 zS7`~|Ib-Zkvo>Lka|Noe3UN0)$u8fK-Vc91t@?02onu{*e}eq)^zjEwVqQ*Dx(0|; z%>%;3f8@mfwY~Z~Huvu#5=n4JpQBu%n0LF`Ce@sy;WzwM@CZ1s zo1m%dE-Po}`z9wbFCmn z5*77ft;$j?(`=i7FuQrXp`3@$3#!ZpnubGJ830)lor}t>03jR?wqsokNRp@`+KY?n$jOU&wMT z`Y4c%t`D(cH2X`CSq>IS_7N#Lz$sW-1G5B+UHwS}vp5oC*y8b6Y|Ca8|GSG`sD7pA zfMPn8{bsY)tR`FzocpE1Q^6uH0T6x>@K5|z1@{~7O{Soml zkqQ(^y&+cZveNd`n;mB&cH2}=GEDH;#1(G*YYQmjCz@kTu}X04N`dfmwHk9Q@$Zc} z7Ktn#8f(R7(EB;Fiv-PcMFqosFtJ)_nUI7_uPWP!l4_nDMX$6{XW=W9r13n?^Pso$ zJx#!S^!-z)dU-7XaZm6b6A%cI*DS5 
zFc7=egsaz%y;#*kw{kJ3y;uOlE}RI4$pD2tiQ_EuX}f<^W;UGqqYG8^GmY_spfsI6 z-E>YeJch>xkK6bW80PJMBC)GiuHdHvMJNTDO=qDan_y?7)m zv-mZVq;%=GhVd2)9H~=8dypuOxhm(=%g$Mj1s~1k3l<&5z`&;x{&|nVBgrk=ihavzu6}^+nT3ByzxC$v6}Xy_}`xCmMlt`x;f236iNAkc!-R~M9(nyWcOnFC3-ZDxh@Np4U2j1 zQATb0$gdvm>WjsVFF03`aqED?VtHvYLhxTs1yZH^-6Li+n8 z7Enli(P#~9oK9Y5lbmQUNxk4OAW>)~R!~8vrW;cIN@(f|4FgL9*`K)yBDbh=q&&k% z6ZiV%H-VFsifrdgb<_{u)spWi-CE|~VR@PLo>sO7Hno%b{&8u3uTfX8&WLYYUaAs1;jzD)OW`Pnvt zdw(L+_w{(>Iv1&q5`CD{y$#Gqf=hbV;kxEB{^N`Sfd@BN>&HPdi_M8~TN_RxzL}@$#Mm-)K{3hL;dtoMCsmE*@Xv;6TmtSAm%#OlL>DaFG3=U7 zy>WNKl&msa{D+ba95m`uV9;BEP*J)q2H7T|i7LD{wVdaZyjJeTtR%^LRB3`#amhNR zAFjET_A5<;LSVYwlkfVNB8;{3k2wW0j+YczrBd!S7YxvDDw!h06&p+l5E*FY60W#9 zSr;cQ`_1h0xs*7MORlMqR4dwKATB1N+5E&Xv>%2hW#NVmt;B^idND=C9O|6veS164 z=HDJYtwixh*06t`2Q)4X6xjBQPKR3J8+`KWMolwEX*%Qob~zU4lFe$KP{uhH%k%B> zW=$2wA_vZPG~x9E0~JKi6wZ=ulNh9Be%TvaBX)bS)HBtX3tF4+II>eseWRWl)cL)C zeEX3#g~&eJ4%BL;gyD_>mPu>oWo}B{=2Qm8?rw?Wv@Aums&r4*yVTHEyEp?`~5lqrGiU_mS}3Gz&tk+eZ3O8>V1{wjaauT7Nf-G z?0uiX1#5kehI}nK$u2M+%mWIyI1t#c-GB%fecj$koZXi9-OMFatvwiLOO*AKQJX44 z&a)qm0El)1U;ZuqV-6hRS0c3QciSX!Js&{YY8T#dgvw@sJ^>Xb8u+KQs|*3^173PR z`UMJdKdS-%QSwEf7uRt&?i+SsIQXa0Pg2+w$;U%%d&JM3eqt!x-qwe}9s-`SaI{E_ z`yex!y^5Bl7=N1P2;;ND9)G1U&PRae;6Xvl7n6SNwSD#F{~{WF*?N7AA*elF&bsLNaY0tb zO<4?@OFmSrV)oFA5W*ZO19+ZzT?XA`+*fI$h7NUqWzh!`gaT~2S2c7hdW@t_$k&!b){S0d> zP~JF5EQ7xYAeGW4N@IJ_7P{i>ytfkO)kB@Tc8d*Vpl(M}kH3kig{!&QOv7x1bBYPs z*c#mwU7bR4YKZ_sF}~-R2|0ux&nWw~{MOeVh)9Z18y(4Us9D0X<#(pEHxsFtmlUlG zDWQ0jS*?*jgDuW2meO(=M&txbbj)V9p%o$rE42BlQR!GO*+ikJqi;K(iTGeG@N24p z!9?mfxqqTv2J&v&=~`&fT%Sy-emTw;qq&ZyJO%IA-`nj@NRxv2B@wT%2!&M@at@Sr z7vblu-%LIB(Lle8BHmC%p|W6(2!M6~9rEDi@CQk@;J**=fet(1Q4&4ffSs_^EX}>d9MTul-Ia|mV@yNug1&drU+0L&@zbW16{4^;vA&hKnB3P zW>k#c%6v?(`s}jGSH%%bcM42>S8nNVGfv#4R^`|Qw+q5xDOh!k)(%8|SgHMaZR2u#pnF07>B>IpPT-pYExPsDsU}cOaY#5 z{CW*RgLWuUE43mr^Mm3i}X5KmD#% z<0W;HAVCR0rJWXTntaMjuD%*R#r*L4*0g}30ObHFMQOA!IX^Fgl;)aG^K**ultY&o zPd|qzQR)Nl8@pR2I_)8C_ht$9{Cfk>B1;yl@&>MxA!RhTibelPOi4YiZ<21o*;o7o zTesC^vgdV=_xHg?8=02s`drmCd2KdE!mYI=1KV3w7i5VKq=<}HlCc>;QjMo6yn4%|zj!rO$XC_UblUg@iYizQx#Z4fKjv{48}HJ3)g zkr7CZiY38UTbY?XIMNNNs#_3`tL8D@=s8x; z-xXJy`EXro1sJYN(vi4&Wf%tCQuG14IBOk{G!bFlQ>`KNi8g(Rfo3Mi_Zmd`Nh+N| z1?>9ugeV!)8FD?gJi)F=Tra~j1^N@tMAvBCk3M`^OA)N#K;p${~iw&f5| z2VU1+y^FgDf|Hvl^gJylL>kNjq>%U@K@(IQhIh8$3k7gZNiRf)d{u#WBYY0&CfMrl zC=diu@(4V29Ck_LKkWGLtyl3?@Jwx@66@sTZz2r6mX7tT=n>~g@s^d*8^@&gJ>DM7`> z*&_nDxeRv0Y)U0(Vo3>hB3r0Y8;Lv|w}F6UMkd`=`XEhT%k-GKvGHUAdiKChpQVEd z)Q-p=NLxqW#R#j4vqH8iRU;SU#pl=~|J9RWa8&3nB!hZu!r#2)RFz{)clSBN~kx{FJH5yzG)T0f~ zZE2iN*5zgGy9i&})!gY>*(zhCl^p&AHO3*%kwi{8Ca+RTV!g=)oRn{AnzE34u zz&WJLhr`^w%U>k4E||;C!}el#J}S370;GTtt#1sI(#GNIo*`?w!zMkIsGGoFUwvT55 z%Z#ZVC$k*SFU1ImAWIJ=Nv-I`wam6$H|`jiJaxUOqXPsSyFOqV=Hsr>@W>6}aB-TO zlBmaapx0Q<=+S4A9I7&~CXf`tKPj0^zEKLZC5R7XWH?+SA19c`yL)4bG8{5;8X}-X zt81L{#dXn0K9en&=l05;GH0;zay9i06M7;NQ-GzUKMl0GI6%p428Yn*=!ubT3EaSf zu^-XiwwXyKU%fE{*(|Pxvb=C{l*?Kxl9B=T)TY2}dYNZGFIOxR>Wbv@TA^LAna_dx zVF)cJ7eK9dIRh$#c7!ULgCk4~kP3UlA^S2$1erP=$q3qSFXBIvLJpGL6iaRlaOZ)} z)CqW#t$*nizD8#=qT0f`PhodVoKWToBCIeD9Y^Ynwn3QziD#phDfe~y~*GLfx>Qwa!O^lTr8#qX!L#ylW}ppX$K#oz}0tfm1puoo!s5J4ljdRmluP)x^@tuSyh6q342z3Pol0v{CCM^!v4;_~#z%OM^ zy9Hcg7Re71ei?gzOgwuz)|8l(@?+MA-_l|Mc{(7q$n?VCx%o8YgX>)o4+JZ${De$! 
z0~+TCG8WPy?GB2@&3+8L`@y?SL`ox)xUsP2%=BiZUMb_H^5vjFRMlYEw_?gc37vZ+ z+_tJHZZ`HCdC?fp9?el%%OqopBMV_H`Y<%Pza)MCUHOn(Qh9jqXKU5;+wFM$=NV!PQF#GZPU`fI zVC}Z3za37-{87rSmy(I}gT!c)=C6yNjSRurylIz@G6FOPwbUbb(?ky~ovQ4a>($Mt zZ@bp`pI|G!VWmbk*NEUT1mZQUFQ&WK2U4h($jx*w|H-`>&R^2`6>9VtuO z5sM;SU3siyh1Fy3U%(Cv4?-n=5rPf-BDgN09ghe`nOIW7nsLQ(fXCn3^{sLK`7m6s zA5oo7;sx?Om9u6T_-GV)z@Xyn7s)6Gijhl)=cdZlZ+tv9V*iW{z7Ajx7#vB0KV!-0ZKXs;fN;0A(%jot7pS@NYBGLXC%Ng_w^K(xL zigOy8yaP1hm`)@W5-BHJ(AkOJj20w=+&+hXoUcP4FJ#OXQq+cp?Uax=sS>IsB4saw z%9}LqhMPw!%pp|6*^qsrCBaC?edm{;x2C)6Is&ZP)P=-LIFn%w2wb~d3q*m|tf5Ui zZd{^~N)QHyNX^iS9wGv{1(_>?XyvqPDgt>~UbgSAc>Z^5@kdW?)Fe|+@mXL6Pp|iG zC{DSsioNcDurV?KcjlQ=nc0(SK!=VBsdL1WX2?NZIC1wG=w}{iw?FwI!%wBcUPd@G zX3KO3q!GTH2CN?8()6xyzn!6K(M;ncnYEjZMi8}D^z--186d3>eZG1&^ML)#e0J2{ z@{D@DS&O818Frg7tSrO67T;#^ToEN>^yd3;o;Omn{I7LR5s+C^M@LsYFGg+p8J6tI z%E(IY;%gLaA)ptu3C0nWn}XJ{u;f^$fiX%`=EyNdUl85pV#tiPT)znE1sk!RQC7ID z5KN*fhnr!j8&2J1+JwV+;2`_Lwu!7*h}iu|y+fFL*rQ&FFwe=csUd-<=C&!F`=(lh zfq%;%4VidXIP;<(d`=k0;l~!ybc@jvaDJ_0bFhP>_!F0>ZZs*3RcIpbLF@7Iv)MtP z9-_IS2dRIQPaX*!=jajfu@ZEZmwa1D*lxb#T9-SGmIFh_8X$k>TsfH8s>Wns<3U8F zl^dSeQLY`>os92&9l7|VM(F>%+fs1Zf~kDWxuMC|KivvlDg5qSWOIBhuvWOZmeRe2 zk?ZjGyRf^a{c^HQ*~boq+lb=TzLM!|U=E;lHSC{3J{7)}qQzG`#inTUpcm0u zFrc??Jq9}i%WO4uCX7 zr~e*I{VR3<&siZwW%hq6)AYJ@xh0`V<5?)rV7?$%y@a5FL>9_RA>vkQdkR>q)Uf47 zG@V|pXXvQaw(xOMoPHJ3<{UwRr1}L=6TE< zRA+VJ><8yU*{JH|slLmh06;g>>EMpkK)%yo4-v*!f{*0X4F?I_Pj4Vzzbf_7(zy^x zJ58TwSjaY0s6IH`7%J}J6on)w%tu?utoP}>|FTUk4A!x25*8V9&0rvfk3z(=nt7Fd z-B-{VYx}dIa|edFqnCb&MJsBscuCjBukpyvrnbt{rza&up)j7grvp$@#(_GIU}dqF3Yzd{IP8 zmMO>cVNB&E8q?k?ZkZiI3CeoNVpblT3qGtc*BGJE!a}yZ;@}BO9-P0^8^|-tdk3=_ z!-(ez4P3thq>UE_$*?4uc686@(YMZ6&? zx9Q{p4y*C^nzDRcd7bmn_*e3{v^?P(Vwk;g$}oRI7^rcE9B8Pmn{1z{Sa2}3x?!WF zU$$}kTNew=t`PDk<5_9<3BWs zZLhY&PIefr%&Y|B2*@%P5@2$vcEYVUT}O3MrH9&jLo7PJs`Q0X(%_M}fR}HJbJ~9Z z_c}NT(dg^p7tAlsD+rldkLtQ!y(3bfzK9MGr$h_>09|1PJf1?tstc09ODK@ZT1UP3%o=5Ur2v)Fe<&CK zYRv&dfNzZroDG~z0B+l#13eo9TLUu_M*uGJKdm@(iNXDOt^|P9?GK^rU#&TS5a0;^ z+X{fz)yzci|2zFJX0BU6BMku;f=&9LW+DLJ{@aYJg|Ug<|BsnJZ`pEm_P`$+NjO0H zz@KXHzglwu5#S&7Z!`Y@yZrtB{<#AG{a*iA7d-4uR|l-*Du6rtL&Ne{`#+7HTSyd9 z7{_NrAq7M4`)1y_s6cj7i*!-6$m<;x{kAHoL3Z8~ZUEhk)1(0iQzvIcEIvUz=g;;{KU?LrTcsS z;NtB=Imv9n{q_T6u)~MW-B3qj>(1A>i~U4yWD162PL9z~G^FlkZoEH~ID)=RJ}T>$ zeSQpPxR|)x`LQ6OdOR-AZJ5K6Cv@ld<1yH=r+~9Ll4Y+N?$4WnvGo!DA)s}b#$Y#g zM?g_;3?=?DFA7vCax^`EQ_Cq_LIoHP#St~Phqexq+Eyh2-M42ssR}KpdFS`7S(&&w zXUEG=2r(}yf+T|;EDnWBR2Sa$A^)1=aK8hlS+{|o5t{46;+VP9o>|Y{OuvqV-@bI zDFS-ZR~4F<&Yk?Ae&k{|q-cZ`^9Ay(K@~YFsWX#g*Q&g}W7!DqbfgyUvesc5D~7<- zOZ|b`G;atm(xHmI{^BZKw~twblRDRE(NUhZCMmk`gMv)s>WIZ*`_~8baV}}7a zu^{^F#sKSLJ=77!rjH_t#MZa^QQQus{7R%+I>q2@8Zi?s5}z`*?m0_F_`Qjd-!~E5 zW~`zl-y`W^8tv1ReAaDuu)OOf17Dk?1S{llwm_mJ@2$ctTwZ2bh>rIN{pe0B^8G;G zX@EicSIQ8pYMvM-;)`6To?$*@NEp5ncnIA@^2EEz;g6^RrQRk^-0qjcs<6h3#T zOwPI!xLjtC!KdKeYxF9M>Mp=7`P}hxc{PTQY;eKzvbr0PYe6vsOE$Y1pmICYNhZkU zRTw=p-!uBTpPxA|29FTr8WD_edpjl2iIw)&^0H{w+EYYZa}zM^0D09L1NeLt!2L#H z2gnPL7$B*P0_e9)u{FsqdEEhPy>mBh&8HQwrE_EmS)Q)S$n{k!GS%3fDo=M|)TaF^ xDyO|WSpKDmf$tb9Fc%Ydu>7T&fuA*~z%*7eH5ShWnwEjT`;#=S_b3K)?Js^rns)#I literal 0 HcmV?d00001 diff --git a/src/jepa.egg-info/PKG-INFO b/src/jepa.egg-info/PKG-INFO new file mode 100644 index 00000000..0f8fd1dc --- /dev/null +++ b/src/jepa.egg-info/PKG-INFO @@ -0,0 +1,19 @@ +Metadata-Version: 2.1 +Name: jepa +Version: 0.0.1 +Summary: JEPA research code. 
+Requires-Python: >=3.9
+License-File: LICENSE
+Requires-Dist: torch>=2
+Requires-Dist: torchvision
+Requires-Dist: pyyaml
+Requires-Dist: numpy
+Requires-Dist: opencv-python
+Requires-Dist: submitit
+Requires-Dist: braceexpand
+Requires-Dist: webdataset
+Requires-Dist: timm
+Requires-Dist: decord
+Requires-Dist: pandas
+Requires-Dist: einops
+Requires-Dist: beartype
diff --git a/src/jepa.egg-info/SOURCES.txt b/src/jepa.egg-info/SOURCES.txt
new file mode 100644
index 00000000..9c7e7cd5
--- /dev/null
+++ b/src/jepa.egg-info/SOURCES.txt
@@ -0,0 +1,33 @@
+LICENSE
+README.md
+setup.py
+src/datasets/data_manager.py
+src/datasets/image_dataset.py
+src/datasets/video_dataset.py
+src/datasets/utils/weighted_sampler.py
+src/datasets/utils/video/functional.py
+src/datasets/utils/video/randaugment.py
+src/datasets/utils/video/randerase.py
+src/datasets/utils/video/transforms.py
+src/datasets/utils/video/volume_transforms.py
+src/jepa.egg-info/PKG-INFO
+src/jepa.egg-info/SOURCES.txt
+src/jepa.egg-info/dependency_links.txt
+src/jepa.egg-info/requires.txt
+src/jepa.egg-info/top_level.txt
+src/masks/default.py
+src/masks/multiblock3d.py
+src/masks/random_tube.py
+src/masks/utils.py
+src/models/attentive_pooler.py
+src/models/predictor.py
+src/models/vision_transformer.py
+src/models/utils/modules.py
+src/models/utils/multimask.py
+src/models/utils/patch_embed.py
+src/models/utils/pos_embs.py
+src/utils/distributed.py
+src/utils/logging.py
+src/utils/monitoring.py
+src/utils/schedulers.py
+src/utils/tensors.py
\ No newline at end of file
diff --git a/src/jepa.egg-info/dependency_links.txt b/src/jepa.egg-info/dependency_links.txt
new file mode 100644
index 00000000..8b137891
--- /dev/null
+++ b/src/jepa.egg-info/dependency_links.txt
@@ -0,0 +1 @@
+
diff --git a/src/jepa.egg-info/requires.txt b/src/jepa.egg-info/requires.txt
new file mode 100644
index 00000000..d2970710
--- /dev/null
+++ b/src/jepa.egg-info/requires.txt
@@ -0,0 +1,13 @@
+torch>=2
+torchvision
+pyyaml
+numpy
+opencv-python
+submitit
+braceexpand
+webdataset
+timm
+decord
+pandas
+einops
+beartype
diff --git a/src/jepa.egg-info/top_level.txt b/src/jepa.egg-info/top_level.txt
new file mode 100644
index 00000000..b421b2a2
--- /dev/null
+++ b/src/jepa.egg-info/top_level.txt
@@ -0,0 +1,4 @@
+datasets
+masks
+models
+utils
From 666607102045119821660ac7fc0f632f7eae6421 Mon Sep 17 00:00:00 2001
From: Chengjie Zheng
Date: Thu, 7 Mar 2024 16:08:03 +0000
Subject: [PATCH 02/10] update /.gitignore: ignore all .csv files

---
 .gitignore | 1 +
 1 file changed, 1 insertion(+)

diff --git a/.gitignore b/.gitignore
index 3bb2efd7..cf5a1cd9 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,2 +1,3 @@
 .*.swp
 *.pyc
+*.csv
From 84d4528aabd09d3c2367645de4bfae492b079cb8 Mon Sep 17 00:00:00 2001
From: Chengjie Zheng
Date: Thu, 7 Mar 2024 16:12:11 +0000
Subject: [PATCH 03/10] add save_paths_csv.py: can read a dataset directory and save it into a .csv file

---
 data_csv/save_paths_csv.py | 25 +++++++++++++++++++++++++
 1 file changed, 25 insertions(+)
 create mode 100644 data_csv/save_paths_csv.py

diff --git a/data_csv/save_paths_csv.py b/data_csv/save_paths_csv.py
new file mode 100644
index 00000000..e6b30631
--- /dev/null
+++ b/data_csv/save_paths_csv.py
@@ -0,0 +1,25 @@
+import glob
+import pandas as pd
+import random
+import os
+
+# Specify the directory path
+directory_path = '/beacon/data01/chengjie.zheng001/00_datasets/kinetics-dataset/k700-2020/val'
+
+# Use glob to find all mp4 files
+mp4_files = glob.glob(os.path.join(directory_path, '**/*.mp4'), recursive=True)
+
+# Generate a list of random integers with the same length as the file list
+random_integers = '$'+str([random.randint(0, 700) for _ in mp4_files])
+# `_` iterates over each element of the mp4_files list, but we do not actually need the element values during iteration; the only purpose is to generate a list of random integers with the same length as mp4_files.
+# Using `_` as the variable name is a convention indicating the variable is temporary or unused, which improves readability by telling the reader it is not used inside the loop body
+
+# Create a DataFrame
+df = pd.DataFrame({
+    'FilePath': mp4_files,
+    'RandomInteger': random_integers
+})
+
+# Save to the CSV file
+csv_file_path = '/beacon/data01/chengjie.zheng001/code/MGH/jepa/data_csv/k700_val.csv'
+df.to_csv(csv_file_path, index=False)
\ No newline at end of file
From 38b0dc8ec8eedb5349831d1a724fe384d2d4317f Mon Sep 17 00:00:00 2001
From: Chengjie Zheng
Date: Wed, 20 Mar 2024 06:53:11 +0000
Subject: [PATCH 04/10] temp: change 4 files

---
 configs/pretrain/vitl16.yaml  |  6 +--
 data_csv/save_paths_csv.py    |  4 +-
 logs/params-pretrain.yaml     | 88 +++++++++++++++++++++++++++++++++++
 src/datasets/video_dataset.py |  4 +-
 4 files changed, 95 insertions(+), 7 deletions(-)
 create mode 100644 logs/params-pretrain.yaml

diff --git a/configs/pretrain/vitl16.yaml b/configs/pretrain/vitl16.yaml
index 4996b9de..160b85a4 100644
--- a/configs/pretrain/vitl16.yaml
+++ b/configs/pretrain/vitl16.yaml
@@ -4,9 +4,7 @@ tasks_per_node: 8
 data:
   dataset_type: VideoDataset
   datasets:
-    - /your_path_to_kinetics710_csv_file_index.csv
-    - /your_path_to_ssv2_csv_file_index.csv
-    - /your_path_to_howto100m_csv_file_index.csv
+    - /beacon/data01/chengjie.zheng001/code/MGH/umb-jepa/data_csv/k700_train.csv
   decode_one_clip: true
   batch_size: 24
   num_clips: 1
@@ -30,7 +28,7 @@ data_aug:
   - 1.0
   reprob: 0.0
 logging:
-  folder: /your_absolute_file_path_for_saving_logs_and_checkpoints/
+  folder: /beacon/data01/chengjie.zheng001/code/MGH/umb-jepa/logs/
   write_tag: jepa
 loss:
   loss_exp: 1.0
diff --git a/data_csv/save_paths_csv.py b/data_csv/save_paths_csv.py
index e6b30631..d8f2f75a 100644
--- a/data_csv/save_paths_csv.py
+++ b/data_csv/save_paths_csv.py
@@ -4,7 +4,7 @@
 import os
 
 # Specify the directory path
-directory_path = '/beacon/data01/chengjie.zheng001/00_datasets/kinetics-dataset/k700-2020/val'
+directory_path = '/beacon/data01/chengjie.zheng001/data/kinetics-dataset/k700-2020/train/'
 
 # Use glob to find all mp4 files
 mp4_files = glob.glob(os.path.join(directory_path, '**/*.mp4'), recursive=True)
@@ -21,5 +21,5 @@
 })
 
 # Save to the CSV file
-csv_file_path = '/beacon/data01/chengjie.zheng001/code/MGH/jepa/data_csv/k700_val.csv'
+csv_file_path = '/beacon/data01/chengjie.zheng001/code/MGH/umb-jepa/data_csv/k700_train2.csv'
 df.to_csv(csv_file_path, index=False)
\ No newline at end of file
diff --git a/logs/params-pretrain.yaml b/logs/params-pretrain.yaml
new file mode 100644
index 00000000..bc8eb3aa
--- /dev/null
+++ b/logs/params-pretrain.yaml
@@ -0,0 +1,88 @@
+app: vjepa
+data:
+  batch_size: 24
+  clip_duration: null
+  crop_size: 224
+  dataset_type: VideoDataset
+  datasets:
+  - /beacon/data01/chengjie.zheng001/code/MGH/umb-jepa/data_csv/k700_train.csv
+  decode_one_clip: true
+  filter_short_videos: false
+  num_clips: 1
+  num_frames: 16
+  num_workers: 12
+  patch_size: 16
+  pin_mem: true
+  sampling_rate: 4
+  tubelet_size: 2
+data_aug:
+  auto_augment: false
+  motion_shift: false
+  random_resize_aspect_ratio:
+  - 0.75
+  - 1.35
+  random_resize_scale:
+  - 0.3
+  - 1.0
+  reprob: 0.0
+logging:
+  folder: /beacon/data01/chengjie.zheng001/code/MGH/umb-jepa/logs/
+  write_tag: jepa
+loss:
+  loss_exp: 1.0
+  reg_coeff: 0.0
+mask:
+- aspect_ratio:
+  - 0.75
+  - 1.5
+  max_keep: null
+  max_temporal_keep: 1.0
+  num_blocks: 8
+  spatial_scale:
+  - 0.15
+  - 0.15
+  temporal_scale:
+  - 1.0
+  - 1.0
+- aspect_ratio:
+  - 0.75
+  - 1.5
+  max_keep: null
+  max_temporal_keep: 1.0
+  num_blocks: 2
+  spatial_scale:
+  - 0.7
+  - 0.7
+  temporal_scale:
+  - 1.0
+  - 1.0
+meta:
+  dtype: bfloat16
+  eval_freq: 100
+  load_checkpoint: false
+  read_checkpoint: null
+  seed: 234
+  use_sdpa: true
+model:
+  model_name: vit_large
+  pred_depth: 12
+  pred_embed_dim: 384
+  uniform_power: true
+  use_mask_tokens: true
+  zero_init_mask_tokens: true
+nodes: 16
+optimization:
+  clip_grad: 10.0
+  ema:
+  - 0.998
+  - 1.0
+  epochs: 300
+  final_lr: 1.0e-06
+  final_weight_decay: 0.4
+  ipe: 300
+  ipe_scale: 1.25
+  lr: 0.000625
+  start_lr: 0.0002
+  warmup: 40
+  weight_decay: 0.04
+tasks_per_node: 8
diff --git a/src/datasets/video_dataset.py b/src/datasets/video_dataset.py
index b05cc701..3bb2d181 100644
--- a/src/datasets/video_dataset.py
+++ b/src/datasets/video_dataset.py
@@ -128,7 +128,9 @@ def __init__(
 
         for data_path in self.data_paths:
 
             if data_path[-4:] == '.csv':
-                data = pd.read_csv(data_path, header=None, delimiter=" ")
+                ### Changed
+                data = pd.read_csv(data_path, header=None, delimiter=" ", on_bad_lines='skip')
+                # data = pd.read_csv(data_path, header=None, delimiter=" ")
                 samples += list(data.values[:, 0])
                 labels += list(data.values[:, 1])
                 num_samples = len(data)
From cfe0f94c5943108d5697332196f527698fddf5 Mon Sep 17 00:00:00 2001
From: Chengjie Zheng
Date: Wed, 20 Mar 2024 07:06:41 +0000
Subject: [PATCH 05/10] create save_paths_csv2.py

---
 data_csv/save_paths_csv.py  |  2 +-
 data_csv/save_paths_csv2.py | 30 ++++++++++++++++++++++++++++++
 2 files changed, 31 insertions(+), 1 deletion(-)
 create mode 100644 data_csv/save_paths_csv2.py

diff --git a/data_csv/save_paths_csv.py b/data_csv/save_paths_csv.py
index d8f2f75a..d5f30e38 100644
--- a/data_csv/save_paths_csv.py
+++ b/data_csv/save_paths_csv.py
@@ -21,5 +21,5 @@
 })
 
 # Save to the CSV file
-csv_file_path = '/beacon/data01/chengjie.zheng001/code/MGH/umb-jepa/data_csv/k700_train2.csv'
+csv_file_path = '/beacon/data01/chengjie.zheng001/Projects/MGH/umb-jepa/data_csv/k700_train.csv'
 df.to_csv(csv_file_path, index=False)
\ No newline at end of file
diff --git a/data_csv/save_paths_csv2.py b/data_csv/save_paths_csv2.py
new file mode 100644
index 00000000..55bd4b61
--- /dev/null
+++ b/data_csv/save_paths_csv2.py
@@ -0,0 +1,30 @@
+import glob
+import csv
+import random
+import os
+
+# Define the target directory path
+directory_path = '/beacon/data01/chengjie.zheng001/data/kinetics-dataset/k700-2020/train/'
+
+# Define the supported file extensions
+extensions = ['mp4']
+
+# Find all matching files under the directory
+files = []
+for ext in extensions:
+    files.extend(glob.glob(os.path.join(directory_path, f'**/*.{ext}'), recursive=True))
+
+# Generate random integer labels
+labels = [random.randint(0, 100) for _ in files]
+
+# Path to save the CSV file
+csv_file_path = '/beacon/data01/chengjie.zheng001/Projects/MGH/umb-jepa/data_csv/k700_train.csv'
+
+# Save to the CSV file
+with open(csv_file_path, 'w', newline='') as csvfile:
+    filewriter = csv.writer(csvfile)
+    for file_path, label in zip(files, labels):
+        formatted_string = f"{file_path} ${label}"
+        filewriter.writerow([formatted_string])
+
+print(f"Saved {len(files)} entries to {csv_file_path}")
\ No newline at end of file
From ce2e0587b3e215ad1fe6001ec7091d1d5ef7316c Mon Sep 17 00:00:00 2001
From: Chengjie Zheng
Date: Thu, 21 Mar 2024 05:38:40 +0000
Subject: [PATCH 06/10] .gitignore updated

---
 .gitignore                   | 4 +++-
 configs/pretrain/vitl16.yaml | 4 ++--
 logs/params-pretrain.yaml    | 4 ++--
 3 files changed, 7 insertions(+), 5 deletions(-)

diff --git a/.gitignore b/.gitignore
index cf5a1cd9..72e20a7d 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,3 +1,5 @@
-.*.swp
+*.swp
 *.pyc
 *.csv
+logs/
+logs2/
\ No newline at end of file
diff --git a/configs/pretrain/vitl16.yaml b/configs/pretrain/vitl16.yaml
index 160b85a4..0342b3b3 100644
--- a/configs/pretrain/vitl16.yaml
+++ b/configs/pretrain/vitl16.yaml
@@ -4,7 +4,7 @@ tasks_per_node: 8
 data:
   dataset_type: VideoDataset
   datasets:
-    - /beacon/data01/chengjie.zheng001/code/MGH/umb-jepa/data_csv/k700_train.csv
+    - /beacon/data01/chengjie.zheng001/Projects/MGH/umb-jepa/data_csv/k700_train.csv
   decode_one_clip: true
   batch_size: 24
   num_clips: 1
@@ -28,7 +28,7 @@ data_aug:
   - 1.0
   reprob: 0.0
 logging:
-  folder: /beacon/data01/chengjie.zheng001/code/MGH/umb-jepa/logs/
+  folder: /beacon/data01/chengjie.zheng001/Projects/MGH/umb-jepa/logs/
   write_tag: jepa
 loss:
   loss_exp: 1.0
diff --git a/logs/params-pretrain.yaml b/logs/params-pretrain.yaml
index bc8eb3aa..3614bbdb 100644
--- a/logs/params-pretrain.yaml
+++ b/logs/params-pretrain.yaml
@@ -5,7 +5,7 @@ data:
   crop_size: 224
   dataset_type: VideoDataset
   datasets:
-  - /beacon/data01/chengjie.zheng001/code/MGH/umb-jepa/data_csv/k700_train.csv
+  - /beacon/data01/chengjie.zheng001/Projects/MGH/umb-jepa/data_csv/k700_train.csv
   decode_one_clip: true
   filter_short_videos: false
   num_clips: 1
@@ -26,7 +26,7 @@ data_aug:
   - 1.0
   reprob: 0.0
 logging:
-  folder: /beacon/data01/chengjie.zheng001/code/MGH/umb-jepa/logs/
+  folder: /beacon/data01/chengjie.zheng001/Projects/MGH/umb-jepa/logs/
   write_tag: jepa
 loss:
   loss_exp: 1.0
From 8e6bf8944ef29509fe3628ea083d4468623d7269 Mon Sep 17 00:00:00 2001
From: Chengjie Zheng
Date: Thu, 21 Mar 2024 05:48:23 +0000
Subject: [PATCH 07/10] .gitignore update, and remove data_csv/

---
 .gitignore                  | 3 ++-
 data_csv/save_paths_csv2.py | 6 ++++--
 2 files changed, 6 insertions(+), 3 deletions(-)

diff --git a/.gitignore b/.gitignore
index 72e20a7d..3c53fb13 100644
--- a/.gitignore
+++ b/.gitignore
@@ -2,4 +2,5 @@
 *.pyc
 *.csv
 logs/
-logs2/
\ No newline at end of file
+logs2/
+data_csv/
\ No newline at end of file
diff --git a/data_csv/save_paths_csv2.py b/data_csv/save_paths_csv2.py
index 55bd4b61..070af19d 100644
--- a/data_csv/save_paths_csv2.py
+++ b/data_csv/save_paths_csv2.py
@@ -4,7 +4,9 @@
 import os
 
 # Define the target directory path
-directory_path = '/beacon/data01/chengjie.zheng001/data/kinetics-dataset/k700-2020/train/'
+# directory_path = '/beacon/data01/chengjie.zheng001/data/kinetics-dataset/k700-2020/train/'
+directory_path = '/beacon/data01/chengjie.zheng001/data/kinetics-dataset/k700-2020/val/'
+
 
 # Define the supported file extensions
 extensions = ['mp4']
@@ -18,7 +20,7 @@ for ext in extensions:
 labels = [random.randint(0, 100) for _ in files]
 
 # Path to save the CSV file
-csv_file_path = '/beacon/data01/chengjie.zheng001/Projects/MGH/umb-jepa/data_csv/k700_train.csv'
+csv_file_path = '/beacon/data01/chengjie.zheng001/Projects/MGH/umb-jepa/data_csv/k700_val.csv'
 
 # Save to the CSV file
 with open(csv_file_path, 'w', newline='') as csvfile:
From a77fa554994ff9615aa7401cd577e3d64f8b3dac Mon Sep 17 00:00:00 2001
From: Chengjie Zheng
Date: Thu, 21 Mar 2024 05:52:22 +0000
Subject: [PATCH 08/10] --cached untrack data_csv/

---
 data_csv/save_paths_csv.py  | 25 -------------------------
 data_csv/save_paths_csv2.py | 32 --------------------------------
 2 files changed, 57 deletions(-)
 delete mode 100644 data_csv/save_paths_csv.py
 delete mode 100644 data_csv/save_paths_csv2.py

diff --git a/data_csv/save_paths_csv.py b/data_csv/save_paths_csv.py
deleted file mode 100644
index d5f30e38..00000000
--- a/data_csv/save_paths_csv.py
+++ /dev/null
@@ -1,25 +0,0 @@
-import glob
-import pandas as pd
-import random
-import os
-
-# Specify the directory path
-directory_path = '/beacon/data01/chengjie.zheng001/data/kinetics-dataset/k700-2020/train/'
-
-# Use glob to find all mp4 files
-mp4_files = glob.glob(os.path.join(directory_path, '**/*.mp4'), recursive=True)
-
-# Generate a list of random integers with the same length as the file list
-random_integers = '$'+str([random.randint(0, 700) for _ in mp4_files])
-# `_` iterates over each element of the mp4_files list, but we do not actually need the element values during iteration; the only purpose is to generate a list of random integers with the same length as mp4_files.
-# Using `_` as the variable name is a convention indicating the variable is temporary or unused, which improves readability by telling the reader it is not used inside the loop body
-
-# Create a DataFrame
-df = pd.DataFrame({
-    'FilePath': mp4_files,
-    'RandomInteger': random_integers
-})
-
-# Save to the CSV file
-csv_file_path = '/beacon/data01/chengjie.zheng001/Projects/MGH/umb-jepa/data_csv/k700_train.csv'
-df.to_csv(csv_file_path, index=False)
\ No newline at end of file
diff --git a/data_csv/save_paths_csv2.py b/data_csv/save_paths_csv2.py
deleted file mode 100644
index 070af19d..00000000
--- a/data_csv/save_paths_csv2.py
+++ /dev/null
@@ -1,32 +0,0 @@
-import glob
-import csv
-import random
-import os
-
-# Define the target directory path
-# directory_path = '/beacon/data01/chengjie.zheng001/data/kinetics-dataset/k700-2020/train/'
-directory_path = '/beacon/data01/chengjie.zheng001/data/kinetics-dataset/k700-2020/val/'
-
-
-# Define the supported file extensions
-extensions = ['mp4']
-
-# Find all matching files under the directory
-files = []
-for ext in extensions:
-    files.extend(glob.glob(os.path.join(directory_path, f'**/*.{ext}'), recursive=True))
-
-# Generate random integer labels
-labels = [random.randint(0, 100) for _ in files]
-
-# Path to save the CSV file
-csv_file_path = '/beacon/data01/chengjie.zheng001/Projects/MGH/umb-jepa/data_csv/k700_val.csv'
-
-# Save to the CSV file
-with open(csv_file_path, 'w', newline='') as csvfile:
-    filewriter = csv.writer(csvfile)
-    for file_path, label in zip(files, labels):
-        formatted_string = f"{file_path} ${label}"
-        filewriter.writerow([formatted_string])
-
-print(f"Saved {len(files)} entries to {csv_file_path}")
\ No newline at end of file
From a307393b282df6457823dc5cd4a364370ecc2e6e Mon Sep 17 00:00:00 2001
From: Chengjie Zheng
Date: Tue, 26 Mar 2024 16:32:42 +0000
Subject: [PATCH 09/10] MGH Experiments 1

---
 .gitignore                                |  3 +
 app/vjepa/train.py                        |  2 +
 configs/evals/vith16_k700_16x8x3.yaml     | 39 ++++++++
 configs/evals/vitl16_k700_16x8x3.yaml     | 39 ++++++++
 configs/pretrain/vitl16.yaml              |  7 +-
 .../pretrain/vitl16_mgh.yaml              | 97 ++++++++++---------
 evals/video_classification_frozen/eval.py |  5 +
 7 files changed, 141 insertions(+), 51 deletions(-)
 create mode 100644 configs/evals/vith16_k700_16x8x3.yaml
 create mode 100644 configs/evals/vitl16_k700_16x8x3.yaml
 rename logs/params-pretrain.yaml => configs/pretrain/vitl16_mgh.yaml (60%)

diff --git a/.gitignore b/.gitignore
index 3c53fb13..eebf95b8 100644
--- a/.gitignore
+++ b/.gitignore
@@ -3,4 +3,7 @@
 *.csv
 logs/
 logs2/
+logs3/
+logs4/
+log_mgh/
 data_csv/
\ No newline at end of file
diff --git a/app/vjepa/train.py b/app/vjepa/train.py
index 2b556168..1fefcf53 100644
--- a/app/vjepa/train.py
+++ b/app/vjepa/train.py
@@ -376,6 +376,8 @@ def save_checkpoint(epoch, path):
         gpu_time_meter = AverageMeter()
         wall_time_meter = AverageMeter()
 
+        ### Air test
+        print(ipe)
         for itr in range(ipe):
             itr_start_time = time.time()
 
diff --git a/configs/evals/vith16_k700_16x8x3.yaml b/configs/evals/vith16_k700_16x8x3.yaml
new file mode 100644
index 00000000..0f9bda1c
--- /dev/null
+++ b/configs/evals/vith16_k700_16x8x3.yaml
@@ -0,0 +1,39 @@
+nodes: 8
+tasks_per_node: 8
+tag: k700-16x8x3
+eval_name: video_classification_frozen
+resume_checkpoint: false
+data:
+  dataset_train: /beacon/data01/chengjie.zheng001/Projects/MGH/umb-jepa/data_csv/k700_train.csv
+  dataset_val: 
/beacon/data01/chengjie.zheng001/Projects/MGH/umb-jepa/data_csv/k700_val.csv + dataset_type: VideoDataset + num_classes: 700 + frames_per_clip: 16 + num_segments: 8 + num_views_per_segment: 3 + frame_step: 4 +optimization: + attend_across_segments: true + num_epochs: 20 + resolution: 224 + batch_size: 4 + weight_decay: 0.01 + lr: 0.001 + start_lr: 0.001 + final_lr: 0.0 + warmup: 0. + use_bfloat16: true +pretrain: + model_name: vit_huge + checkpoint_key: target_encoder + clip_duration: null + frames_per_clip: 16 + tubelet_size: 2 + uniform_power: true + use_silu: false + tight_silu: false + use_sdpa: true + patch_size: 16 + folder: /beacon/data01/chengjie.zheng001/Projects/MGH/umb-jepa/logs2/ + checkpoint: jepa-latest.pth.tar # name of pretrained model file inside folder + write_tag: jepa diff --git a/configs/evals/vitl16_k700_16x8x3.yaml b/configs/evals/vitl16_k700_16x8x3.yaml new file mode 100644 index 00000000..f4d07fc5 --- /dev/null +++ b/configs/evals/vitl16_k700_16x8x3.yaml @@ -0,0 +1,39 @@ +nodes: 8 +tasks_per_node: 8 +tag: k700-16x8x3 +eval_name: video_classification_frozen +resume_checkpoint: false +data: + dataset_train: /beacon/data01/chengjie.zheng001/Projects/MGH/umb-jepa/data_csv/k700_train_2.csv + dataset_val: /beacon/data01/chengjie.zheng001/Projects/MGH/umb-jepa/data_csv/k700_val_2.csv + dataset_type: VideoDataset + num_classes: 400 + frames_per_clip: 16 + num_segments: 8 + num_views_per_segment: 3 + frame_step: 4 +optimization: + attend_across_segments: true + num_epochs: 20 + resolution: 224 + batch_size: 4 + weight_decay: 0.01 + lr: 0.001 + start_lr: 0.001 + final_lr: 0.0 + warmup: 0. + use_bfloat16: true +pretrain: + model_name: vit_large + checkpoint_key: target_encoder + clip_duration: null + frames_per_clip: 16 + tubelet_size: 2 + uniform_power: true + use_silu: false + tight_silu: false + use_sdpa: true + patch_size: 16 + folder: /beacon/data01/chengjie.zheng001/Projects/MGH/umb-jepa/logs2/ + checkpoint: jepa-latest.pth.tar # name of pretrained model file inside folder + write_tag: jepa diff --git a/configs/pretrain/vitl16.yaml b/configs/pretrain/vitl16.yaml index 0342b3b3..003a1fa3 100644 --- a/configs/pretrain/vitl16.yaml +++ b/configs/pretrain/vitl16.yaml @@ -4,9 +4,10 @@ tasks_per_node: 8 data: dataset_type: VideoDataset datasets: - - /beacon/data01/chengjie.zheng001/Projects/MGH/umb-jepa/data_csv/k700_train.csv + # - /beacon/data01/chengjie.zheng001/Projects/MGH/umb-jepa/data_csv/k700_train_2.csv + - '/beacon/data01/chengjie.zheng001/Projects/MGH/umb-jepa/data_csv/MGH_train.csv' decode_one_clip: true - batch_size: 24 + batch_size: 6 num_clips: 1 num_frames: 16 tubelet_size: 2 @@ -73,7 +74,7 @@ model: use_mask_tokens: true zero_init_mask_tokens: true optimization: - ipe: 300 + ipe: 1 ipe_scale: 1.25 clip_grad: 10.0 weight_decay: 0.04 diff --git a/logs/params-pretrain.yaml b/configs/pretrain/vitl16_mgh.yaml similarity index 60% rename from logs/params-pretrain.yaml rename to configs/pretrain/vitl16_mgh.yaml index 3614bbdb..595660c6 100644 --- a/logs/params-pretrain.yaml +++ b/configs/pretrain/vitl16_mgh.yaml @@ -1,22 +1,25 @@ app: vjepa +nodes: 16 +tasks_per_node: 8 data: - batch_size: 24 - clip_duration: null - crop_size: 224 dataset_type: VideoDataset datasets: - - /beacon/data01/chengjie.zheng001/Projects/MGH/umb-jepa/data_csv/k700_train.csv + # - /beacon/data01/chengjie.zheng001/Projects/MGH/umb-jepa/data_csv/k700_train_2.csv + - '/beacon/data01/chengjie.zheng001/Projects/MGH/umb-jepa/data_csv/MGH_train.csv' decode_one_clip: true - filter_short_videos: false 
+ batch_size: 3 num_clips: 1 - num_frames: 16 - num_workers: 12 + num_frames: 32 + tubelet_size: 2 + sampling_rate: 2 + crop_size: 224 patch_size: 16 pin_mem: true - sampling_rate: 4 - tubelet_size: 2 + num_workers: 12 + filter_short_videos: false + clip_duration: null data_aug: - auto_augment: false + auto_augment: true motion_shift: false random_resize_aspect_ratio: - 0.75 @@ -26,43 +29,43 @@ data_aug: - 1.0 reprob: 0.0 logging: - folder: /beacon/data01/chengjie.zheng001/Projects/MGH/umb-jepa/logs/ + folder: /beacon/data01/chengjie.zheng001/Projects/MGH/umb-jepa/log_mgh/exp1/ write_tag: jepa loss: loss_exp: 1.0 reg_coeff: 0.0 mask: -- aspect_ratio: - - 0.75 - - 1.5 - max_keep: null - max_temporal_keep: 1.0 - num_blocks: 8 - spatial_scale: - - 0.15 - - 0.15 - temporal_scale: - - 1.0 - - 1.0 -- aspect_ratio: - - 0.75 - - 1.5 - max_keep: null - max_temporal_keep: 1.0 - num_blocks: 2 - spatial_scale: - - 0.7 - - 0.7 - temporal_scale: - - 1.0 - - 1.0 + - aspect_ratio: + - 0.75 + - 1.5 + num_blocks: 8 + spatial_scale: + - 0.15 + - 0.15 + temporal_scale: + - 1.0 + - 1.0 + max_temporal_keep: 1.0 + max_keep: null + - aspect_ratio: + - 0.75 + - 1.5 + num_blocks: 2 + spatial_scale: + - 0.7 + - 0.7 + temporal_scale: + - 1.0 + - 1.0 + max_temporal_keep: 1.0 + max_keep: null meta: - dtype: bfloat16 - eval_freq: 100 load_checkpoint: false read_checkpoint: null seed: 234 + eval_freq: 100 use_sdpa: true + dtype: bfloat16 model: model_name: vit_large pred_depth: 12 @@ -70,19 +73,17 @@ model: uniform_power: true use_mask_tokens: true zero_init_mask_tokens: true -nodes: 16 optimization: + ipe: 1 + ipe_scale: 1.25 clip_grad: 10.0 + weight_decay: 0.04 + final_weight_decay: 0.4 + epochs: 300 + warmup: 40 + start_lr: 0.0002 + lr: 0.000625 + final_lr: 1.0e-06 ema: - 0.998 - 1.0 - epochs: 300 - final_lr: 1.0e-06 - final_weight_decay: 0.4 - ipe: 300 - ipe_scale: 1.25 - lr: 0.000625 - start_lr: 0.0002 - warmup: 40 - weight_decay: 0.04 -tasks_per_node: 8 diff --git a/evals/video_classification_frozen/eval.py b/evals/video_classification_frozen/eval.py index f81f526d..790bfabb 100644 --- a/evals/video_classification_frozen/eval.py +++ b/evals/video_classification_frozen/eval.py @@ -328,7 +328,12 @@ def run_one_epoch( for di in data[0] # iterate over temporal index of clip ] clip_indices = [d.to(device, non_blocking=True) for d in data[2]] + ### AIR Test1 + print(data[1]) + # tensor_data_1 = torch.tensor(data[1], dtype=torch.float32) + # print(type(tensor_data_1)) labels = data[1].to(device) + # labels = tensor_data_1.to(device) batch_size = len(labels) # Forward and prediction From a307393b282df6457823dc5cd4a364370ecc2e6e Mon Sep 17 00:00:00 2001 From: Chengjie Zheng Date: Fri, 29 Mar 2024 18:28:23 +0000 Subject: [PATCH 10/10] code debug fixed --- .gitignore | 5 +---- configs/evals/vitl16_k700_16x8x3.yaml | 4 ++-- configs/pretrain/vitl16.yaml | 7 +++---- evals/video_classification_frozen/eval.py | 2 +- 4 files changed, 7 insertions(+), 11 deletions(-) diff --git a/.gitignore b/.gitignore index eebf95b8..ccc7c76d 100644 --- a/.gitignore +++ b/.gitignore @@ -1,9 +1,6 @@ *.swp *.pyc *.csv -logs/ -logs2/ -logs3/ -logs4/ +log_k700/ log_mgh/ data_csv/ \ No newline at end of file diff --git a/configs/evals/vitl16_k700_16x8x3.yaml b/configs/evals/vitl16_k700_16x8x3.yaml index f4d07fc5..f6198cce 100644 --- a/configs/evals/vitl16_k700_16x8x3.yaml +++ b/configs/evals/vitl16_k700_16x8x3.yaml @@ -7,7 +7,7 @@ data: dataset_train: /beacon/data01/chengjie.zheng001/Projects/MGH/umb-jepa/data_csv/k700_train_2.csv dataset_val: 
/beacon/data01/chengjie.zheng001/Projects/MGH/umb-jepa/data_csv/k700_val_2.csv dataset_type: VideoDataset - num_classes: 400 + num_classes: 700 frames_per_clip: 16 num_segments: 8 num_views_per_segment: 3 @@ -34,6 +34,6 @@ pretrain: tight_silu: false use_sdpa: true patch_size: 16 - folder: /beacon/data01/chengjie.zheng001/Projects/MGH/umb-jepa/logs2/ + folder: /beacon/data01/chengjie.zheng001/Projects/MGH/umb-jepa/log_k700/exp2_e300/ checkpoint: jepa-latest.pth.tar # name of pretrained model file inside folder write_tag: jepa diff --git a/configs/pretrain/vitl16.yaml b/configs/pretrain/vitl16.yaml index 003a1fa3..066705a4 100644 --- a/configs/pretrain/vitl16.yaml +++ b/configs/pretrain/vitl16.yaml @@ -4,10 +4,9 @@ tasks_per_node: 8 data: dataset_type: VideoDataset datasets: - # - /beacon/data01/chengjie.zheng001/Projects/MGH/umb-jepa/data_csv/k700_train_2.csv - - '/beacon/data01/chengjie.zheng001/Projects/MGH/umb-jepa/data_csv/MGH_train.csv' + - /beacon/data01/chengjie.zheng001/Projects/MGH/umb-jepa/data_csv/k700_train_2.csv decode_one_clip: true - batch_size: 6 + batch_size: 24 num_clips: 1 num_frames: 16 tubelet_size: 2 @@ -29,7 +28,7 @@ data_aug: - 1.0 reprob: 0.0 logging: - folder: /beacon/data01/chengjie.zheng001/Projects/MGH/umb-jepa/logs/ + folder: /beacon/data01/chengjie.zheng001/Projects/MGH/umb-jepa/log_k700/exp2_e300/ write_tag: jepa loss: loss_exp: 1.0 diff --git a/evals/video_classification_frozen/eval.py b/evals/video_classification_frozen/eval.py index 790bfabb..26d64889 100644 --- a/evals/video_classification_frozen/eval.py +++ b/evals/video_classification_frozen/eval.py @@ -329,7 +329,7 @@ def run_one_epoch( ] clip_indices = [d.to(device, non_blocking=True) for d in data[2]] ### AIR Test1 - print(data[1]) + # print(data[1]) # tensor_data_1 = torch.tensor(data[1], dtype=torch.float32) # print(type(tensor_data_1)) labels = data[1].to(device)
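
Editor's note (illustration only, not part of the patch series): the "AIR Test1" lines added and then commented out in evals/video_classification_frozen/eval.py appear to probe whether data[1] -- the label field yielded by the video DataLoader -- is already a tensor that can be moved to the device directly, or arrives as a plain Python list needing an explicit conversion. The sketch below reproduces both paths under that assumption; the names data and device mirror the diff, while the example batch values are made up for demonstration.

# Illustration only -- not part of the patch above.
import torch

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Case 1: the collate function already returns a tensor of class indices,
# as the retained code path assumes -- .to(device) is all that is needed.
data = [None, torch.tensor([3, 141, 699, 12]), None]
labels = data[1].to(device)

# Case 2: labels arrive as a plain Python list -- the conversion the
# commented-out lines experimented with (note that classification targets
# would normally stay integer-typed rather than float32).
raw_labels = [3, 141, 699, 12]
labels = torch.as_tensor(raw_labels, dtype=torch.long).to(device)

print(labels.shape, labels.dtype, labels.device)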