diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile index b862470a..18fc211e 100644 --- a/.devcontainer/Dockerfile +++ b/.devcontainer/Dockerfile @@ -1,4 +1,4 @@ -FROM ghcr.io/mrphys/tensorflow-manylinux:1.12.0 +FROM ghcr.io/mrphys/tensorflow-manylinux:1.14.0 # To enable plotting. RUN apt-get update && \ @@ -15,6 +15,15 @@ RUN for PYVER in ${PYVERSIONS}; do ${PYBIN}${PYVER} -m pip install ipykernel; do COPY requirements.txt /tmp/requirements.txt RUN for PYVER in ${PYVERSIONS}; do ${PYBIN}${PYVER} -m pip install -r /tmp/requirements.txt; done +# For `tf.keras.utils.plot_model`. +RUN apt-get update && \ + apt-get install -y graphviz && \ + for PYVER in ${PYVERSIONS}; do ${PYBIN}${PYVER} -m pip install pydot; done + +# Reinstall Tensorboard. +RUN for PYVER in ${PYVERSIONS}; do ${PYBIN}${PYVER} -m pip uninstall -y tensorboard tb-nightly; done && \ + for PYVER in ${PYVERSIONS}; do ${PYBIN}${PYVER} -m pip install tensorboard; done + # Create non-root user. ARG USERNAME=vscode ARG USER_UID=1000 diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json index f813b572..9022d9f2 100644 --- a/.devcontainer/devcontainer.json +++ b/.devcontainer/devcontainer.json @@ -6,7 +6,8 @@ "extensions": [ "ms-python.python", "ms-vscode.cpptools", - "github.copilot" + "github.copilot", + "github.vscode-pull-request-github" ], // Enable GPUs. 
"runArgs": [ diff --git a/.github/workflows/build-package.yml b/.github/workflows/build-package.yml index 86e9b9a8..29c645ed 100644 --- a/.github/workflows/build-package.yml +++ b/.github/workflows/build-package.yml @@ -16,7 +16,7 @@ jobs: name: Build package runs-on: ubuntu-latest - + container: image: ghcr.io/mrphys/tensorflow-manylinux:1.12.0 @@ -56,7 +56,7 @@ jobs: - name: Build docs run: | make docs PY_VERSION=${{ matrix.py_version }} - + - name: Upload wheel if: startsWith(github.ref, 'refs/tags') uses: actions/upload-artifact@v2 @@ -81,12 +81,12 @@ jobs: release: - + name: Release needs: build runs-on: ubuntu-latest if: startsWith(github.ref, 'refs/tags') - + steps: - name: Checkout docs branch @@ -122,7 +122,7 @@ jobs: uses: softprops/action-gh-release@v1 with: name: TensorFlow MRI ${{ env.release }} - body_path: RELEASE.rst + body_path: RELEASE.md prerelease: ${{ contains(env.release, 'a') || contains(env.release, 'b') || contains(env.release, 'rc') }} fail_on_unmatched_files: true diff --git a/.gitignore b/.gitignore index 07840dd5..dd1821b7 100644 --- a/.gitignore +++ b/.gitignore @@ -6,8 +6,9 @@ __pycache__/ artifacts/ build/ +logs/ third_party/spiral_waveform tools/docs/_build tools/docs/_templates tools/docs/api_docs -tools/docs/index.rst +tools/docs/index.md diff --git a/.vscode/settings.json b/.vscode/settings.json index 3d253868..8f1c2d83 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -9,5 +9,6 @@ "python.testing.pytestEnabled": false, "python.testing.unittestEnabled": true, "python.linting.pylintEnabled": true, - "python.linting.enabled": true + "python.linting.enabled": true, + "notebook.output.textLineLimit": 500 } \ No newline at end of file diff --git a/AUTHORS b/AUTHORS new file mode 100644 index 00000000..7af508cb --- /dev/null +++ b/AUTHORS @@ -0,0 +1,7 @@ +# This file contains a list of individuals and organizations who are authors +# of this project for copyright purposes. 
+# For a full list of individuals who have contributed to the project, see the +# CONTRIBUTORS file. + +Javier Montalt-Tordera +University College London diff --git a/CODEOWNERS b/CODEOWNERS new file mode 100644 index 00000000..16609512 --- /dev/null +++ b/CODEOWNERS @@ -0,0 +1 @@ +* @jmontalt diff --git a/CONTRIBUTORS b/CONTRIBUTORS new file mode 100644 index 00000000..164b2472 --- /dev/null +++ b/CONTRIBUTORS @@ -0,0 +1,6 @@ +# This file contains a list of individuals who have made a contribution to this +# project. If you are making a contribution, please add yourself to this list +# using the format: +# Name + +Javier Montalt-Tordera diff --git a/Makefile b/Makefile index 5b3ff150..d148e212 100644 --- a/Makefile +++ b/Makefile @@ -14,10 +14,11 @@ TF_LDFLAGS := $(shell $(PYTHON) -c 'import tensorflow as tf; print(" ".join(tf.s CFLAGS := -O3 -march=x86-64 -mtune=generic CXXFLAGS := $(CFLAGS) -CXXFLAGS += $(TF_CFLAGS) -fPIC -std=c++14 +CXXFLAGS += $(TF_CFLAGS) -fPIC -std=c++17 -fopenmp CXXFLAGS += -I$(ROOT_DIR) LDFLAGS := $(TF_LDFLAGS) +LDFLAGS += -lfftw3_omp -lfftw3f_omp -lfftw3 -lfftw3f -lm LDFLAGS += -l:libspiral_waveform.a all: lib wheel diff --git a/README.md b/README.md new file mode 100644 index 00000000..81743ae0 --- /dev/null +++ b/README.md @@ -0,0 +1,135 @@ +
+ +
+ +[![PyPI](https://badge.fury.io/py/tensorflow-mri.svg)](https://badge.fury.io/py/tensorflow-mri) +[![Build](https://github.com/mrphys/tensorflow-mri/actions/workflows/build-package.yml/badge.svg)](https://github.com/mrphys/tensorflow-mri/actions/workflows/build-package.yml) +[![Docs](https://img.shields.io/badge/api-reference-blue.svg)](https://mrphys.github.io/tensorflow-mri/) +[![DOI](https://zenodo.org/badge/388094708.svg)](https://zenodo.org/badge/latestdoi/388094708) + + + +TensorFlow MRI is a library of TensorFlow operators for computational MRI. +The library has a Python interface and is mostly written in Python. However, +computations are efficiently performed by the TensorFlow backend (implemented in +C++/CUDA), which brings together the ease of use and fast prototyping of Python +with the speed and efficiency of optimized lower-level implementations. + +Being an extension of TensorFlow, TensorFlow MRI integrates seamlessly in ML +applications. No additional interfacing is needed to include a SENSE operator +within a neural network, or to use a trained prior as part of an iterative +reconstruction. Therefore, the gap between ML and non-ML components of image +processing pipelines is eliminated. + +Whether an application involves ML or not, TensorFlow MRI operators can take +full advantage of the TensorFlow framework, with capabilities including +automatic differentiation, multi-device support (CPUs and GPUs), automatic +device placement and copying of tensor data, and conversion to fast, +serializable graphs. + +TensorFlow MRI contains operators for: + +- Multicoil arrays + ([`tfmri.coils`](https://mrphys.github.io/tensorflow-mri/api_docs/tfmri/coils)): + coil combination, coil compression and estimation of coil sensitivity + maps. +- Convex optimization + ([`tfmri.convex`](https://mrphys.github.io/tensorflow-mri/api_docs/tfmri/convex)): + convex functions (quadratic, L1, L2, Tikhonov, total variation, etc.) and + optimizers (ADMM). 
+- Keras initializers + ([`tfmri.initializers`](https://mrphys.github.io/tensorflow-mri/api_docs/tfmri/initializers)): + neural network initializers, including support for complex-valued weights. +- I/O ([`tfmri.io`](https://mrphys.github.io/tensorflow-mri/api_docs/tfmri/io)): + additional I/O functions potentially useful when working with MRI data. +- Keras layers + ([`tfmri.layers`](https://mrphys.github.io/tensorflow-mri/api_docs/tfmri/layers)): + layers and building blocks for neural networks, including support for + complex-valued weights, inputs and outputs. +- Linear algebra + ([`tfmri.linalg`](https://mrphys.github.io/tensorflow-mri/api_docs/tfmri/linalg)): + linear operators specialized for image processing and MRI. +- Loss functions + ([`tfmri.losses`](https://mrphys.github.io/tensorflow-mri/api_docs/tfmri/losses)): + for classification, segmentation and image restoration. +- Metrics + ([`tfmri.metrics`](https://mrphys.github.io/tensorflow-mri/api_docs/tfmri/metrics)): + for classification, segmentation and image restoration. +- Image processing + ([`tfmri.image`](https://mrphys.github.io/tensorflow-mri/api_docs/tfmri/image)): + filtering, gradients, phantoms, image quality assessment, etc. +- Image reconstruction + ([`tfmri.recon`](https://mrphys.github.io/tensorflow-mri/api_docs/tfmri/recon)): + Cartesian/non-Cartesian, 2D/3D, parallel imaging, compressed sensing. +- *k*-space sampling + ([`tfmri.sampling`](https://mrphys.github.io/tensorflow-mri/api_docs/tfmri/sampling)): + Cartesian masks, non-Cartesian trajectories, sampling density compensation, + etc. +- Signal processing + ([`tfmri.signal`](https://mrphys.github.io/tensorflow-mri/api_docs/tfmri/signal)): + N-dimensional fast Fourier transform (FFT), non-uniform FFT (NUFFT) + ([see also `TensorFlow NUFFT`](https://github.com/mrphys/tensorflow-nufft)), + discrete wavelet transform (DWT), *k*-space filtering, etc. 
+- Unconstrained optimization + ([`tfmri.optimize`](https://mrphys.github.io/tensorflow-mri/api_docs/tfmri/optimize)): + gradient descent, L-BFGS. +- And more, e.g., supporting array manipulation and math tasks. + + + +## Installation + + + +You can install TensorFlow MRI with ``pip``: + +``` +pip install tensorflow-mri +``` + +Note that only Linux is currently supported. + +### TensorFlow Compatibility + +Each TensorFlow MRI release is compiled against a specific version of +TensorFlow. To ensure compatibility, it is recommended to install matching +versions of TensorFlow and TensorFlow MRI according to the table below. + + + +| TensorFlow MRI Version | TensorFlow Compatibility | Release Date | +| ---------------------- | ------------------------ | ------------ | +| v0.22.0 | v2.9.x | Jul 24, 2022 | +| v0.21.0 | v2.9.x | Jul 24, 2022 | +| v0.20.0 | v2.9.x | Jun 18, 2022 | +| v0.19.0 | v2.9.x | Jun 1, 2022 | +| v0.18.0 | v2.8.x | May 6, 2022 | + + + + + +## Documentation + +Visit the [docs](https://mrphys.github.io/tensorflow-mri/) for guides, +tutorials and the API reference. + +## Issues + +If you use this package and something does not work as you expected, please +[file an issue](https://github.com/mrphys/tensorflow-mri/issues/new) +describing your problem. We're here to help! + +## Credits + +If you like this software, star the repository! [![Stars](https://img.shields.io/github/stars/mrphys/tensorflow-mri?style=social)](https://github.com/mrphys/tensorflow-mri/stargazers) + +If you find this software useful in your research, you can cite TensorFlow MRI +using its [Zenodo record](https://doi.org/10.5281/zenodo.5151590). + +In the above link, scroll down to the "Export" section and select your favorite +export format to get an up-to-date citation. + +## Contributions + +Contributions of any kind are welcome! Open an issue or pull request to begin. 
diff --git a/README.rst b/README.rst deleted file mode 100644 index 18af1d93..00000000 --- a/README.rst +++ /dev/null @@ -1,170 +0,0 @@ -.. image:: https://raw.githubusercontent.com/mrphys/tensorflow-mri/v0.6.0/tools/assets/tfmr_logo.svg?sanitize=true - :align: center - :scale: 100 % - :alt: TFMRI logo - -| - -|pypi| |build| |docs| |doi| - -.. |pypi| image:: https://badge.fury.io/py/tensorflow-mri.svg - :target: https://badge.fury.io/py/tensorflow-mri -.. |build| image:: https://github.com/mrphys/tensorflow-mri/actions/workflows/build-package.yml/badge.svg - :target: https://github.com/mrphys/tensorflow-mri/actions/workflows/build-package.yml -.. |docs| image:: https://img.shields.io/badge/api-reference-blue.svg - :target: https://mrphys.github.io/tensorflow-mri/ -.. |doi| image:: https://zenodo.org/badge/388094708.svg - :target: https://zenodo.org/badge/latestdoi/388094708 - -.. start-intro - -TensorFlow MRI is a library of TensorFlow operators for computational MRI. -The library has a Python interface and is mostly written in Python. However, -computations are efficiently performed by the TensorFlow backend (implemented in -C++/CUDA), which brings together the ease of use and fast prototyping of Python -with the speed and efficiency of optimized lower-level implementations. - -Being an extension of TensorFlow, TensorFlow MRI integrates seamlessly in ML -applications. No additional interfacing is needed to include a SENSE operator -within a neural network, or to use a trained prior as part of an iterative -reconstruction. Therefore, the gap between ML and non-ML components of image -processing pipelines is eliminated. - -Whether an application involves ML or not, TensorFlow MRI operators can take -full advantage of the TensorFlow framework, with capabilities including -automatic differentiation, multi-device support (CPUs and GPUs), automatic -device placement and copying of tensor data, and conversion to fast, -serializable graphs. 
- -TensorFlow MRI contains operators for: - -* Multicoil arrays - (`tfmri.coils `_): - coil combination, coil compression and estimation of coil sensitivity - maps. -* Convex optimization - (`tfmri.convex `_): - convex functions (quadratic, L1, L2, Tikhonov, total variation, etc.) and - optimizers (ADMM). -* Keras initializers - (`tfmri.initializers `_): - neural network initializers, including support for complex-valued weights. -* I/O (`tfmri.io `_): - additional I/O functions potentially useful when working with MRI data. -* Keras layers - (`tfmri.layers `_): - layers and building blocks for neural networks, including support for - complex-valued weights, inputs and outputs. -* Linear algebra - (`tfmri.linalg `_): - linear operators specialized for image processing and MRI. -* Loss functions - (`tfmri.losses `_): - for classification, segmentation and image restoration. -* Metrics - (`tfmri.metrics `_): - for classification, segmentation and image restoration. -* Image processing - (`tfmri.image `_): - filtering, gradients, phantoms, image quality assessment, etc. -* Image reconstruction - (`tfmri.recon `_): - Cartesian/non-Cartesian, 2D/3D, parallel imaging, compressed sensing. -* *k*-space sampling - (`tfmri.sampling `_): - Cartesian masks, non-Cartesian trajectories, sampling density compensation, - etc. -* Signal processing - (`tfmri.signal `_): - N-dimensional fast Fourier transform (FFT), non-uniform FFT (NUFFT) - (see also `TensorFlow NUFFT `_), - discrete wavelet transform (DWT), *k*-space filtering, etc. -* Unconstrained optimization - (`tfmri.optimize `_): - gradient descent, L-BFGS. -* And more, e.g., supporting array manipulation and math tasks. - -.. end-intro - -Installation ------------- - -.. start-install - -You can install TensorFlow MRI with ``pip``: - -.. code-block:: console - - $ pip install tensorflow-mri - -Note that only Linux is currently supported. 
- -TensorFlow Compatibility -^^^^^^^^^^^^^^^^^^^^^^^^ - -Each TensorFlow MRI release is compiled against a specific version of -TensorFlow. To ensure compatibility, it is recommended to install matching -versions of TensorFlow and TensorFlow MRI according to the table below. - -.. start-compatibility-table - -====================== ======================== ============ -TensorFlow MRI Version TensorFlow Compatibility Release Date -====================== ======================== ============ -v0.21.0 v2.9.x Jul 24, 2022 -v0.20.0 v2.9.x Jun 18, 2022 -v0.19.0 v2.9.x Jun 1, 2022 -v0.18.0 v2.8.x May 6, 2022 -v0.17.0 v2.8.x Apr 22, 2022 -v0.16.0 v2.8.x Apr 13, 2022 -v0.15.0 v2.8.x Apr 1, 2022 -v0.14.0 v2.8.x Mar 29, 2022 -v0.13.0 v2.8.x Mar 15, 2022 -v0.12.0 v2.8.x Mar 14, 2022 -v0.11.0 v2.8.x Mar 10, 2022 -v0.10.0 v2.8.x Mar 3, 2022 -v0.9.0 v2.7.x Dec 3, 2021 -v0.8.0 v2.7.x Nov 11, 2021 -v0.7.0 v2.6.x Nov 3, 2021 -v0.6.2 v2.6.x Oct 13, 2021 -v0.6.1 v2.6.x Sep 30, 2021 -v0.6.0 v2.6.x Sep 28, 2021 -v0.5.0 v2.6.x Aug 29, 2021 -v0.4.0 v2.6.x Aug 18, 2021 -====================== ======================== ============ - -.. end-compatibility-table - -.. end-install - -Documentation -------------- - -Visit the `docs `_ for guides, -tutorials and the API reference. - -Issues ------- - -If you use this package and something does not work as you expected, please -`file an issue `_ -describing your problem. We're here to help! - -Credits -------- - -If you like this software, star the repository! |stars| - -.. |stars| image:: https://img.shields.io/github/stars/mrphys/tensorflow-mri?style=social - :target: https://github.com/mrphys/tensorflow-mri/stargazers - -If you find this software useful in your research, you can cite TensorFlow MRI -using its `Zenodo record `_. - -In the above link, scroll down to the "Export" section and select your favorite -export format to get an up-to-date citation. - -Contributions -------------- - -Contributions of any kind are welcome! 
Open an issue or pull request to begin. diff --git a/RELEASE.md b/RELEASE.md new file mode 100644 index 00000000..136d1416 --- /dev/null +++ b/RELEASE.md @@ -0,0 +1,62 @@ +# Release 0.22.0 + + + +## Breaking Changes + +- `tfmri.models` + + - `ConvBlock1D`, `ConvBlock2D` and `ConvBlock3D`contain backwards + incompatible changes. + - `UNet1D`, `UNet2D` and `UNet3D` contain backwards incompatible changes. + + +## Major Features and Improvements + +- `tf`: + + - Added custom FFT kernels for CPU. These can be used directly through the + standard core TF APIs `tf.signal.fft`, `tf.signal.fft2d` and + `tf.signal.fft3d`. + +- `tfmri.activations`: + + - Added new functions `complex_relu` and `mod_relu`. + +- `tfmri.callbacks`: + + - The `TensorBoardImages` callback can now create multiple summaries. + +- `tfmri.coils`: + + - Added new function `estimate_sensitivities_universal`. + +- `tfmri.geometry`: + + - Added new extension type `Rotation2D`. + +- `tfmri.layers`: + + - Added new wrapper layer `Normalized`. + +- `tfmri.models`: + + - Added new models `ConvBlockLSTM1D`, `ConvBlockLSTM2D` and `ConvBlockLSTM3D`. + - Added new models `UNetLSTM1D`, `UNetLSTM2D` and `UNetLSTM3D`. + +- `tfmri.sampling`: + + - Added operator `spiral_waveform` to public API. + - Added new functions `accel_mask` and `center_mask`. + + +## Bug Fixes and Other Changes + +- `tfmri`: + + - Removed the TensorFlow Graphics dependency, which should also eliminate + the common OpenEXR error. + +- `tfmri.recon`: + + - Improved error reporting for ``least_squares``. diff --git a/RELEASE.rst b/RELEASE.rst deleted file mode 100644 index ce299918..00000000 --- a/RELEASE.rst +++ /dev/null @@ -1,61 +0,0 @@ -Release 0.21.0 -============== - -This release contains new functionality for wavelet decomposition and -reconstruction and optimized Gram matrices for some linear operators. It also -redesigns the convex optimization module and contains some improvements to the -documentation. 
- - -Breaking Changes ----------------- - -* ``tfmri.convex``: - - * Argument ``ndim`` has been removed from all functions. - * All functions will now require the domain dimension to be - specified. Therefore, `domain_dimension` is now the first positional - argument in several functions including ``ConvexFunctionIndicatorBall``, - ``ConvexFunctionNorm`` and ``ConvexFunctionTotalVariation``. However, while - this parameter is no longer optional, it is now possible to pass dynamic - or static information as opposed to static only (at least in the general - case, but specific operators may have additional restrictions). - * For consistency and accuracy, argument ``axis`` of - ``ConvexFunctionTotalVariation`` has been renamed to ``axes``. - - -Major Features and Improvements -------------------------------- - -* ``tfmri.convex``: - - * Added new class ``ConvexFunctionL1Wavelet``, which enables image/signal - reconstruction with L1-wavelet regularization. - * Added new argument ``gram_operator`` to ``ConvexFunctionLeastSquares``, - which allows the user to specify a custom, potentially more efficient Gram - matrix. - -* ``tfmri.linalg``: - - * Added new classes ``LinearOperatorNUFFT`` and ``LinearOperatorGramNUFFT`` - to enable the use of NUFFT as a linear operator. - * Added new class ``LinearOperatorWavelet`` to enable the use of wavelets - as a linear operator. - -* ``tfmri.sampling``: - - * Added new ordering type ``sorted_half`` to ``radial_trajectory``. - -* ``tfmri.signal``: - - * Added new functions ``wavedec`` and ``waverec`` for wavelet decomposition - and reconstruction, as well as utilities ``wavelet_coeffs_to_tensor``, - ``tensor_to_wavelet_coeffs``, and ``max_wavelet_level``. - - -Bug Fixes and Other Changes ---------------------------- - -* ``tfmri.recon``: - - * Improved error reporting for ``least_squares``. 
diff --git a/pylintrc b/pylintrc index b74f480f..4cf1c83b 100755 --- a/pylintrc +++ b/pylintrc @@ -327,10 +327,10 @@ ignore-exceptions=AssertionError,NotImplementedError,StopIteration,TypeError # Number of spaces of indent required when the last token on the preceding line # is an open (, [, or {. -indent-after-paren=2 +indent-after-paren=4 [GOOGLE LINES] # Regexp for a proper copyright notice. -copyright=Copyright \d{4} University College London\. +All [Rr]ights [Rr]eserved\. +copyright=Copyright \d{4} The TensorFlow MRI Authors\. +All [Rr]ights [Rr]eserved\. diff --git a/requirements.txt b/requirements.txt index 916dc593..ecb06801 100755 --- a/requirements.txt +++ b/requirements.txt @@ -6,8 +6,8 @@ plotly PyWavelets scipy tensorboard -tensorflow>=2.9.0,<2.10.0 -tensorflow-graphics -tensorflow-io>=0.26.0 -tensorflow-nufft>=0.8.0 -tensorflow-probability>=0.16.0 +tensorflow>=2.10.0,<2.11.0 +tensorflow-addons>=0.17.0,<0.18.0 +tensorflow-io>=0.27.0,<0.28.0 +tensorflow-nufft>=0.10.0,<0.11.0 +tensorflow-probability>=0.18.0,<0.19.0 diff --git a/setup.py b/setup.py index e0f01d9a..fe83437b 100755 --- a/setup.py +++ b/setup.py @@ -1,4 +1,4 @@ -# Copyright 2021 University College London. All Rights Reserved. +# Copyright 2021 The TensorFlow MRI Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -26,7 +26,7 @@ with open(path.join(ROOT, "tensorflow_mri/__about__.py")) as f: exec(f.read(), ABOUT) -with open(path.join(ROOT, "README.rst"), encoding='utf-8') as f: +with open(path.join(ROOT, "README.md"), encoding='utf-8') as f: LONG_DESCRIPTION = f.read() with open(path.join(ROOT, "requirements.txt")) as f: @@ -42,7 +42,7 @@ class BinaryDistribution(Distribution): def has_ext_modules(self): return True - + def is_pure(self): return False @@ -51,7 +51,7 @@ def is_pure(self): version=ABOUT['__version__'], description=ABOUT['__summary__'], long_description=LONG_DESCRIPTION, - long_description_content_type="text/x-rst", + long_description_content_type="text/markdown", author=ABOUT['__author__'], author_email=ABOUT['__email__'], url=ABOUT['__uri__'], @@ -80,5 +80,5 @@ def is_pure(self): 'Topic :: Software Development :: Libraries :: Python Modules' ], license=ABOUT['__license__'], - keywords=['tensorflow', 'mri', 'machine learning', 'ml'] + keywords=['tensorflow', 'mri', 'machine learning', 'ml'] ) diff --git a/tensorflow_mri/__about__.py b/tensorflow_mri/__about__.py index c60e01a2..8670dc3d 100644 --- a/tensorflow_mri/__about__.py +++ b/tensorflow_mri/__about__.py @@ -1,4 +1,4 @@ -# Copyright 2021 University College London. All Rights Reserved. +# Copyright 2021 The TensorFlow MRI Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -29,10 +29,10 @@ __summary__ = "A collection of TensorFlow add-ons for computational MRI." 
__uri__ = "https://github.com/mrphys/tensorflow-mri" -__version__ = "0.21.0" +__version__ = "0.22.0" __author__ = "Javier Montalt Tordera" __email__ = "javier.montalt@outlook.com" __license__ = "Apache 2.0" -__copyright__ = "2021 University College London" +__copyright__ = "2021 The TensorFlow MRI Authors" diff --git a/tensorflow_mri/__init__.py b/tensorflow_mri/__init__.py index 35b8f0e9..339a57ca 100644 --- a/tensorflow_mri/__init__.py +++ b/tensorflow_mri/__init__.py @@ -1,6 +1,7 @@ # This file was automatically generated by tools/build/create_api.py. # Do not edit. """TensorFlow MRI.""" +import glob as _glob import os as _os import sys as _sys @@ -8,12 +9,9 @@ # TODO(jmontalt): Remove these imports on release 1.0.0. from tensorflow_mri.python.ops.array_ops import * -from tensorflow_mri.python.ops.coil_ops import * from tensorflow_mri.python.ops.convex_ops import * from tensorflow_mri.python.ops.fft_ops import * -from tensorflow_mri.python.ops.geom_ops import * from tensorflow_mri.python.ops.image_ops import * -from tensorflow_mri.python.ops.linalg_ops import * from tensorflow_mri.python.ops.math_ops import * from tensorflow_mri.python.ops.optimizer_ops import * from tensorflow_mri.python.ops.recon_ops import * @@ -23,10 +21,12 @@ from tensorflow_mri import python # Import submodules. +from tensorflow_mri._api import activations from tensorflow_mri._api import array from tensorflow_mri._api import callbacks from tensorflow_mri._api import coils from tensorflow_mri._api import convex +from tensorflow_mri._api import geometry from tensorflow_mri._api import image from tensorflow_mri._api import initializers from tensorflow_mri._api import io @@ -54,3 +54,44 @@ __path__ = [_tfmri_api_dir] elif _tfmri_api_dir not in __path__: __path__.append(_tfmri_api_dir) + +# Hook for loading tests by `unittest`. +def load_tests(loader, tests, pattern): + """Loads all TFMRI tests, including unit tests and doc tests. 
+ + For the parameters, see the + [`load_tests` protocol](https://docs.python.org/3/library/unittest.html#load-tests-protocol). + """ + import doctest # pylint: disable=import-outside-toplevel + + # This loads all the regular unit tests. These three lines essentially + # replicate the standard behavior if there was no `load_tests` function. + root_dir = _os.path.dirname(__file__) + unit_tests = loader.discover(start_dir=root_dir, pattern=pattern) + tests.addTests(unit_tests) + + def set_up_doc_test(test): + """Sets up a doctest. + + Runs at the beginning of every doctest. We use it to import common + packages including NumPy, TensorFlow and TensorFlow MRI. Tests are kept + more concise by not repeating these imports each time. + + Args: + test: A `DocTest` object. + """ + # pylint: disable=import-outside-toplevel,import-self + import numpy as _np + import tensorflow as _tf + import tensorflow_mri as _tfmri + # Add these packages to globals. + test.globs['np'] = _np + test.globs['tf'] = _tf + test.globs['tfmri'] = _tfmri + + # Now load all the doctests. + py_files = _glob.glob(_os.path.join(root_dir, '**/*.py'), recursive=True) + tests.addTests(doctest.DocFileSuite( + *py_files, module_relative=False, setUp=set_up_doc_test)) + + return tests diff --git a/tensorflow_mri/_api/activations/__init__.py b/tensorflow_mri/_api/activations/__init__.py new file mode 100644 index 00000000..33edf311 --- /dev/null +++ b/tensorflow_mri/_api/activations/__init__.py @@ -0,0 +1,9 @@ +# This file was automatically generated by tools/build/create_api.py. +# Do not edit. 
+"""Activation functions.""" + +from tensorflow_mri.python.activations.complex_activations import complex_relu as complex_relu +from tensorflow_mri.python.activations.complex_activations import mod_relu as mod_relu +from tensorflow_mri.python.activations import serialize as serialize +from tensorflow_mri.python.activations import deserialize as deserialize +from tensorflow_mri.python.activations import get as get diff --git a/tensorflow_mri/_api/array/__init__.py b/tensorflow_mri/_api/array/__init__.py index 11b5bcf7..eedb6aae 100644 --- a/tensorflow_mri/_api/array/__init__.py +++ b/tensorflow_mri/_api/array/__init__.py @@ -2,4 +2,5 @@ # Do not edit. """Array processing operations.""" +from tensorflow_mri.python.ops.array_ops import dynamic_meshgrid as meshgrid from tensorflow_mri.python.ops.array_ops import update_tensor as update_tensor diff --git a/tensorflow_mri/_api/coils/__init__.py b/tensorflow_mri/_api/coils/__init__.py index 06f5eacb..300a3855 100644 --- a/tensorflow_mri/_api/coils/__init__.py +++ b/tensorflow_mri/_api/coils/__init__.py @@ -2,7 +2,8 @@ # Do not edit. 
"""Parallel imaging operations.""" -from tensorflow_mri.python.ops.coil_ops import estimate_coil_sensitivities as estimate_sensitivities -from tensorflow_mri.python.ops.coil_ops import combine_coils as combine_coils -from tensorflow_mri.python.ops.coil_ops import compress_coils as compress_coils -from tensorflow_mri.python.ops.coil_ops import CoilCompressorSVD as CoilCompressorSVD +from tensorflow_mri.python.coils.coil_combination import combine_coils as combine_coils +from tensorflow_mri.python.coils.coil_compression import compress_coils as compress_coils +from tensorflow_mri.python.coils.coil_compression import CoilCompressorSVD as CoilCompressorSVD +from tensorflow_mri.python.coils.coil_sensitivities import estimate_sensitivities as estimate_sensitivities +from tensorflow_mri.python.coils.coil_sensitivities import estimate_sensitivities_universal as estimate_sensitivities_universal diff --git a/tensorflow_mri/_api/geometry/__init__.py b/tensorflow_mri/_api/geometry/__init__.py new file mode 100644 index 00000000..c7365abe --- /dev/null +++ b/tensorflow_mri/_api/geometry/__init__.py @@ -0,0 +1,5 @@ +# This file was automatically generated by tools/build/create_api.py. +# Do not edit. 
+"""Geometric operations.""" + +from tensorflow_mri.python.geometry.rotation_2d import Rotation2D as Rotation2D diff --git a/tensorflow_mri/_api/initializers/__init__.py b/tensorflow_mri/_api/initializers/__init__.py index a1513d99..8eb5ad07 100644 --- a/tensorflow_mri/_api/initializers/__init__.py +++ b/tensorflow_mri/_api/initializers/__init__.py @@ -9,3 +9,6 @@ from tensorflow_mri.python.initializers.initializers import HeUniform as HeUniform from tensorflow_mri.python.initializers.initializers import LecunNormal as LecunNormal from tensorflow_mri.python.initializers.initializers import LecunUniform as LecunUniform +from tensorflow_mri.python.initializers import serialize as serialize +from tensorflow_mri.python.initializers import deserialize as deserialize +from tensorflow_mri.python.initializers import get as get diff --git a/tensorflow_mri/_api/layers/__init__.py b/tensorflow_mri/_api/layers/__init__.py index 09740d52..793dd295 100644 --- a/tensorflow_mri/_api/layers/__init__.py +++ b/tensorflow_mri/_api/layers/__init__.py @@ -2,14 +2,17 @@ # Do not edit. 
"""Keras layers.""" +from tensorflow_mri.python.layers.coil_sensitivities import CoilSensitivityEstimation2D as CoilSensitivityEstimation2D +from tensorflow_mri.python.layers.coil_sensitivities import CoilSensitivityEstimation3D as CoilSensitivityEstimation3D from tensorflow_mri.python.layers.convolutional import Conv1D as Conv1D from tensorflow_mri.python.layers.convolutional import Conv1D as Convolution1D from tensorflow_mri.python.layers.convolutional import Conv2D as Conv2D from tensorflow_mri.python.layers.convolutional import Conv2D as Convolution2D from tensorflow_mri.python.layers.convolutional import Conv3D as Conv3D from tensorflow_mri.python.layers.convolutional import Conv3D as Convolution3D -from tensorflow_mri.python.layers.conv_blocks import ConvBlock as ConvBlock -from tensorflow_mri.python.layers.conv_endec import UNet as UNet +from tensorflow_mri.python.layers.data_consistency import LeastSquaresGradientDescent2D as LeastSquaresGradientDescent2D +from tensorflow_mri.python.layers.data_consistency import LeastSquaresGradientDescent3D as LeastSquaresGradientDescent3D +from tensorflow_mri.python.layers.normalization import Normalized as Normalized from tensorflow_mri.python.layers.pooling import AveragePooling1D as AveragePooling1D from tensorflow_mri.python.layers.pooling import AveragePooling1D as AvgPool1D from tensorflow_mri.python.layers.pooling import AveragePooling2D as AveragePooling2D @@ -22,6 +25,11 @@ from tensorflow_mri.python.layers.pooling import MaxPooling2D as MaxPool2D from tensorflow_mri.python.layers.pooling import MaxPooling3D as MaxPooling3D from tensorflow_mri.python.layers.pooling import MaxPooling3D as MaxPool3D +from tensorflow_mri.python.layers.recon_adjoint import ReconAdjoint2D as ReconAdjoint2D +from tensorflow_mri.python.layers.recon_adjoint import ReconAdjoint3D as ReconAdjoint3D +from tensorflow_mri.python.layers.reshaping import UpSampling1D as UpSampling1D +from tensorflow_mri.python.layers.reshaping import 
UpSampling2D as UpSampling2D +from tensorflow_mri.python.layers.reshaping import UpSampling3D as UpSampling3D from tensorflow_mri.python.layers.signal_layers import DWT1D as DWT1D from tensorflow_mri.python.layers.signal_layers import DWT2D as DWT2D from tensorflow_mri.python.layers.signal_layers import DWT3D as DWT3D diff --git a/tensorflow_mri/_api/linalg/__init__.py b/tensorflow_mri/_api/linalg/__init__.py index b4fb6a80..eda23081 100644 --- a/tensorflow_mri/_api/linalg/__init__.py +++ b/tensorflow_mri/_api/linalg/__init__.py @@ -2,17 +2,17 @@ # Do not edit. """Linear algebra operations.""" -from tensorflow_mri.python.util.linalg_imaging import LinearOperator as LinearOperator -from tensorflow_mri.python.util.linalg_imaging import LinearOperatorAdjoint as LinearOperatorAdjoint -from tensorflow_mri.python.util.linalg_imaging import LinearOperatorComposition as LinearOperatorComposition -from tensorflow_mri.python.util.linalg_imaging import LinearOperatorAddition as LinearOperatorAddition -from tensorflow_mri.python.util.linalg_imaging import LinearOperatorScaledIdentity as LinearOperatorScaledIdentity -from tensorflow_mri.python.util.linalg_imaging import LinearOperatorDiag as LinearOperatorDiag -from tensorflow_mri.python.util.linalg_imaging import LinearOperatorGramMatrix as LinearOperatorGramMatrix -from tensorflow_mri.python.ops.linalg_ops import LinearOperatorNUFFT as LinearOperatorNUFFT -from tensorflow_mri.python.ops.linalg_ops import LinearOperatorGramNUFFT as LinearOperatorGramNUFFT -from tensorflow_mri.python.ops.linalg_ops import LinearOperatorFiniteDifference as LinearOperatorFiniteDifference -from tensorflow_mri.python.ops.linalg_ops import LinearOperatorWavelet as LinearOperatorWavelet -from tensorflow_mri.python.ops.linalg_ops import LinearOperatorMRI as LinearOperatorMRI -from tensorflow_mri.python.ops.linalg_ops import LinearOperatorGramMRI as LinearOperatorGramMRI -from tensorflow_mri.python.ops.linalg_ops import conjugate_gradient as 
conjugate_gradient +from tensorflow_mri.python.linalg.linear_operator import LinearOperator as LinearOperator +from tensorflow_mri.python.linalg.linear_operator import LinearOperatorAdjoint as LinearOperatorAdjoint +from tensorflow_mri.python.linalg.conjugate_gradient import conjugate_gradient as conjugate_gradient +from tensorflow_mri.python.linalg.linear_operator_addition import LinearOperatorAddition as LinearOperatorAddition +from tensorflow_mri.python.linalg.linear_operator_composition import LinearOperatorComposition as LinearOperatorComposition +from tensorflow_mri.python.linalg.linear_operator_diag import LinearOperatorDiag as LinearOperatorDiag +from tensorflow_mri.python.linalg.linear_operator_finite_difference import LinearOperatorFiniteDifference as LinearOperatorFiniteDifference +from tensorflow_mri.python.linalg.linear_operator_identity import LinearOperatorScaledIdentity as LinearOperatorScaledIdentity +from tensorflow_mri.python.linalg.linear_operator_gram_matrix import LinearOperatorGramMatrix as LinearOperatorGramMatrix +from tensorflow_mri.python.linalg.linear_operator_nufft import LinearOperatorNUFFT as LinearOperatorNUFFT +from tensorflow_mri.python.linalg.linear_operator_nufft import LinearOperatorGramNUFFT as LinearOperatorGramNUFFT +from tensorflow_mri.python.linalg.linear_operator_mri import LinearOperatorMRI as LinearOperatorMRI +from tensorflow_mri.python.linalg.linear_operator_mri import LinearOperatorGramMRI as LinearOperatorGramMRI +from tensorflow_mri.python.linalg.linear_operator_wavelet import LinearOperatorWavelet as LinearOperatorWavelet diff --git a/tensorflow_mri/_api/models/__init__.py b/tensorflow_mri/_api/models/__init__.py index b32ce647..1bbf6fe8 100644 --- a/tensorflow_mri/_api/models/__init__.py +++ b/tensorflow_mri/_api/models/__init__.py @@ -5,6 +5,12 @@ from tensorflow_mri.python.models.conv_blocks import ConvBlock1D as ConvBlock1D from tensorflow_mri.python.models.conv_blocks import ConvBlock2D as ConvBlock2D from 
tensorflow_mri.python.models.conv_blocks import ConvBlock3D as ConvBlock3D +from tensorflow_mri.python.models.conv_blocks import ConvBlockLSTM1D as ConvBlockLSTM1D +from tensorflow_mri.python.models.conv_blocks import ConvBlockLSTM2D as ConvBlockLSTM2D +from tensorflow_mri.python.models.conv_blocks import ConvBlockLSTM3D as ConvBlockLSTM3D from tensorflow_mri.python.models.conv_endec import UNet1D as UNet1D from tensorflow_mri.python.models.conv_endec import UNet2D as UNet2D from tensorflow_mri.python.models.conv_endec import UNet3D as UNet3D +from tensorflow_mri.python.models.conv_endec import UNetLSTM1D as UNetLSTM1D +from tensorflow_mri.python.models.conv_endec import UNetLSTM2D as UNetLSTM2D +from tensorflow_mri.python.models.conv_endec import UNetLSTM3D as UNetLSTM3D diff --git a/tensorflow_mri/_api/recon/__init__.py b/tensorflow_mri/_api/recon/__init__.py index 2bee140c..2178ba71 100644 --- a/tensorflow_mri/_api/recon/__init__.py +++ b/tensorflow_mri/_api/recon/__init__.py @@ -1,9 +1,10 @@ # This file was automatically generated by tools/build/create_api.py. # Do not edit. 
-"""Image reconstruction.""" +"""Signal reconstruction.""" -from tensorflow_mri.python.ops.recon_ops import reconstruct_adj as adjoint -from tensorflow_mri.python.ops.recon_ops import reconstruct_adj as adj +from tensorflow_mri.python.recon.recon_adjoint import recon_adjoint as adjoint_universal +from tensorflow_mri.python.recon.recon_adjoint import recon_adjoint_mri as adjoint +from tensorflow_mri.python.recon.recon_adjoint import recon_adjoint_mri as adj from tensorflow_mri.python.ops.recon_ops import reconstruct_lstsq as least_squares from tensorflow_mri.python.ops.recon_ops import reconstruct_lstsq as lstsq from tensorflow_mri.python.ops.recon_ops import reconstruct_sense as sense diff --git a/tensorflow_mri/_api/sampling/__init__.py b/tensorflow_mri/_api/sampling/__init__.py index b5f337c8..09cb474b 100644 --- a/tensorflow_mri/_api/sampling/__init__.py +++ b/tensorflow_mri/_api/sampling/__init__.py @@ -3,12 +3,16 @@ """k-space sampling operations.""" from tensorflow_mri.python.ops.traj_ops import density_grid as density_grid +from tensorflow_mri.python.ops.traj_ops import frequency_grid as frequency_grid from tensorflow_mri.python.ops.traj_ops import random_sampling_mask as random_mask +from tensorflow_mri.python.ops.traj_ops import center_mask as center_mask +from tensorflow_mri.python.ops.traj_ops import accel_mask as accel_mask from tensorflow_mri.python.ops.traj_ops import radial_trajectory as radial_trajectory from tensorflow_mri.python.ops.traj_ops import spiral_trajectory as spiral_trajectory from tensorflow_mri.python.ops.traj_ops import radial_density as radial_density from tensorflow_mri.python.ops.traj_ops import estimate_radial_density as estimate_radial_density from tensorflow_mri.python.ops.traj_ops import radial_waveform as radial_waveform +from tensorflow_mri.python.ops.traj_ops import spiral_waveform as spiral_waveform from tensorflow_mri.python.ops.traj_ops import estimate_density as estimate_density from tensorflow_mri.python.ops.traj_ops 
import flatten_trajectory as flatten_trajectory from tensorflow_mri.python.ops.traj_ops import flatten_density as flatten_density diff --git a/tensorflow_mri/_api/signal/__init__.py b/tensorflow_mri/_api/signal/__init__.py index b18f9761..b6f632a6 100644 --- a/tensorflow_mri/_api/signal/__init__.py +++ b/tensorflow_mri/_api/signal/__init__.py @@ -2,14 +2,6 @@ # Do not edit. """Signal processing operations.""" -from tensorflow_mri.python.ops.signal_ops import hann as hann -from tensorflow_mri.python.ops.signal_ops import hamming as hamming -from tensorflow_mri.python.ops.signal_ops import atanfilt as atanfilt -from tensorflow_mri.python.ops.signal_ops import filter_kspace as filter_kspace -from tensorflow_mri.python.ops.signal_ops import crop_kspace as crop_kspace -from tensorflow_mri.python.ops.fft_ops import fftn as fft -from tensorflow_mri.python.ops.fft_ops import ifftn as ifft -from tensorflow_nufft.python.ops.nufft_ops import nufft as nufft from tensorflow_mri.python.ops.wavelet_ops import dwt as dwt from tensorflow_mri.python.ops.wavelet_ops import idwt as idwt from tensorflow_mri.python.ops.wavelet_ops import wavedec as wavedec @@ -17,3 +9,13 @@ from tensorflow_mri.python.ops.wavelet_ops import dwt_max_level as max_wavelet_level from tensorflow_mri.python.ops.wavelet_ops import coeffs_to_tensor as wavelet_coeffs_to_tensor from tensorflow_mri.python.ops.wavelet_ops import tensor_to_coeffs as tensor_to_wavelet_coeffs +from tensorflow_mri.python.ops.fft_ops import fftn as fft +from tensorflow_mri.python.ops.fft_ops import ifftn as ifft +from tensorflow_nufft.python.ops.nufft_ops import nufft as nufft +from tensorflow_mri.python.ops.signal_ops import hann as hann +from tensorflow_mri.python.ops.signal_ops import hamming as hamming +from tensorflow_mri.python.ops.signal_ops import atanfilt as atanfilt +from tensorflow_mri.python.ops.signal_ops import rect as rect +from tensorflow_mri.python.ops.signal_ops import separable_window as separable_window +from 
tensorflow_mri.python.ops.signal_ops import filter_kspace as filter_kspace +from tensorflow_mri.python.ops.signal_ops import crop_kspace as crop_kspace diff --git a/tensorflow_mri/cc/kernels/fft_kernels.cc b/tensorflow_mri/cc/kernels/fft_kernels.cc new file mode 100644 index 00000000..fa1b9cf3 --- /dev/null +++ b/tensorflow_mri/cc/kernels/fft_kernels.cc @@ -0,0 +1,366 @@ +/* Copyright 2022 The TensorFlow MRI Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +/* Copyright 2015 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ +// This file is inspired by "tensorflow/tensorflow/core/kernels/fft_ops.cc", +// but CPU kernels have been modified to use the FFTW library. The original +// GPU kernels have been removed. 
+ +#include "tensorflow/core/platform/errors.h" +#define EIGEN_USE_THREADS + +// See docs in ../ops/fft_ops.cc. + +#include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor" +#include "tensorflow/core/framework/op.h" +#include "tensorflow/core/framework/op_kernel.h" +#include "tensorflow/core/framework/tensor.h" +#include "tensorflow/core/framework/tensor_shape.h" +#include "tensorflow/core/framework/types.h" +#include "tensorflow/core/lib/core/errors.h" +#include "tensorflow/core/platform/logging.h" +#include "tensorflow/core/platform/types.h" +#include "tensorflow/core/util/env_var.h" +#include "tensorflow/core/util/work_sharder.h" + +#if (defined(GOOGLE_CUDA) && GOOGLE_CUDA) || \ + (defined(TENSORFLOW_USE_ROCM) && TENSORFLOW_USE_ROCM) +#include "tensorflow/core/platform/stream_executor.h" +#endif // GOOGLE_CUDA || TENSORFLOW_USE_ROCM + +#include "tensorflow_mri/cc/third_party/fftw/fftw.h" + +namespace tensorflow { +namespace mri { + +class FFTBase : public OpKernel { + public: + explicit FFTBase(OpKernelConstruction* ctx) : OpKernel(ctx) {} + + void Compute(OpKernelContext* ctx) override { + const Tensor& in = ctx->input(0); + const TensorShape& input_shape = in.shape(); + const int fft_rank = Rank(); + OP_REQUIRES( + ctx, input_shape.dims() >= fft_rank, + errors::InvalidArgument("Input must have rank of at least ", fft_rank, + " but got: ", input_shape.DebugString())); + + Tensor* out; + TensorShape output_shape = input_shape; + uint64 fft_shape[3] = {0, 0, 0}; + + // In R2C or C2R mode, we use a second input to specify the FFT length + // instead of inferring it from the input shape. 
+ if (IsReal()) { + const Tensor& fft_length = ctx->input(1); + OP_REQUIRES(ctx, + fft_length.shape().dims() == 1 && + fft_length.shape().dim_size(0) == fft_rank, + errors::InvalidArgument("fft_length must have shape [", + fft_rank, "]")); + + auto fft_length_as_vec = fft_length.vec(); + for (int i = 0; i < fft_rank; ++i) { + OP_REQUIRES(ctx, fft_length_as_vec(i) >= 0, + errors::InvalidArgument( + "fft_length[", i, + "] must >= 0, but got: ", fft_length_as_vec(i))); + fft_shape[i] = fft_length_as_vec(i); + // Each input dimension must have length of at least fft_shape[i]. For + // IRFFTs, the inner-most input dimension must have length of at least + // fft_shape[i] / 2 + 1. + bool inner_most = (i == fft_rank - 1); + uint64 min_input_dim_length = + !IsForward() && inner_most ? fft_shape[i] / 2 + 1 : fft_shape[i]; + auto input_index = input_shape.dims() - fft_rank + i; + OP_REQUIRES( + ctx, + // We pass through empty tensors, so special case them here. + input_shape.dim_size(input_index) == 0 || + input_shape.dim_size(input_index) >= min_input_dim_length, + errors::InvalidArgument( + "Input dimension ", input_index, + " must have length of at least ", min_input_dim_length, + " but got: ", input_shape.dim_size(input_index))); + uint64 dim = IsForward() && inner_most && fft_shape[i] != 0 + ? 
fft_shape[i] / 2 + 1 + : fft_shape[i]; + output_shape.set_dim(output_shape.dims() - fft_rank + i, dim); + } + } else { + for (int i = 0; i < fft_rank; ++i) { + fft_shape[i] = + output_shape.dim_size(output_shape.dims() - fft_rank + i); + } + } + + OP_REQUIRES_OK(ctx, ctx->allocate_output(0, output_shape, &out)); + + if (IsReal()) { + if (IsForward()) { + OP_REQUIRES( + ctx, + (in.dtype() == DT_FLOAT && out->dtype() == DT_COMPLEX64) || + (in.dtype() == DT_DOUBLE && out->dtype() == DT_COMPLEX128), + errors::InvalidArgument("Wrong types for forward real FFT: in=", + in.dtype(), " out=", out->dtype())); + } else { + OP_REQUIRES( + ctx, + (in.dtype() == DT_COMPLEX64 && out->dtype() == DT_FLOAT) || + (in.dtype() == DT_COMPLEX128 && out->dtype() == DT_DOUBLE), + errors::InvalidArgument("Wrong types for backward real FFT: in=", + in.dtype(), " out=", out->dtype())); + } + } else { + OP_REQUIRES( + ctx, + (in.dtype() == DT_COMPLEX64 && out->dtype() == DT_COMPLEX64) || + (in.dtype() == DT_COMPLEX128 && out->dtype() == DT_COMPLEX128), + errors::InvalidArgument("Wrong types for FFT: in=", in.dtype(), + " out=", out->dtype())); + } + + if (input_shape.num_elements() == 0) { + DCHECK_EQ(0, output_shape.num_elements()); + return; + } + + DoFFT(ctx, in, fft_shape, out); + } + + protected: + virtual int Rank() const = 0; + virtual bool IsForward() const = 0; + virtual bool IsReal() const = 0; + + // The function that actually computes the FFT. 
+ virtual void DoFFT(OpKernelContext* ctx, const Tensor& in, uint64* fft_shape, + Tensor* out) = 0; +}; + +typedef Eigen::ThreadPoolDevice CPUDevice; + +template +class FFTCPU : public FFTBase { + public: + using FFTBase::FFTBase; + + protected: + static unsigned FftwPlanningRigor; + + int Rank() const override { return FFTRank; } + bool IsForward() const override { return Forward; } + bool IsReal() const override { return _Real; } + + void DoFFT(OpKernelContext* ctx, const Tensor& in, uint64* fft_shape, + Tensor* out) override { + // Create the axes (which are always trailing). + const auto axes = Eigen::ArrayXi::LinSpaced(FFTRank, 1, FFTRank); + auto device = ctx->eigen_device(); + + const bool is_complex128 = + in.dtype() == DT_COMPLEX128 || out->dtype() == DT_COMPLEX128; + + if (!IsReal()) { + if (is_complex128) { + DoComplexFFT(ctx, fft_shape, in, out); + } else { + DoComplexFFT(ctx, fft_shape, in, out); + } + } else { + OP_REQUIRES(ctx, false, + errors::Unimplemented("Real FFT is not implemented")); + } + } + + template + void DoComplexFFT(OpKernelContext* ctx, uint64* fft_shape, + const Tensor& in, Tensor* out) { + auto device = ctx->eigen_device(); + auto worker_threads = ctx->device()->tensorflow_cpu_worker_threads(); + auto num_threads = worker_threads->num_threads; + + const bool is_complex128 = + in.dtype() == DT_COMPLEX128 || out->dtype() == DT_COMPLEX128; + + if (is_complex128) { + DCHECK_EQ(in.dtype(), DT_COMPLEX128); + DCHECK_EQ(out->dtype(), DT_COMPLEX128); + } else { + DCHECK_EQ(in.dtype(), DT_COMPLEX64); + DCHECK_EQ(out->dtype(), DT_COMPLEX64); + } + + auto input = Tensor(in).flat_inner_dims, FFTRank + 1>(); + auto output = out->flat_inner_dims, FFTRank + 1>(); + + int dim_sizes[FFTRank]; + int input_distance = 1; + int output_distance = 1; + int num_points = 1; + for (int i = 0; i < FFTRank; ++i) { + dim_sizes[i] = fft_shape[i]; + num_points *= fft_shape[i]; + input_distance *= input.dimension(i + 1); + output_distance *= output.dimension(i + 
1); + } + int batch_size = input.dimension(0); + + constexpr auto fft_sign = Forward ? FFTW_FORWARD : FFTW_BACKWARD; + auto fft_flags = FftwPlanningRigor; + + #pragma omp critical + { + static bool is_fftw_initialized = false; + if (!is_fftw_initialized) { + // Set up threading for FFTW. Should be done only once. + #ifdef _OPENMP + fftw::init_threads(); + fftw::plan_with_nthreads(num_threads); + #endif + is_fftw_initialized = true; + } + } + + fftw::plan fft_plan; + #pragma omp critical + { + fft_plan = fftw::plan_many_dft( + FFTRank, dim_sizes, batch_size, + reinterpret_cast*>(input.data()), + nullptr, 1, input_distance, + reinterpret_cast*>(output.data()), + nullptr, 1, output_distance, + fft_sign, fft_flags); + } + + fftw::execute(fft_plan); + + #pragma omp critical + { + fftw::destroy_plan(fft_plan); + } + + // Wait until all threads are done using FFTW, then clean up the FFTW state, + // which only needs to be done once. + #ifdef _OPENMP + #pragma omp barrier + #pragma omp critical + { + static bool is_fftw_finalized = false; + if (!is_fftw_finalized) { + fftw::cleanup_threads(); + is_fftw_finalized = true; + } + } + #endif // _OPENMP + + // FFT normalization. + if (fft_sign == FFTW_BACKWARD) { + output.device(device) = output / output.constant(num_points); + } + } +}; + +unsigned GetFftwPlanningRigor(const string& envvar, + const string& default_value) { + const char* str = getenv(envvar.c_str()); + if (str == nullptr || strcmp(str, "") == 0) { + // envvar is not set, use default value. 
+ str = default_value.c_str(); + } + + if (strcmp(str, "estimate") == 0) { + return FFTW_ESTIMATE; + } else if (strcmp(str, "measure") == 0) { + return FFTW_MEASURE; + } else if (strcmp(str, "patient") == 0) { + return FFTW_PATIENT; + } else if (strcmp(str, "exhaustive") == 0) { + return FFTW_EXHAUSTIVE; + } else { + LOG(FATAL) << "Invalid value for environment variable " << envvar << ": " << str; + } +} + +template +unsigned FFTCPU::FftwPlanningRigor = GetFftwPlanningRigor( + "TFMRI_FFTW_PLANNING_RIGOR", "measure" +); + +// Environment variable `TFMRI_USE_CUSTOM_FFT` can be used to specify whether to +// use custom FFT kernels. +static bool InitModule() { + const char* use_fftw_string = std::getenv("TFMRI_USE_CUSTOM_FFT"); + bool use_fftw; + if (use_fftw_string == nullptr) { + // Default to using FFTW if environment variable is not set. + use_fftw = true; + } else { + // Parse the value of the environment variable. + std::string str(use_fftw_string); + // To lower-case. + std::transform(str.begin(), str.end(), str.begin(), + [](unsigned char c){ return std::tolower(c); }); + if (str == "y" || str == "yes" || str == "t" || str == "true" || + str == "on" || str == "1") { + use_fftw = true; + } else if (str == "n" || str == "no" || str == "f" || str == "false" || + str == "off" || str == "0") { + use_fftw = false; + } else { + LOG(FATAL) << "Invalid value for environment variable " + << "TFMRI_USE_CUSTOM_FFT: " << str; + } + } + if (use_fftw) { + // Register with priority 1 so that these kernels take precedence over the + // default Eigen implementation. Note that core TF registers the FFT GPU + // kernels with priority 1 too, so those still take precedence over these. 
+ REGISTER_KERNEL_BUILDER(Name("FFT").Device(DEVICE_CPU).Priority(1), + FFTCPU); + REGISTER_KERNEL_BUILDER(Name("IFFT").Device(DEVICE_CPU).Priority(1), + FFTCPU); + REGISTER_KERNEL_BUILDER(Name("FFT2D").Device(DEVICE_CPU).Priority(1), + FFTCPU); + REGISTER_KERNEL_BUILDER(Name("IFFT2D").Device(DEVICE_CPU).Priority(1), + FFTCPU); + REGISTER_KERNEL_BUILDER(Name("FFT3D").Device(DEVICE_CPU).Priority(1), + FFTCPU); + REGISTER_KERNEL_BUILDER(Name("IFFT3D").Device(DEVICE_CPU).Priority(1), + FFTCPU); + } + return true; +} + +static bool module_initialized = InitModule(); + +} // namespace mri +} // namespace tensorflow diff --git a/tensorflow_mri/cc/kernels/traj_kernels.cc b/tensorflow_mri/cc/kernels/traj_kernels.cc index 5364fab3..339feda5 100644 --- a/tensorflow_mri/cc/kernels/traj_kernels.cc +++ b/tensorflow_mri/cc/kernels/traj_kernels.cc @@ -1,4 +1,4 @@ -/*Copyright 2021 University College London. All Rights Reserved. +/*Copyright 2021 The TensorFlow MRI Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -25,7 +25,7 @@ class SpiralWaveformOp : public OpKernel { public: explicit SpiralWaveformOp(OpKernelConstruction* ctx) : OpKernel(ctx) { - + string vd_type_str; OP_REQUIRES_OK(ctx, ctx->GetAttr("base_resolution", &base_resolution_)); @@ -64,7 +64,7 @@ class SpiralWaveformOp : public OpKernel { } void Compute(OpKernelContext* ctx) override { - + // Create a buffer tensor. 
TensorShape temp_waveform_shape({SWF_MAX_WAVEFORM_SIZE, 2}); Tensor temp_waveform; @@ -94,7 +94,7 @@ class SpiralWaveformOp : public OpKernel { ctx, result == 0, errors::Internal( "failed during `calculate_spiral_trajectory`")); - + Tensor waveform = temp_waveform.Slice(0, waveform_length); ctx->set_output(0, waveform); } diff --git a/tensorflow_mri/cc/ops/traj_ops.cc b/tensorflow_mri/cc/ops/traj_ops.cc index c39852d1..75b7131f 100644 --- a/tensorflow_mri/cc/ops/traj_ops.cc +++ b/tensorflow_mri/cc/ops/traj_ops.cc @@ -1,4 +1,4 @@ -/*Copyright 2021 University College London. All Rights Reserved. +/*Copyright 2021 The TensorFlow MRI Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -126,7 +126,7 @@ as follows: * A fixed-density portion between `vd_outer_cutoff` and 1.0, sampled at `vd_outer_density` times the Nyquist rate. -.. [1] Pipe, J.G. and Zwart, N.R. (2014), Spiral trajectory design: A flexible +1. Pipe, J.G. and Zwart, N.R. (2014), Spiral trajectory design: A flexible numerical algorithm and base analytical equations. Magn. Reson. Med, 71: 278-285. https://doi.org/10.1002/mrm.24675 diff --git a/tensorflow_mri/cc/third_party/fftw/fftw.h b/tensorflow_mri/cc/third_party/fftw/fftw.h new file mode 100644 index 00000000..af567379 --- /dev/null +++ b/tensorflow_mri/cc/third_party/fftw/fftw.h @@ -0,0 +1,215 @@ +/* Copyright 2022 The TensorFlow MRI Authors. All Rights Reserved. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#ifndef TENSORFLOW_MRI_CC_THIRD_PARTY_FFTW_H_ +#define TENSORFLOW_MRI_CC_THIRD_PARTY_FFTW_H_ + +#include + + +namespace tensorflow { +namespace mri { +namespace fftw { + +template +inline int init_threads(); + +template<> +inline int init_threads() { + return fftwf_init_threads(); +} + +template<> +inline int init_threads() { + return fftw_init_threads(); +} + +template +inline void cleanup_threads(); + +template<> +inline void cleanup_threads() { + return fftwf_cleanup_threads(); +} + +template<> +inline void cleanup_threads() { + return fftw_cleanup_threads(); +} + +template +inline void plan_with_nthreads(int nthreads); + +template<> +inline void plan_with_nthreads(int nthreads) { + fftwf_plan_with_nthreads(nthreads); +} + +template<> +inline void plan_with_nthreads(int nthreads) { + fftw_plan_with_nthreads(nthreads); +} + +template +inline void make_planner_thread_safe(); + +template<> +inline void make_planner_thread_safe() { + fftwf_make_planner_thread_safe(); +} + +template<> +inline void make_planner_thread_safe() { + fftw_make_planner_thread_safe(); +} + +template +struct ComplexType; + +template<> +struct ComplexType { + using Type = fftwf_complex; +}; + +template<> +struct ComplexType { + using Type = fftw_complex; +}; + +template +using complex = typename ComplexType::Type; + +template +inline FloatType* alloc_real(size_t n); + +template<> +inline float* alloc_real(size_t n) { + return fftwf_alloc_real(n); +} + +template<> +inline double* alloc_real(size_t n) { + return fftw_alloc_real(n); +} + +template +inline typename ComplexType::Type* alloc_complex(size_t n); + +template<> +inline typename ComplexType::Type* alloc_complex(size_t n) { + return fftwf_alloc_complex(n); +} + +template<> +inline typename ComplexType::Type* alloc_complex(size_t n) { + return 
fftw_alloc_complex(n); +} + +template +inline void free(void* p); + +template<> +inline void free(void* p) { + fftwf_free(p); +} + +template<> +inline void free(void* p) { + fftw_free(p); +} + +template +struct PlanType; + +template<> +struct PlanType { + using Type = fftwf_plan; +}; + +template<> +struct PlanType { + using Type = fftw_plan; +}; + +template +using plan = typename PlanType::Type; + +template +inline typename PlanType::Type plan_many_dft( + int rank, const int *n, int howmany, + typename ComplexType::Type *in, const int *inembed, + int istride, int idist, + typename ComplexType::Type *out, const int *onembed, + int ostride, int odist, + int sign, unsigned flags); + +template<> +inline typename PlanType::Type plan_many_dft( + int rank, const int *n, int howmany, + ComplexType::Type *in, const int *inembed, + int istride, int idist, + ComplexType::Type *out, const int *onembed, + int ostride, int odist, + int sign, unsigned flags) { + return fftwf_plan_many_dft( + rank, n, howmany, + in, inembed, istride, idist, + out, onembed, ostride, odist, + sign, flags); +} + +template<> +inline typename PlanType::Type plan_many_dft( + int rank, const int *n, int howmany, + typename ComplexType::Type *in, const int *inembed, + int istride, int idist, + typename ComplexType::Type *out, const int *onembed, + int ostride, int odist, + int sign, unsigned flags) { + return fftw_plan_many_dft( + rank, n, howmany, + in, inembed, istride, idist, + out, onembed, ostride, odist, + sign, flags); +} + +template +inline void execute(typename PlanType::Type& plan); // NOLINT + +template<> +inline void execute(typename PlanType::Type& plan) { // NOLINT + fftwf_execute(plan); +} + +template<> +inline void execute(typename PlanType::Type& plan) { // NOLINT + fftw_execute(plan); +} + +template +inline void destroy_plan(typename PlanType::Type& plan); // NOLINT + +template<> +inline void destroy_plan(typename PlanType::Type& plan) { // NOLINT + fftwf_destroy_plan(plan); +} + 
+template<> +inline void destroy_plan(typename PlanType::Type& plan) { // NOLINT + fftw_destroy_plan(plan); +} + +} // namespace fftw +} // namespace mri +} // namespace tensorflow + +#endif // TENSORFLOW_MRI_CC_THIRD_PARTY_FFTW_H_ diff --git a/tensorflow_mri/python/__init__.py b/tensorflow_mri/python/__init__.py index a678124c..8bc1069e 100644 --- a/tensorflow_mri/python/__init__.py +++ b/tensorflow_mri/python/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2021 University College London. All Rights Reserved. +# Copyright 2021 The TensorFlow MRI Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,7 +14,10 @@ # ============================================================================== "TFMRI Python code." +from tensorflow_mri.python import activations from tensorflow_mri.python import callbacks +from tensorflow_mri.python import coils +from tensorflow_mri.python import geometry from tensorflow_mri.python import initializers from tensorflow_mri.python import io from tensorflow_mri.python import layers @@ -22,5 +25,6 @@ from tensorflow_mri.python import metrics from tensorflow_mri.python import models from tensorflow_mri.python import ops +from tensorflow_mri.python import recon from tensorflow_mri.python import summary from tensorflow_mri.python import util diff --git a/tensorflow_mri/python/activations/__init__.py b/tensorflow_mri/python/activations/__init__.py new file mode 100644 index 00000000..f7c419e7 --- /dev/null +++ b/tensorflow_mri/python/activations/__init__.py @@ -0,0 +1,143 @@ +# Copyright 2021 The TensorFlow MRI Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Keras activations.""" + +import keras + +from tensorflow_mri.python.activations import complex_activations +from tensorflow_mri.python.util import api_util + + +TFMRI_ACTIVATIONS = { + 'complex_relu': complex_activations.complex_relu, + 'mod_relu': complex_activations.mod_relu +} + + +@api_util.export("activations.serialize") +def serialize(activation): + """Returns the string identifier of an activation function. + + ```{note} + This function is a drop-in replacement for `tf.keras.activations.serialize`. + ``` + + Example: + >>> tfmri.activations.serialize(tf.keras.activations.tanh) + 'tanh' + >>> tfmri.activations.serialize(tf.keras.activations.sigmoid) + 'sigmoid' + >>> tfmri.activations.serialize(tfmri.activations.complex_relu) + 'complex_relu' + >>> tfmri.activations.serialize('abcd') + Traceback (most recent call last): + ... + ValueError: ('Cannot serialize', 'abcd') + + Args: + activation: A function object. + + Returns: + A `str` denoting the name attribute of the input function. + + Raises: + ValueError: If the input function is not a valid one. + """ + return keras.activations.serialize(activation) + + +@api_util.export("activations.deserialize") +def deserialize(name, custom_objects=None): + """Returns activation function given a string identifier. + + ```{note} + This function is a drop-in replacement for + `tf.keras.activations.deserialize`. The only difference is that this function + has built-in knowledge of TFMRI activations. 
+ ``` + + Example: + >>> tfmri.activations.deserialize('linear') + + >>> tfmri.activations.deserialize('sigmoid') + + >>> tfmri.activations.deserialize('complex_relu') + + >>> tfmri.activations.deserialize('abcd') + Traceback (most recent call last): + ... + ValueError: Unknown activation function:abcd + + Args: + name: The name of the activation function. + custom_objects: Optional `{function_name: function_obj}` + dictionary listing user-provided activation functions. + + Returns: + The corresponding activation function. + + Raises: + ValueError: If the input string does not denote any defined activation + function. + """ + custom_objects = {**TFMRI_ACTIVATIONS, **(custom_objects or {})} + return keras.activations.deserialize(name, custom_objects) + + +@api_util.export("activations.get") +def get(identifier): + """Retrieve a Keras activation by its identifier. + + ```{note} + This function is a drop-in replacement for + `tf.keras.activations.get`. The only difference is that this function + has built-in knowledge of TFMRI activations. + ``` + + Args: + identifier: A function or a string. + + Returns: + A function corresponding to the input string or input function. + + Example: + + >>> tfmri.activations.get('softmax') + + >>> tfmri.activations.get(tf.keras.activations.softmax) + + >>> tfmri.activations.get(None) + + >>> tfmri.activations.get(abs) + + >>> tfmri.activations.get('complex_relu') + + >>> tfmri.activations.get('abcd') + Traceback (most recent call last): + ... + ValueError: Unknown activation function:abcd + + Raises: + ValueError: If the input is an unknown function or string, i.e., the input + does not denote any defined function. 
+ """ + if identifier is None: + return keras.activations.linear + if isinstance(identifier, (str, dict)): + return deserialize(identifier) + if callable(identifier): + return identifier + raise ValueError( + f'Could not interpret activation function identifier: {identifier}') diff --git a/tensorflow_mri/python/activations/complex_activations.py b/tensorflow_mri/python/activations/complex_activations.py new file mode 100644 index 00000000..e1ea921b --- /dev/null +++ b/tensorflow_mri/python/activations/complex_activations.py @@ -0,0 +1,145 @@ +# Copyright 2021 The TensorFlow MRI Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Complex-valued activations.""" + +import inspect + +import tensorflow as tf + +from tensorflow_mri.python.util import api_util + + +def complexified(name, type_='cartesian'): + """Returns a decorator to create complex-valued activations. + + Args: + name: A `str` denoting the name of the activation function. + type_: A `str` denoting the type of the complex-valued activation function. + Must be one of `'cartesian'` or `'polar'`. + + Returns: + A decorator to convert real-valued activations to complex-valued + activations. 
+ """ + if type_ not in ('cartesian', 'polar'): + raise ValueError( + f"type_ must be one of 'cartesian' or 'polar', but got: {type_}") + def decorator(func): + def wrapper(x, *args, **kwargs): + x = tf.convert_to_tensor(x) + if x.dtype.is_complex: + if type_ == 'polar': + j = tf.dtypes.complex(tf.zeros((), dtype=x.dtype.real_dtype), + tf.ones((), dtype=x.dtype.real_dtype)) + return (tf.cast(func(tf.math.abs(x), *args, **kwargs), x.dtype) * + tf.math.exp(j * tf.cast(tf.math.angle(x), x.dtype))) + if type_ == 'cartesian': + return tf.dtypes.complex(func(tf.math.real(x), *args, **kwargs), + func(tf.math.imag(x), *args, **kwargs)) + return func(x, *args, **kwargs) + wrapper.__name__ = name + wrapper.__signature__ = inspect.signature(func) + return wrapper + return decorator + + + +complex_relu = api_util.export("activations.complex_relu")( + complexified(name='complex_relu', type_='cartesian')( + tf.keras.activations.relu)) +complex_relu.__doc__ = ( + """Applies the rectified linear unit activation function. + + With default values, this returns the standard ReLU activation: + `max(x, 0)`, the element-wise maximum of 0 and the input tensor. + + Modifying default parameters allows you to use non-zero thresholds, + change the max value of the activation, and to use a non-zero multiple of + the input for values below the threshold. + + If passed a complex-valued tensor, the ReLU activation is independently + applied to its real and imaginary parts, i.e., the function returns + `relu(real(x)) + 1j * relu(imag(x))`. + + ```{note} + This activation does not preserve the phase of complex inputs. + ``` + + If passed a real-valued tensor, this function falls back to the standard + `tf.keras.activations.relu`. + + Args: + x: The input `tf.Tensor`. Can be real or complex. + alpha: A `float` that governs the slope for values lower than the + threshold. + max_value: A `float` that sets the saturation threshold (the largest value + the function will return). 
+ threshold: A `float` giving the threshold value of the activation function + below which values will be damped or set to zero. + + Returns: + A `tf.Tensor` of the same shape and dtype of input `x`. + + References: + 1. https://arxiv.org/abs/1705.09792 + """ +) + + +mod_relu = api_util.export("activations.mod_relu")( + complexified(name='mod_relu', type_='polar')( + tf.keras.activations.relu)) +mod_relu.__doc__ = ( + """Applies the rectified linear unit activation function. + + With default values, this returns the standard ReLU activation: + `max(x, 0)`, the element-wise maximum of 0 and the input tensor. + + Modifying default parameters allows you to use non-zero thresholds, + change the max value of the activation, and to use a non-zero multiple of + the input for values below the threshold. + + If passed a complex-valued tensor, the ReLU activation is applied to its + magnitude, i.e., the function returns `relu(abs(x)) * exp(1j * angle(x))`. + + ```{note} + This activation preserves the phase of complex inputs. + ``` + + ```{warning} + With default parameters, this activation is linear, since the magnitude + of the input is never negative. Usually you will want to set one or more + of the provided parameters to non-default values. + ``` + + If passed a real-valued tensor, this function falls back to the standard + `tf.keras.activations.relu`. + + Args: + x: The input `tf.Tensor`. Can be real or complex. + alpha: A `float` that governs the slope for values lower than the + threshold. + max_value: A `float` that sets the saturation threshold (the largest value + the function will return). + threshold: A `float` giving the threshold value of the activation function + below which values will be damped or set to zero. + + Returns: + A `tf.Tensor` of the same shape and dtype of input `x`. + + References: + 1. 
https://arxiv.org/abs/1705.09792 + """ +) diff --git a/tensorflow_mri/python/activations/complex_activations_test.py b/tensorflow_mri/python/activations/complex_activations_test.py new file mode 100644 index 00000000..1279d884 --- /dev/null +++ b/tensorflow_mri/python/activations/complex_activations_test.py @@ -0,0 +1,68 @@ +# Copyright 2022 The TensorFlow MRI Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Tests for module `complex_activations`.""" + +import tensorflow as tf + +from tensorflow_mri.python import activations +from tensorflow_mri.python.activations import complex_activations +from tensorflow_mri.python.util import test_util + + +class ReluTest(test_util.TestCase): + """Tests for ReLU-derived activations.""" + # pylint: disable=missing-function-docstring + @test_util.run_all_execution_modes + def test_complex_relu(self): + inputs = [1.0 - 2.0j, 1.0 + 3.0j, -2.0 + 1.0j, -3.0 - 4.0j] + expected = [1.0 + 0.0j, 1.0 + 3.0j, 0.0 + 1.0j, 0.0 + 0.0j] + result = complex_activations.complex_relu(inputs) + self.assertAllClose(expected, result) + + @test_util.run_all_execution_modes + def test_mod_relu(self): + inputs = [1.0 - 2.0j, 1.0 + 3.0j, -2.0 + 1.0j, -3.0 - 4.0j] + expected = [0.0 + 0.0j, 1.0 + 3.0j, 0.0 + 0.0j, -3.0 - 4.0j] + result = complex_activations.mod_relu(inputs, threshold=3.0) + self.assertAllClose(expected, result) + + def 
test_serialization(self): + fn = activations.get('complex_relu') + self.assertEqual(complex_activations.complex_relu, fn) + + fn = activations.get('mod_relu') + self.assertEqual(complex_activations.mod_relu, fn) + + fn = activations.deserialize('complex_relu') + self.assertEqual(complex_activations.complex_relu, fn) + + fn = activations.deserialize('mod_relu') + self.assertEqual(complex_activations.mod_relu, fn) + + fn = activations.serialize(complex_activations.complex_relu) + self.assertEqual('complex_relu', fn) + + fn = activations.serialize(complex_activations.mod_relu) + self.assertEqual('mod_relu', fn) + + fn = activations.get(complex_activations.complex_relu) + self.assertEqual(complex_activations.complex_relu, fn) + + fn = activations.get(complex_activations.mod_relu) + self.assertEqual(complex_activations.mod_relu, fn) + + +if __name__ == '__main__': + tf.test.main() diff --git a/tensorflow_mri/python/callbacks/__init__.py b/tensorflow_mri/python/callbacks/__init__.py index d77bc844..16291601 100644 --- a/tensorflow_mri/python/callbacks/__init__.py +++ b/tensorflow_mri/python/callbacks/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2021 University College London. All Rights Reserved. +# Copyright 2021 The TensorFlow MRI Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tensorflow_mri/python/callbacks/tensorboard_callbacks.py b/tensorflow_mri/python/callbacks/tensorboard_callbacks.py index 9de96d8d..a006fbb6 100644 --- a/tensorflow_mri/python/callbacks/tensorboard_callbacks.py +++ b/tensorflow_mri/python/callbacks/tensorboard_callbacks.py @@ -1,4 +1,4 @@ -# Copyright 2021 University College London. All Rights Reserved. +# Copyright 2021 The TensorFlow MRI Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -53,7 +53,10 @@ class TensorBoardImages(tf.keras.callbacks.Callback): logs. Defaults to 1. max_images: Maximum number of images to be written at each step. Defaults to 3. - summary_name: Name for the image summaries. Defaults to `'val_images'`. + summary_name: Name for the image summaries. Defaults to `'val_images'`. Can + be a list of names if you wish to write multiple image summaries for each + example. In this case, you must also specify a list of display functions + in the `display_fn` parameter. volume_mode: Specifies how to save 3D images. Must be `None`, `'gif'` or an integer. If `None` (default), inputs are expected to be 2D images. In `'gif'` mode, each 3D volume is stored as an animated GIF. If an integer, @@ -63,7 +66,9 @@ class TensorBoardImages(tf.keras.callbacks.Callback): image to be written to TensorBoard. Overrides the default function, which concatenates selected features, labels and predictions according to `concat_axis`, `feature_keys`, `label_keys`, `prediction_keys` and - `complex_part`. + `complex_part`. Can be a list of callables if you wish to write multiple + image summaries for each example. In this case, you must also specify a + list of summary names in the `summary_name` parameter. concat_axis: An `int`. The axis along which to concatenate features/labels/predictions. Defaults to -2. 
feature_keys: A list of `str` or `int` specifying which features to @@ -105,6 +110,13 @@ def __init__(self, self.label_keys = label_keys self.prediction_keys = prediction_keys self.complex_part = complex_part + if not isinstance(self.summary_name, (list, tuple)): + self.summary_name = (self.summary_name,) + if not isinstance(self.display_fn, (list, tuple)): + self.display_fn = (self.display_fn,) + if len(self.summary_name) != len(self.display_fn): + raise ValueError( + "The number of summary names and display functions must be the same.") def on_epoch_end(self, epoch, logs=None): # pylint: disable=unused-argument """Called at the end of an epoch.""" @@ -122,7 +134,7 @@ def _write_image_summaries(self, step=0): image_dir = os.path.join(self.log_dir, 'image') self.file_writer = tf.summary.create_file_writer(image_dir) - images = [] + images = {k: [] for k in self.summary_name} # For each batch. for batch in self.x: @@ -140,29 +152,30 @@ def _write_image_summaries(self, step=0): y_pred = nest_util.unstack_nested_tensors(y_pred) # Create display images. - images.extend(list(map(self.display_fn, x, y, y_pred))) + for name, func in zip(self.summary_name, self.display_fn): + images[name].extend(list(map(func, x, y, y_pred))) # Check how many outputs we have processed. - if len(images) >= self.max_images: + if len(images[tuple(images.keys())[0]]) >= self.max_images: break - # Stack all the images. - images = tf.stack(images) + # Stack all the images. Converting to tensor is required to avoid unexpected + # casting (e.g., without it, a list of NumPy arrays of uint8 inputs returns + # an int32 tensor). + images = {k: tf.stack([tf.convert_to_tensor(image) for image in v]) + for k, v in images.items()} # Keep only selected slice, if requested. if isinstance(self.volume_mode, int): - images = images[:, self.volume_mode, ...] + images = {k: v[:, self.volume_mode, ...] for k, v in images.items()} # Write images. 
with self.file_writer.as_default(step=step): - if self.volume_mode == 'gif': - image_summary.gif(self.summary_name, - images, - max_outputs=self.max_images) - else: - tf.summary.image(self.summary_name, - images, - max_outputs=self.max_images) + for name, image in images.items(): + if self.volume_mode == 'gif': + image_summary.gif(name, image, max_outputs=self.max_images) + else: + tf.summary.image(name, image, max_outputs=self.max_images) # Close writer. self.file_writer.close() diff --git a/tensorflow_mri/python/callbacks/tensorboard_callbacks_test.py b/tensorflow_mri/python/callbacks/tensorboard_callbacks_test.py index 98c7aa43..f9cea818 100644 --- a/tensorflow_mri/python/callbacks/tensorboard_callbacks_test.py +++ b/tensorflow_mri/python/callbacks/tensorboard_callbacks_test.py @@ -1,4 +1,4 @@ -# Copyright 2021 University College London. All Rights Reserved. +# Copyright 2021 The TensorFlow MRI Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tensorflow_mri/python/coils/__init__.py b/tensorflow_mri/python/coils/__init__.py new file mode 100644 index 00000000..c4c17921 --- /dev/null +++ b/tensorflow_mri/python/coils/__init__.py @@ -0,0 +1,19 @@ +# Copyright 2022 The TensorFlow MRI Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ==============================================================================
+"""Operators for coil arrays."""
+
+from tensorflow_mri.python.coils import coil_combination
+from tensorflow_mri.python.coils import coil_compression
+from tensorflow_mri.python.coils import coil_sensitivities
diff --git a/tensorflow_mri/python/coils/coil_combination.py b/tensorflow_mri/python/coils/coil_combination.py
new file mode 100644
index 00000000..f83aa7ea
--- /dev/null
+++ b/tensorflow_mri/python/coils/coil_combination.py
@@ -0,0 +1,69 @@
+# Copyright 2022 The TensorFlow MRI Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Coil combination."""
+
+import tensorflow as tf
+
+from tensorflow_mri.python.util import api_util
+
+
+@api_util.export("coils.combine_coils")
+def combine_coils(images, maps=None, coil_axis=-1, keepdims=False, name=None):
+  """Combines a multicoil image into a single-coil image.
+
+  Supports sum of squares (when `maps` is `None`) and adaptive combination.
+
+  Args:
+    images: A `tf.Tensor`. The input images.
+    maps: A `tf.Tensor`. The coil sensitivity maps. This argument is optional.
+      If `maps` is provided, it must have the same shape and type as
+      `images`. In this case an adaptive coil combination is performed using
+      the specified maps. 
If `maps` is `None`, a simple estimate of `maps` + is used (ie, images are combined using the sum of squares method). + coil_axis: An `int`. The coil axis. Defaults to -1. + keepdims: A boolean. If `True`, retains the coil dimension with size 1. + name: A name for the operation. Defaults to "combine_coils". + + Returns: + A `tf.Tensor`. The combined images. + + References: + 1. Roemer, P.B., Edelstein, W.A., Hayes, C.E., Souza, S.P. and + Mueller, O.M. (1990), The NMR phased array. Magn Reson Med, 16: + 192-225. https://doi.org/10.1002/mrm.1910160203 + + 2. Bydder, M., Larkman, D. and Hajnal, J. (2002), Combination of signals + from array coils using image-based estimation of coil sensitivity + profiles. Magn. Reson. Med., 47: 539-548. + https://doi.org/10.1002/mrm.10092 + """ + with tf.name_scope(name or "combine_coils"): + images = tf.convert_to_tensor(images) + if maps is not None: + maps = tf.convert_to_tensor(maps) + + if maps is None: + combined = tf.math.sqrt( + tf.math.reduce_sum(images * tf.math.conj(images), + axis=coil_axis, keepdims=keepdims)) + + else: + combined = tf.math.divide_no_nan( + tf.math.reduce_sum(images * tf.math.conj(maps), + axis=coil_axis, keepdims=keepdims), + tf.math.reduce_sum(maps * tf.math.conj(maps), + axis=coil_axis, keepdims=keepdims)) + + return combined diff --git a/tensorflow_mri/python/coils/coil_combination_test.py b/tensorflow_mri/python/coils/coil_combination_test.py new file mode 100644 index 00000000..86fa3c91 --- /dev/null +++ b/tensorflow_mri/python/coils/coil_combination_test.py @@ -0,0 +1,78 @@ +# Copyright 2022 The TensorFlow MRI Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Tests for module `coil_combination`.""" + +from absl.testing import parameterized +import tensorflow as tf + +from tensorflow_mri.python.coils import coil_combination +from tensorflow_mri.python.util import test_util + + +class CoilCombineTest(test_util.TestCase): + """Tests for coil combination op.""" + + @parameterized.product(coil_axis=[0, -1], + keepdims=[True, False]) + @test_util.run_in_graph_and_eager_modes + def test_sos(self, coil_axis, keepdims): # pylint: disable=missing-param-doc + """Test sum of squares combination.""" + + images = self._random_complex((20, 20, 8)) + + combined = coil_combination.combine_coils( + images, coil_axis=coil_axis, keepdims=keepdims) + + ref = tf.math.sqrt( + tf.math.reduce_sum(images * tf.math.conj(images), + axis=coil_axis, keepdims=keepdims)) + + self.assertAllEqual(combined.shape, ref.shape) + self.assertAllClose(combined, ref) + + + @parameterized.product(coil_axis=[0, -1], + keepdims=[True, False]) + @test_util.run_in_graph_and_eager_modes + def test_adaptive(self, coil_axis, keepdims): # pylint: disable=missing-param-doc + """Test adaptive combination.""" + + images = self._random_complex((20, 20, 8)) + maps = self._random_complex((20, 20, 8)) + + combined = coil_combination.combine_coils( + images, maps=maps, coil_axis=coil_axis, keepdims=keepdims) + + ref = tf.math.reduce_sum(images * tf.math.conj(maps), + axis=coil_axis, keepdims=keepdims) + + ref /= tf.math.reduce_sum(maps * tf.math.conj(maps), + axis=coil_axis, 
keepdims=keepdims) + + self.assertAllEqual(combined.shape, ref.shape) + self.assertAllClose(combined, ref) + + def setUp(self): + super().setUp() + tf.random.set_seed(0) + + def _random_complex(self, shape): + return tf.dtypes.complex( + tf.random.normal(shape), + tf.random.normal(shape)) + + +if __name__ == '__main__': + tf.test.main() diff --git a/tensorflow_mri/python/coils/coil_compression.py b/tensorflow_mri/python/coils/coil_compression.py new file mode 100644 index 00000000..abe81cb5 --- /dev/null +++ b/tensorflow_mri/python/coils/coil_compression.py @@ -0,0 +1,284 @@ +# Copyright 2022 The TensorFlow MRI Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Coil compression.""" + +import abc + +import tensorflow as tf + +from tensorflow_mri.python.util import api_util +from tensorflow_mri.python.util import check_util + + +@api_util.export("coils.compress_coils") +def compress_coils(kspace, + coil_axis=-1, + out_coils=None, + method='svd', + **kwargs): + """Compresses a multicoil *k*-space/image array. + + This function estimates a coil compression matrix and uses it to compress + `kspace`. If you would like to reuse a coil compression matrix or need to + calibrate the compression using different data, use one of the compressor + classes instead. 
+ + This function supports the following coil compression methods: + + - **SVD**: Based on direct singular-value decomposition (SVD) of *k*-space + data [1]_. This coil compression method supports Cartesian and + non-Cartesian data. This method is resilient to noise, but does not + achieve optimal compression if there are fully-sampled dimensions. + + + + Args: + kspace: A `Tensor`. The multi-coil *k*-space data. Must have type + `complex64` or `complex128`. Must have shape `[..., Cin]`, where `...` are + the encoding dimensions and `Cin` is the number of coils. Alternatively, + the position of the coil axis may be different as long as the `coil_axis` + argument is set accordingly. If `method` is `"svd"`, `kspace` can be + Cartesian or non-Cartesian. If `method` is `"geometric"` or `"espirit"`, + `kspace` must be Cartesian. + coil_axis: An `int`. Defaults to -1. + out_coils: An `int`. The desired number of virtual output coils. + method: A `string`. The coil compression algorithm. Must be `"svd"`. + **kwargs: Additional method-specific keyword arguments to be passed to the + coil compressor. + + Returns: + A `Tensor` containing the compressed *k*-space data. Has shape + `[..., Cout]`, where `Cout` is determined based on `out_coils` or + other inputs and `...` are the unmodified encoding dimensions. + + References: + 1. Huang, F., Vijayakumar, S., Li, Y., Hertel, S. and Duensing, G.R. + (2008). A software channel compression technique for faster + reconstruction with many channels. Magn Reson Imaging, 26(1): 133-141. + 2. Zhang, T., Pauly, J.M., Vasanawala, S.S. and Lustig, M. (2013), Coil + compression for accelerated imaging with Cartesian sampling. Magn + Reson Med, 69: 571-582. https://doi.org/10.1002/mrm.24267 + 3. Bahri, D., Uecker, M., & Lustig, M. (2013). ESPIRIT-based coil + compression for cartesian sampling. In Proceedings of the 21st + Annual Meeting of ISMRM, Salt Lake City, Utah, USA (Vol. 47). 
+ """ + return make_coil_compressor(method, + coil_axis=coil_axis, + out_coils=out_coils, + **kwargs).fit_transform(kspace) + + +class CoilCompressor(): + """Base class for coil compressors. + + Args: + coil_axis: An `int`. The axis of the coil dimension. + out_coils: An `int`. The desired number of virtual output coils. + """ + def __init__(self, coil_axis=-1, out_coils=None): + self._coil_axis = coil_axis + self._out_coils = out_coils + + @abc.abstractmethod + def fit(self, kspace): + pass + + @abc.abstractmethod + def transform(self, kspace): + pass + + def fit_transform(self, kspace): + return self.fit(kspace).transform(kspace) + + +@api_util.export("coils.CoilCompressorSVD") +class CoilCompressorSVD(CoilCompressor): + """SVD-based coil compression. + + This class implements the SVD-based coil compression method [1]_. + + Use this class to compress multi-coil *k*-space data. The method `fit` must + be used first to calculate the coil compression matrix. The method `transform` + can then be used to compress *k*-space data. If the data to be used for + fitting is the same data to be transformed, you can also use the method + `fit_transform` to fit and transform the data in one step. + + Args: + coil_axis: An `int`. Defaults to -1. + out_coils: An `int`. The desired number of virtual output coils. Cannot be + used together with `variance_ratio`. + variance_ratio: A `float` between 0.0 and 1.0. The percentage of total + variance to be retained. The number of virtual coils is automatically + selected to retain at least this percentage of variance. Cannot be used + together with `out_coils`. + + References: + 1. Huang, F., Vijayakumar, S., Li, Y., Hertel, S. and Duensing, G.R. + (2008). A software channel compression technique for faster reconstruction + with many channels. Magn Reson Imaging, 26(1): 133-141. 
+  """
+  def __init__(self, coil_axis=-1, out_coils=None, variance_ratio=None):
+    if out_coils is not None and variance_ratio is not None:
+      raise ValueError("Cannot specify both `out_coils` and `variance_ratio`.")
+    super().__init__(coil_axis=coil_axis, out_coils=out_coils)
+    self._variance_ratio = variance_ratio
+    self._singular_values = None
+    self._explained_variance = None
+    self._explained_variance_ratio = None
+
+  def fit(self, kspace):
+    """Fits the coil compression matrix.
+
+    Args:
+      kspace: A `Tensor`. The multi-coil *k*-space data. Must have type
+        `complex64` or `complex128`.
+
+    Returns:
+      The fitted `CoilCompressorSVD` object.
+    """
+    kspace = tf.convert_to_tensor(kspace)
+
+    # Move coil axis to innermost dimension if not already there.
+    kspace, _ = self._permute_coil_axis(kspace)
+
+    # Flatten the encoding dimensions.
+    num_coils = tf.shape(kspace)[-1]
+    kspace = tf.reshape(kspace, [-1, num_coils])
+    num_samples = tf.shape(kspace)[0]
+
+    # Compute singular-value decomposition.
+    s, u, v = tf.linalg.svd(kspace)
+
+    # Compression matrix.
+    self._matrix = tf.cond(num_samples > num_coils, lambda: v, lambda: u)
+
+    # Get variance.
+    self._singular_values = s
+    self._explained_variance = s ** 2 / tf.cast(num_samples - 1, s.dtype)
+    total_variance = tf.math.reduce_sum(self._explained_variance)
+    self._explained_variance_ratio = self._explained_variance / total_variance
+
+    # Get output coils from variance ratio.
+    if self._variance_ratio is not None:
+      cum_variance = tf.math.cumsum(self._explained_variance_ratio, axis=0)
+      self._out_coils = tf.math.count_nonzero(
+          cum_variance <= self._variance_ratio)
+
+    # Remove unnecessary virtual coils.
+    if self._out_coils is not None:
+      self._matrix = self._matrix[:, :self._out_coils]
+
+    # If possible, set static number of output coils. 
+ if isinstance(self._out_coils, int): + self._matrix = tf.ensure_shape(self._matrix, [None, self._out_coils]) + + return self + + def transform(self, kspace): + """Applies the coil compression matrix to the input *k*-space. + + Args: + kspace: A `Tensor`. The multi-coil *k*-space data. Must have type + `complex64` or `complex128`. + + Returns: + The transformed k-space. + """ + kspace = tf.convert_to_tensor(kspace) + kspace, inv_perm = self._permute_coil_axis(kspace) + + # Some info. + encoding_dimensions = tf.shape(kspace)[:-1] + num_coils = tf.shape(kspace)[-1] + out_coils = tf.shape(self._matrix)[-1] + + # Flatten the encoding dimensions. + kspace = tf.reshape(kspace, [-1, num_coils]) + + # Apply compression. + kspace = tf.linalg.matmul(kspace, self._matrix) + + # Restore data shape. + kspace = tf.reshape( + kspace, + tf.concat([encoding_dimensions, [out_coils]], 0)) + + if inv_perm is not None: + kspace = tf.transpose(kspace, inv_perm) + + return kspace + + def _permute_coil_axis(self, kspace): + """Permutes the coil axis to the last dimension. + + Args: + kspace: A `Tensor`. The multi-coil *k*-space data. + + Returns: + A tuple of the permuted k-space and the inverse permutation. + """ + if self._coil_axis != -1: + rank = kspace.shape.rank # Rank must be known statically. 
+ canonical_coil_axis = ( + self._coil_axis + rank if self._coil_axis < 0 else self._coil_axis) + perm = ( + [ax for ax in range(rank) if not ax == canonical_coil_axis] + + [canonical_coil_axis]) + kspace = tf.transpose(kspace, perm) + inv_perm = tf.math.invert_permutation(perm) + return kspace, inv_perm + return kspace, None + + @property + def singular_values(self): + """The singular values associated with each virtual coil.""" + return self._singular_values + + @property + def explained_variance(self): + """The variance explained by each virtual coil.""" + return self._explained_variance + + @property + def explained_variance_ratio(self): + """The percentage of variance explained by each virtual coil.""" + return self._explained_variance_ratio + + +def make_coil_compressor(method, **kwargs): + """Creates a coil compressor based on the specified method. + + Args: + method: A `string`. The coil compression algorithm. Must be `"svd"`. + **kwargs: Additional method-specific keyword arguments to be passed to the + coil compressor. + + Returns: + A `CoilCompressor` object. + + Raises: + NotImplementedError: If the specified method is not implemented. + """ + method = check_util.validate_enum( + method, {'svd', 'geometric', 'espirit'}, name='method') + if method == 'svd': + return CoilCompressorSVD(**kwargs) + raise NotImplementedError(f"Method {method} not implemented.") diff --git a/tensorflow_mri/python/coils/coil_compression_test.py b/tensorflow_mri/python/coils/coil_compression_test.py new file mode 100644 index 00000000..9a2dd256 --- /dev/null +++ b/tensorflow_mri/python/coils/coil_compression_test.py @@ -0,0 +1,126 @@ +# Copyright 2021 The TensorFlow MRI Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Tests for module `coil_compression`.""" + +import itertools + +import tensorflow as tf + +from tensorflow_mri.python.coils import coil_compression +from tensorflow_mri.python.util import io_util +from tensorflow_mri.python.util import test_util + + +class CoilCompressionTest(test_util.TestCase): + """Tests for coil compression op.""" + + @classmethod + def setUpClass(cls): + super().setUpClass() + cls.data = io_util.read_hdf5('tests/data/coil_ops_data.h5') + + @test_util.run_in_graph_and_eager_modes + def test_coil_compression_svd(self): + """Test SVD coil compression.""" + kspace = self.data['cc/kspace'] + result = self.data['cc/result/svd'] + + cc_kspace = coil_compression.compress_coils(kspace) + + self.assertAllClose(cc_kspace, result, rtol=1e-2, atol=1e-2) + + @test_util.run_in_graph_and_eager_modes + def test_coil_compression_svd_two_step(self): + """Test SVD coil compression using two-step API.""" + kspace = self.data['cc/kspace'] + result = self.data['cc/result/svd'] + + compressor = coil_compression.CoilCompressorSVD(out_coils=16) + compressor = compressor.fit(kspace) + cc_kspace = compressor.transform(kspace) + self.assertAllClose(cc_kspace, result[..., :16], rtol=1e-2, atol=1e-2) + + @test_util.run_in_graph_and_eager_modes + def test_coil_compression_svd_transposed(self): + """Test SVD coil compression using two-step API.""" + kspace = self.data['cc/kspace'] + result = self.data['cc/result/svd'] + + kspace = tf.transpose(kspace, [2, 0, 1]) + cc_kspace = 
coil_compression.compress_coils(kspace, coil_axis=0) + cc_kspace = tf.transpose(cc_kspace, [1, 2, 0]) + + self.assertAllClose(cc_kspace, result, rtol=1e-2, atol=1e-2) + + @test_util.run_in_graph_and_eager_modes + def test_coil_compression_svd_basic(self): + """Test coil compression using SVD method with basic arrays.""" + shape = (20, 20, 8) + data = tf.dtypes.complex( + tf.random.stateless_normal(shape, [32, 43]), + tf.random.stateless_normal(shape, [321, 321])) + + params = { + 'out_coils': [None, 4], + 'variance_ratio': [None, 0.75]} + + values = itertools.product(*params.values()) + params = [dict(zip(params.keys(), v)) for v in values] + + for p in params: + with self.subTest(**p): + if p['out_coils'] is not None and p['variance_ratio'] is not None: + with self.assertRaisesRegex( + ValueError, + "Cannot specify both `out_coils` and `variance_ratio`"): + coil_compression.compress_coils(data, **p) + continue + + # Test op. + compressed_data = coil_compression.compress_coils(data, **p) + + # Flatten input data. + encoding_dims = tf.shape(data)[:-1] + input_coils = tf.shape(data)[-1] + data = tf.reshape(data, (-1, tf.shape(data)[-1])) + samples = tf.shape(data)[0] + + # Calculate compression matrix. + # This should be equivalent to TF line below. Not sure why + # not. Giving up. 
+ # u, s, vh = np.linalg.svd(data, full_matrices=False) + # v = vh.T.conj() + s, u, v = tf.linalg.svd(data, full_matrices=False) + matrix = tf.cond(samples > input_coils, lambda v=v: v, lambda u=u: u) + + out_coils = input_coils + if p['variance_ratio'] and not p['out_coils']: + variance = s ** 2 / 399.0 + out_coils = tf.math.count_nonzero( + tf.math.cumsum(variance / tf.math.reduce_sum(variance), axis=0) <= + p['variance_ratio']) + if p['out_coils']: + out_coils = p['out_coils'] + matrix = matrix[:, :out_coils] + + ref_data = tf.matmul(data, matrix) + ref_data = tf.reshape( + ref_data, tf.concat([encoding_dims, [out_coils]], 0)) + + self.assertAllClose(compressed_data, ref_data) + + +if __name__ == '__main__': + tf.test.main() diff --git a/tensorflow_mri/python/coils/coil_sensitivities.py b/tensorflow_mri/python/coils/coil_sensitivities.py new file mode 100644 index 00000000..89c0a753 --- /dev/null +++ b/tensorflow_mri/python/coils/coil_sensitivities.py @@ -0,0 +1,597 @@ +# Copyright 2022 The TensorFlow MRI Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Coil sensitivity estimation.""" + +import collections +import functools + +import numpy as np +import tensorflow as tf +import tensorflow.experimental.numpy as tnp + +from tensorflow_mri.python.ops import array_ops +from tensorflow_mri.python.ops import fft_ops +from tensorflow_mri.python.recon import recon_adjoint +from tensorflow_mri.python.util import api_util +from tensorflow_mri.python.util import check_util + + +@api_util.export("coils.estimate_sensitivities") +def estimate_sensitivities(input_, coil_axis=-1, method='walsh', **kwargs): + """Estimates coil sensitivity maps. + + This method supports 2D and 3D inputs. + + Args: + input_: A `Tensor`. Must have type `complex64` or `complex128`. Must have + shape `[height, width, coils]` for 2D inputs, or `[depth, height, + width, coils]` for 3D inputs. Alternatively, this function accepts a + transposed array by setting the `coil_axis` argument accordingly. Inputs + should be images if `method` is `'walsh'` or `'inati'`, and k-space data + if `method` is `'espirit'`. + coil_axis: An `int`. Defaults to -1. + method: A `string`. The coil sensitivity estimation algorithm. Must be one + of: `{'walsh', 'inati', 'espirit'}`. Defaults to `'walsh'`. + **kwargs: Additional keyword arguments for the coil sensitivity estimation + algorithm. See Notes. + + Returns: + A `Tensor`. Has the same type as `input_`. Has shape + `input_.shape + [num_maps]` if `method` is `'espirit'`, or shape + `input_.shape` otherwise. + + Notes: + + This function accepts the following method-specific keyword arguments: + + - For `method="walsh"`: + + - **filter_size**: An `int`. The size of the smoothing filter. + + - For `method="inati"`: + + - **filter_size**: An `int`. The size of the smoothing filter. + - **max_iter**: An `int`. The maximum number of iterations. + - **tol**: A `float`. The convergence tolerance. 
+ + - For `method="espirit"`: + + - **calib_size**: An `int` or a list of `ints`. The size of the + calibration region. If `None`, this is set to `input_.shape[:-1]` (ie, + use full input for calibration). Defaults to 24. + - **kernel_size**: An `int` or a list of `ints`. The kernel size. Defaults + to 6. + - **num_maps**: An `int`. The number of output maps. Defaults to 2. + - **null_threshold**: A `float`. The threshold used to determine the size + of the null-space. Defaults to 0.02. + - **eigen_threshold**: A `float`. The threshold used to determine the + locations where coil sensitivity maps should be masked out. Defaults + to 0.95. + - **image_shape**: A `tf.TensorShape` or a list of `ints`. The shape of + the output maps. If `None`, this is set to `input_.shape`. Defaults to + `None`. + + References: + 1. Walsh, D.O., Gmitro, A.F. and Marcellin, M.W. (2000), Adaptive + reconstruction of phased array MR imagery. Magn. Reson. Med., 43: + 682-690. https://doi.org/10.1002/(SICI)1522-2594(200005)43:5<682::AID-MRM10>3.0.CO;2-G + 2. Inati, S.J., Hansen, M.S. and Kellman, P. (2014). A fast optimal + method for coil sensitivity estimation and adaptive coil combination for + complex images. Proceedings of the 2014 Joint Annual Meeting + ISMRM-ESMRMB. + 3. Uecker, M., Lai, P., Murphy, M.J., Virtue, P., Elad, M., Pauly, J.M., + Vasanawala, S.S. and Lustig, M. (2014), ESPIRiT—an eigenvalue approach + to autocalibrating parallel MRI: Where SENSE meets GRAPPA. Magn. Reson. + Med., 71: 990-1001. 
https://doi.org/10.1002/mrm.24751 + """ + # pylint: disable=missing-raises-doc + with tf.name_scope(kwargs.get("name", "estimate_sensitivities")): + input_ = tf.convert_to_tensor(input_) + tf.debugging.assert_rank_at_least(input_, 2, message=( + f"Argument `input_` must have rank of at least 2, but got shape: " + f"{input_.shape}")) + coil_axis = check_util.validate_type(coil_axis, int, name='coil_axis') + method = check_util.validate_enum( + method, {'walsh', 'inati', 'espirit'}, name='method') + + # Move coil axis to innermost dimension if not already there. + if coil_axis != -1: + rank = input_.shape.rank + canonical_coil_axis = coil_axis + rank if coil_axis < 0 else coil_axis + perm = ( + [ax for ax in range(rank) if not ax == canonical_coil_axis] + + [canonical_coil_axis]) + input_ = tf.transpose(input_, perm) + + if method == 'walsh': + maps = _estimate_walsh(input_, **kwargs) + elif method == 'inati': + maps = _estimate_inati(input_, **kwargs) + elif method == 'espirit': + maps = _estimate_espirit(input_, **kwargs) + else: + raise RuntimeError("This should never happen.") + + # If necessary, move coil axis back to its original location. + if coil_axis != -1: + inv_perm = tf.math.invert_permutation(perm) + if method == 'espirit': + # When using ESPIRiT method, output has an additional `maps` dimension. + inv_perm = tf.concat([inv_perm, [tf.shape(inv_perm)[0]]], 0) + maps = tf.transpose(maps, inv_perm) + + return maps + + +def _estimate_walsh(images, filter_size=5): + """Estimate coil sensitivity maps using Walsh's method. + + For the parameters, see `estimate`. + """ + rank = images.shape.rank - 1 + image_shape = tf.shape(images)[:-1] + num_coils = tf.shape(images)[-1] + + filter_size = check_util.validate_list( + filter_size, element_type=int, length=rank, name='filter_size') + + # Flatten all spatial dimensions into a single axis, so `images` has shape + # `[num_pixels, num_coils]`. 
+ flat_images = tf.reshape(images, [-1, num_coils]) + + # Compute covariance matrix for each pixel; with shape + # `[num_pixels, num_coils, num_coils]`. + correlation_matrix = tf.math.multiply( + tf.reshape(flat_images, [-1, num_coils, 1]), + tf.math.conj(tf.reshape(flat_images, [-1, 1, num_coils]))) + + # Smooth the covariance tensor along the spatial dimensions. + correlation_matrix = tf.reshape( + correlation_matrix, tf.concat([image_shape, [-1]], 0)) + correlation_matrix = _apply_uniform_filter(correlation_matrix, filter_size) + correlation_matrix = tf.reshape(correlation_matrix, [-1] + [num_coils] * 2) + + # Get sensitivity maps as the dominant eigenvector. + _, eigenvectors = tf.linalg.eig(correlation_matrix) # pylint: disable=no-value-for-parameter + maps = eigenvectors[..., -1] + + # Restore spatial axes. + maps = tf.reshape(maps, tf.concat([image_shape, [num_coils]], 0)) + + return maps + + +def _estimate_inati(images, + filter_size=5, + max_iter=5, + tol=1e-3): + """Estimate coil sensitivity maps using Inati's fast method. + + For the parameters, see `estimate`. + """ + rank = images.shape.rank - 1 + spatial_axes = list(range(rank)) + coil_axis = -1 + + # Validate inputs. 
+ filter_size = check_util.validate_list( + filter_size, element_type=int, length=rank, name='filter_size') + max_iter = check_util.validate_type(max_iter, int, name='max_iter') + tol = check_util.validate_type(tol, float, name='tol') + + d_sum = tf.math.reduce_sum(images, axis=spatial_axes, keepdims=True) + d_sum /= tf.norm(d_sum, axis=coil_axis, keepdims=True) + + r = tf.math.reduce_sum( + tf.math.conj(d_sum) * images, axis=coil_axis, keepdims=True) + + eps = tf.cast( + tnp.finfo(images.dtype).eps * tf.math.reduce_mean(tf.math.abs(images)), + images.dtype) + + State = collections.namedtuple('State', ['i', 'maps', 'r', 'd']) + + def _cond(i, state): + return tf.math.logical_and(i < max_iter, state.d >= tol) + + def _body(i, state): + prev_r = state.r + r = state.r + + r = tf.math.conj(r) + + maps = images * r + smooth_maps = _apply_uniform_filter(maps, filter_size) + d = smooth_maps * tf.math.conj(smooth_maps) + + # Sum over coils. + r = tf.math.reduce_sum(d, axis=coil_axis, keepdims=True) + + r = tf.math.sqrt(r) + r = tf.math.reciprocal(r + eps) + + maps = smooth_maps * r + + d = images * tf.math.conj(maps) + r = tf.math.reduce_sum(d, axis=coil_axis, keepdims=True) + + d = maps * r + + d_sum = tf.math.reduce_sum(d, axis=spatial_axes, keepdims=True) + d_sum /= tf.norm(d_sum, axis=coil_axis, keepdims=True) + + im_t = tf.math.reduce_sum( + tf.math.conj(d_sum) * maps, axis=coil_axis, keepdims=True) + im_t /= (tf.cast(tf.math.abs(im_t), images.dtype) + eps) + r *= im_t + im_t = tf.math.conj(im_t) + maps = maps * im_t + + diff_r = r - prev_r + d = tf.math.abs(tf.norm(diff_r) / tf.norm(r)) + + return i + 1, State(i=i + 1, maps=maps, r=r, d=d) + + i = tf.constant(0, dtype=tf.int32) + state = State(i=i, + maps=tf.zeros_like(images), + r=r, + d=tf.constant(1.0, dtype=images.dtype.real_dtype)) + [i, state] = tf.while_loop(_cond, _body, [i, state]) + + return tf.reshape(state.maps, images.shape) + + +def _estimate_espirit(kspace, + calib_size=24, + kernel_size=6, + 
num_maps=2, + null_threshold=0.02, + eigen_threshold=0.95, + image_shape=None): + """Estimate coil sensitivity maps using the ESPIRiT method. + + For the parameters, see `estimate`. + """ + kspace = tf.convert_to_tensor(kspace) + rank = kspace.shape.rank - 1 + spatial_axes = list(range(rank)) + num_coils = tf.shape(kspace)[-1] + if image_shape is None: + image_shape = kspace.shape[:-1] + if calib_size is None: + calib_size = image_shape.as_list() + + calib_size = check_util.validate_list( + calib_size, element_type=int, length=rank, name='calib_size') + kernel_size = check_util.validate_list( + kernel_size, element_type=int, length=rank, name='kernel_size') + + with tf.control_dependencies([ + tf.debugging.assert_greater(calib_size, kernel_size, message=( + f"`calib_size` must be greater than `kernel_size`, but got " + f"{calib_size} and {kernel_size}"))]): + kspace = tf.identity(kspace) + + # Get calibration region. + calib = array_ops.central_crop(kspace, calib_size + [-1]) + + # Construct the calibration block Hankel matrix. + conv_size = [cs - ks + 1 for cs, ks in zip(calib_size, kernel_size)] + calib_matrix = tf.zeros([_prod(conv_size), _prod(kernel_size) * num_coils], + dtype=calib.dtype) + idx = 0 + for nd_inds in np.ndindex(*conv_size): + slices = [slice(ii, ii + ks) for ii, ks in zip(nd_inds, kernel_size)] + calib_matrix = tf.tensor_scatter_nd_update( + calib_matrix, [[idx]], tf.reshape(calib[slices], [1, -1])) + idx += 1 + + # Compute SVD decomposition, threshold singular values and reshape V to create + # k-space kernel matrix. + s, _, v = tf.linalg.svd(calib_matrix, full_matrices=True) + num_values = tf.math.count_nonzero(s >= s[0] * null_threshold) + v = v[:, :num_values] + kernel = tf.reshape(v, kernel_size + [num_coils, -1]) + + # Rotate kernel to order by maximum variance. 
+ perm = list(range(kernel.shape.rank)) + perm[-2], perm[-1] = perm[-1], perm[-2] + kernel = tf.transpose(kernel, perm) + kernel = tf.reshape(kernel, [-1, num_coils]) + _, _, rot_matrix = tf.linalg.svd(kernel, full_matrices=False) + kernel = tf.linalg.matmul(kernel, rot_matrix) + kernel = tf.reshape(kernel, kernel_size + [-1, num_coils]) + kernel = tf.transpose(kernel, perm) + + # Compute inverse FFT of k-space kernel. + kernel = tf.reverse(kernel, spatial_axes) + kernel = tf.math.conj(kernel) + + kernel_image = fft_ops.fftn(kernel, + shape=image_shape, + axes=list(range(rank)), + shift=True) + + kernel_image /= tf.cast(tf.sqrt(tf.cast(tf.math.reduce_prod(kernel_size), + kernel_image.dtype.real_dtype)), + kernel_image.dtype) + + values, maps, _ = tf.linalg.svd(kernel_image, full_matrices=False) + + # Apply phase modulation. + maps *= tf.math.exp(tf.complex(tf.constant(0.0, dtype=maps.dtype.real_dtype), + -tf.math.angle(maps[..., 0:1, :]))) + + # Undo rotation. + maps = tf.linalg.matmul(rot_matrix, maps) + + # Keep only the requested number of maps. + values = values[..., :num_maps] + maps = maps[..., :num_maps] + + # Apply thresholding. + mask = tf.expand_dims(values >= eigen_threshold, -2) + maps *= tf.cast(mask, maps.dtype) + + # If possible, set static number of maps. + if isinstance(num_maps, int): + maps_shape = maps.shape.as_list() + maps_shape[-1] = num_maps + maps = tf.ensure_shape(maps, maps_shape) + + return maps + + +def _apply_uniform_filter(tensor, size=5): + """Apply a uniform filter. + + Args: + tensor: A `Tensor`. Must have shape `spatial_shape + [channels]`. + size: An `int`. The size of the filter. Defaults to 5. + + Returns: + A `Tensor`. Has the same type as `tensor`. + """ + rank = tensor.shape.rank - 1 + + # Compute filters. + if isinstance(size, int): + size = [size] * rank + filters_shape = size + [1, 1] + filters = tf.ones(filters_shape, dtype=tensor.dtype.real_dtype) + filters /= _prod(size) + + # Select appropriate convolution function. 
+ conv_nd = { + 1: tf.nn.conv1d, + 2: tf.nn.conv2d, + 3: tf.nn.conv3d}[rank] + + # Move channels dimension to batch dimension. + tensor = tf.transpose(tensor) + + # Add a channels dimension, as required by `tf.nn.conv*` functions. + tensor = tf.expand_dims(tensor, -1) + + if tensor.dtype.is_complex: + # For complex input, we filter the real and imaginary parts separately. + tensor_real = tf.math.real(tensor) + tensor_imag = tf.math.imag(tensor) + + output_real = conv_nd(tensor_real, filters, [1] * (rank + 2), 'SAME') + output_imag = conv_nd(tensor_imag, filters, [1] * (rank + 2), 'SAME') + + output = tf.dtypes.complex(output_real, output_imag) + else: + output = conv_nd(tensor, filters, [1] * (rank + 2), 'SAME') + + # Remove channels dimension. + output = output[..., 0] + + # Move channels dimension back to last dimension. + output = tf.transpose(output) + + return output + + +@api_util.export("coils.estimate_sensitivities_universal") +def estimate_sensitivities_universal( + data, + operator, + calib_data=None, + calib_fn=None, + algorithm='walsh', + **kwargs): + """Estimates coil sensitivities (universal). + + This function is designed to standardize the computation of coil + sensitivities in different contexts. The `data` argument can accept + arbitrary measurement data (e.g., N-dimensional, Cartesian/non-Cartesian + *k*-space tensors). In addition, this function expects a linear `operator` + which describes the action of the measurement system (e.g., the MR imaging + experiment). + + This function also accepts an optional `calib_data` tensor or an optional + `calib_fn` function, in case the calibration should be performed with data + other than `data`. `calib_data` may be used to provide the calibration + data directly, whereas `calib_fn` may be used to specify the rules to extract + it from `data`. 
+ + ```{note} + This function is part of the family of + [universal operators](https://mrphys.github.io/tensorflow-mri/guide/universal/), + a set of functions and classes designed to work flexibly with any linear + system. + ``` + + Example: + >>> # Create an example image. + >>> image_shape = [256, 256] + >>> image = tfmri.image.phantom(shape=image_shape, + ... num_coils=8, + ... dtype=tf.complex64) + >>> kspace = tfmri.signal.fft(image, axes=[-2, -1], shift=True) + >>> # Create an acceleration mask with 4x undersampling along the last axis + >>> # and 24 calibration lines. + >>> mask = tfmri.sampling.accel_mask(shape=image_shape, + ... acceleration=[1, 4], + ... center_size=[256, 24]) + >>> # Create a linear operator describing a basic MR experiment with + >>> # Cartesian undersampling. This operator maps an image to the + >>> # corresponding *k*-space data (by performing an FFT and masking the + >>> # measured values). + >>> linop_mri = tfmri.linalg.LinearOperatorMRI( + ... image_shape=image_shape, mask=mask) + >>> # Generate *k*-space data using the system operator. + >>> kspace = linop_mri.transform(image) + >>> # To compute the sensitivity maps, we typically want to use only the + >>> # fully-sampled central region of *k*-space. Let's create a mask that + >>> # retrieves only the 24 calibration lines. + >>> calib_mask = tfmri.sampling.center_mask(shape=image_shape, + ... center_size=[256, 24]) + >>> # We can create a function that extracts the calibration data from + >>> # an arbitrary *k*-space by applying the calibration mask below. + >>> def calib_fn(data, operator): + ... # Returns `data` where `calib_mask` is `True`, 0 otherwise. + ... return tf.where(calib_mask, data, tf.zeros_like(data)) + >>> # Finally, compute the coil sensitivities using the above function + >>> # to extract the calibration data. + >>> maps = tfmri.coils.estimate_sensitivities_universal( + ... 
kspace, linop_mri, calib_fn=calib_fn) + + Args: + data: A `tf.Tensor` containing the measurement or observation data. + Must be compatible with the range of `operator`, i.e., it should be a + plausible output of the system operator. Accordingly, it should be a + plausible input for the adjoint of the system operator. + ```{tip} + In MRI, this is usually the *k*-space data. + ``` + operator: A `tfmri.linalg.LinearOperator` describing the action of the + measurement system. `operator` maps the causal factors to the measurement + or observation data. Its range must be compatible with `data`. + ```{tip} + In MRI, this is usually an operator mapping images to the corresponding + *k*-space data. For most MRI experiments, you can use + `tfmri.linalg.LinearOperatorMRI`. + ``` + calib_data: A `tf.Tensor` containing the calibration data. Must be + compatible with `operator`. If `None`, the calibration data will be + extracted from the `data` tensor using the `calib_fn` function. + ```{tip} + In MRI, this is usually the central, fully-sampled region of *k*-space. + ``` + calib_fn: A callable which returns the calibration data given the input + `data` and `operator`. Must have signature + `calib_fn(data: tf.Tensor, operator: tfmri.linalg.LinearOperator) -> tf.Tensor`. + If `None`, `calib_data` will be used for calibration. If `calib_data` is + also `None`, `data` will be used directly for calibration. + algorithm: A `str` or a callable specifying the coil sensitivity estimation + algorithm. Must be one of the following: + - A `str` to use one of the default algorithms, which are: + - `'direct'`: Uses images extracted from calibration data directly as + coil sensitivities. + - `'walsh'`: Implements the algorithm described in Walsh et al. [1]. + - `'inati'`: Implements the algorithm described in Inati et al. [2]. + - `'espirit'`: Implements the algorithm described in Uecker et al. [3]. + - A callable which returns the coil sensitivity maps given + `calib_data` and `operator`. 
Must have signature
+      `algorithm(calib_data: tf.Tensor, operator: tfmri.linalg.LinearOperator, **kwargs) -> tf.Tensor`,
+      i.e., it should accept the calibration data and return the coil
+      sensitivity maps.
+      Defaults to `'walsh'`.
+    **kwargs: Additional keyword arguments to be passed to the coil sensitivity
+      estimation algorithm. For a list of arguments available for the default
+      algorithms, see `tfmri.coils.estimate_sensitivities`.
+
+  Returns:
+    A `tf.Tensor` of shape `[..., coils, *spatial_dims]` containing the coil
+    sensitivities.
+
+  Raises:
+    ValueError: If both `calib_data` and `calib_fn` are provided.
+
+  References:
+    1. Walsh, D.O., Gmitro, A.F. and Marcellin, M.W. (2000), Adaptive
+      reconstruction of phased array MR imagery. Magn. Reson. Med., 43:
+      682-690. https://doi.org/10.1002/(SICI)1522-2594(200005)43:5<682::AID-MRM10>3.0.CO;2-G
+    2. Inati, S.J., Hansen, M.S. and Kellman, P. (2014). A fast optimal
+      method for coil sensitivity estimation and adaptive coil combination for
+      complex images. Proceedings of the 2014 Joint Annual Meeting
+      ISMRM-ESMRMB.
+    3. Uecker, M., Lai, P., Murphy, M.J., Virtue, P., Elad, M., Pauly, J.M.,
+      Vasanawala, S.S. and Lustig, M. (2014), ESPIRiT—an eigenvalue approach
+      to autocalibrating parallel MRI: Where SENSE meets GRAPPA. Magn. Reson.
+      Med., 71: 990-1001. https://doi.org/10.1002/mrm.24751
+  """  # pylint: disable=line-too-long
+  with tf.name_scope(kwargs.get('name', 'estimate_sensitivities_universal')):
+    rank = operator.rank
+    data = tf.convert_to_tensor(data)
+
+    if calib_data is None and calib_fn is None:
+      calib_data = data
+    elif calib_data is None and calib_fn is not None:
+      calib_data = calib_fn(data, operator)
+    elif calib_data is not None and calib_fn is None:
+      calib_data = tf.convert_to_tensor(calib_data)
+    else:
+      raise ValueError(
+          "Only one of `calib_data` and `calib_fn` may be specified.")
+
+    if callable(algorithm):
+      # Using a custom algorithm.
+ return algorithm(calib_data, operator, **kwargs) + + # Reconstruct image. + calib_data = recon_adjoint.recon_adjoint(calib_data, operator) + + # If method is `'direct'`, we simply return the reconstructed calibration + # data. + if algorithm == 'direct': + return calib_data + + # ESPIRiT method takes in k-space data, so convert back to k-space in this + # case. + if algorithm == 'espirit': + axes = list(range(-rank, 0)) + calib_data = fft_ops.fftn(calib_data, axes=axes, norm='ortho', shift=True) + + # Reshape to single batch dimension. + batch_shape_static = calib_data.shape[:-(rank + 1)] + batch_shape = tf.shape(calib_data)[:-(rank + 1)] + calib_shape = tf.shape(calib_data)[-(rank + 1):] + calib_data = tf.reshape(calib_data, tf.concat([[-1], calib_shape], 0)) + + # Apply estimation for each element in batch. + maps = tf.map_fn( + functools.partial(estimate_sensitivities, + coil_axis=-(rank + 1), + method=algorithm, + **kwargs), + calib_data) + + # Restore batch shape. + output_shape = tf.shape(maps)[1:] + output_shape_static = maps.shape[1:] + maps = tf.reshape(maps, + tf.concat([batch_shape, output_shape], 0)) + maps = tf.ensure_shape( + maps, batch_shape_static.concatenate(output_shape_static)) + + return maps + + +_prod = lambda iterable: functools.reduce(lambda x, y: x * y, iterable) diff --git a/tensorflow_mri/python/coils/coil_sensitivities_test.py b/tensorflow_mri/python/coils/coil_sensitivities_test.py new file mode 100644 index 00000000..89a0382e --- /dev/null +++ b/tensorflow_mri/python/coils/coil_sensitivities_test.py @@ -0,0 +1,153 @@ +# Copyright 2022 The TensorFlow MRI Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Tests for module `coil_sensitivities`.""" + +import tensorflow as tf + +from tensorflow_mri.python.coils import coil_sensitivities +from tensorflow_mri.python.linalg import linear_operator_mri +from tensorflow_mri.python.ops import fft_ops +from tensorflow_mri.python.ops import image_ops +from tensorflow_mri.python.ops import traj_ops +from tensorflow_mri.python.util import io_util +from tensorflow_mri.python.util import test_util + + +class EstimateTest(test_util.TestCase): + """Tests for ops related to estimation of coil sensitivity maps.""" + @classmethod + def setUpClass(cls): + + super().setUpClass() + cls.data = io_util.read_hdf5('tests/data/coil_ops_data.h5') + + @test_util.run_in_graph_and_eager_modes + def test_walsh(self): + """Test Walsh's method.""" + # GPU results are close, but about 1-2% of values show deviations up to + # 1e-3. This is probably related to TF issue: + # https://github.com/tensorflow/tensorflow/issues/45756 + # In the meantime, we run these tests on the CPU only. Same applies to all + # other tests in this class. 
+ with tf.device('/cpu:0'): + maps = coil_sensitivities.estimate_sensitivities( + self.data['images'], method='walsh') + + self.assertAllClose(maps, self.data['maps/walsh'], rtol=1e-2, atol=1e-2) + + @test_util.run_in_graph_and_eager_modes + def test_walsh_transposed(self): + """Test Walsh's method with a transposed array.""" + with tf.device('/cpu:0'): + maps = coil_sensitivities.estimate_sensitivities( + tf.transpose(self.data['images'], [2, 0, 1]), + coil_axis=0, method='walsh') + + self.assertAllClose(maps, tf.transpose(self.data['maps/walsh'], [2, 0, 1]), + rtol=1e-2, atol=1e-2) + + @test_util.run_in_graph_and_eager_modes + def test_inati(self): + """Test Inati's method.""" + with tf.device('/cpu:0'): + maps = coil_sensitivities.estimate_sensitivities( + self.data['images'], method='inati') + + self.assertAllClose(maps, self.data['maps/inati'], rtol=1e-4, atol=1e-4) + + @test_util.run_in_graph_and_eager_modes + def test_espirit(self): + """Test ESPIRiT method.""" + with tf.device('/cpu:0'): + maps = coil_sensitivities.estimate_sensitivities( + self.data['kspace'], method='espirit') + + self.assertAllClose(maps, self.data['maps/espirit'], rtol=1e-2, atol=1e-2) + + @test_util.run_in_graph_and_eager_modes + def test_espirit_transposed(self): + """Test ESPIRiT method with a transposed array.""" + with tf.device('/cpu:0'): + maps = coil_sensitivities.estimate_sensitivities( + tf.transpose(self.data['kspace'], [2, 0, 1]), + coil_axis=0, method='espirit') + + self.assertAllClose( + maps, tf.transpose(self.data['maps/espirit'], [2, 0, 1, 3]), + rtol=1e-2, atol=1e-2) + + @test_util.run_in_graph_and_eager_modes + def test_walsh_3d(self): + """Test Walsh method with 3D image.""" + with tf.device('/cpu:0'): + image = image_ops.phantom(shape=[64, 64, 64], num_coils=4) + # Currently only testing if it runs. 
+    maps = coil_sensitivities.estimate_sensitivities(image,  # pylint: disable=unused-variable
+                                                     coil_axis=0,
+                                                     method='walsh')
+
+
+class EstimateUniversalTest(test_util.TestCase):
+  """Tests for `estimate_sensitivities_universal`."""
+  def test_estimate_sensitivities_universal(self):
+    """Test `estimate_sensitivities_universal`."""
+    image_shape = [128, 128]
+    image = image_ops.phantom(shape=image_shape, num_coils=4,
+                              dtype=tf.complex64)
+    kspace = fft_ops.fftn(image, axes=[-2, -1], shift=True)
+    mask = traj_ops.accel_mask(image_shape, [2, 2], [32, 32])
+    kspace = tf.where(mask, kspace, tf.zeros_like(kspace))
+
+    operator = linear_operator_mri.LinearOperatorMRI(
+        image_shape=image_shape, mask=mask)
+
+    # Test with direct *k*-space.
+    # NOTE(review): the function's keyword is `algorithm`, not `method`;
+    # passing `method=...` falls into `**kwargs` and later collides with the
+    # explicit `method=algorithm` keyword inside `functools.partial`.
+    image = fft_ops.ifftn(kspace, axes=[-2, -1], norm='ortho', shift=True)
+    maps = coil_sensitivities.estimate_sensitivities_universal(
+        kspace, operator, algorithm='direct')
+    self.assertAllClose(image, maps)
+
+    # Test with calibration data.
+    calib_mask = traj_ops.center_mask(image_shape, [32, 32])
+    calib_data = tf.where(calib_mask, kspace, tf.zeros_like(kspace))
+    calib_image = fft_ops.ifftn(
+        calib_data, axes=[-2, -1], norm='ortho', shift=True)
+    maps = coil_sensitivities.estimate_sensitivities_universal(
+        kspace, operator, calib_data=calib_data, algorithm='direct')
+    self.assertAllClose(calib_image, maps)
+
+    # Test with calibration function.
+    calib_fn = lambda x, _: tf.where(calib_mask, x, tf.zeros_like(x))
+    maps = coil_sensitivities.estimate_sensitivities_universal(
+        kspace, operator, calib_fn=calib_fn, algorithm='direct')
+    self.assertAllClose(calib_image, maps)
+
+    # Test Walsh.
+    expected = coil_sensitivities.estimate_sensitivities(
+        calib_image, coil_axis=-3, method='walsh')
+    maps = coil_sensitivities.estimate_sensitivities_universal(
+        kspace, operator, calib_data=calib_data, algorithm='walsh')
+    self.assertAllClose(expected, maps)
+
+    # Test batch.
+    kspace_batch = tf.stack([kspace, 2 * kspace], axis=0)
+    expected = tf.stack([calib_image, 2 * calib_image], axis=0)
+    maps = coil_sensitivities.estimate_sensitivities_universal(
+        kspace_batch, operator, calib_fn=calib_fn, algorithm='direct')
+    self.assertAllClose(expected, maps)
+
+
+if __name__ == '__main__':
+  tf.test.main()
diff --git a/tensorflow_mri/python/experimental/__init__.py b/tensorflow_mri/python/experimental/__init__.py
index 9ed687ab..c49d30fa 100644
--- a/tensorflow_mri/python/experimental/__init__.py
+++ b/tensorflow_mri/python/experimental/__init__.py
@@ -1,4 +1,4 @@
-# Copyright 2021 University College London. All Rights Reserved.
+# Copyright 2021 The TensorFlow MRI Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/tensorflow_mri/python/experimental/layers.py b/tensorflow_mri/python/experimental/layers.py
index e0943fd9..368ef2f3 100644
--- a/tensorflow_mri/python/experimental/layers.py
+++ b/tensorflow_mri/python/experimental/layers.py
@@ -1,4 +1,4 @@
-# Copyright 2021 University College London. All Rights Reserved.
+# Copyright 2021 The TensorFlow MRI Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/tensorflow_mri/python/geometry/__init__.py b/tensorflow_mri/python/geometry/__init__.py
new file mode 100644
index 00000000..29dd1576
--- /dev/null
+++ b/tensorflow_mri/python/geometry/__init__.py
@@ -0,0 +1,18 @@
+# Copyright 2022 The TensorFlow MRI Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Geometric operations.""" + +from tensorflow_mri.python.geometry import rotation_2d +from tensorflow_mri.python.geometry import rotation_3d diff --git a/tensorflow_mri/python/geometry/rotation/__init__.py b/tensorflow_mri/python/geometry/rotation/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tensorflow_mri/python/geometry/rotation/euler_2d.py b/tensorflow_mri/python/geometry/rotation/euler_2d.py new file mode 100644 index 00000000..fa7851ba --- /dev/null +++ b/tensorflow_mri/python/geometry/rotation/euler_2d.py @@ -0,0 +1,54 @@ +# Copyright 2022 The TensorFlow MRI Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +# Copyright 2020 The TensorFlow Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""2D angles.""" + +import tensorflow as tf + + +def from_matrix(matrix): + """Converts a 2D rotation matrix to an angle. + + Args: + matrix: A `tf.Tensor` of shape `[..., 2, 2]`. + + Returns: + A `tf.Tensor` of shape `[..., 1]`. + + Raises: + ValueError: If the shape of `matrix` is invalid. + """ + matrix = tf.convert_to_tensor(matrix) + + if matrix.shape[-1] != 2 or matrix.shape[-2] != 2: + raise ValueError( + f"matrix must have shape `[..., 2, 2]`, but got: {matrix.shape}") + + angle = tf.math.atan2(matrix[..., 1, 0], matrix[..., 0, 0]) + return tf.expand_dims(angle, axis=-1) diff --git a/tensorflow_mri/python/geometry/rotation/quaternion.py b/tensorflow_mri/python/geometry/rotation/quaternion.py new file mode 100644 index 00000000..5287710e --- /dev/null +++ b/tensorflow_mri/python/geometry/rotation/quaternion.py @@ -0,0 +1,141 @@ +# Copyright 2022 The TensorFlow MRI Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +# Copyright 2020 The TensorFlow Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Quaternions.""" + +import tensorflow as tf + + +def from_euler(angles): + """Converts Euler angles to a quaternion. + + Args: + angles: A `tf.Tensor` of shape `[..., 3]`. + + Returns: + A `tf.Tensor` of shape `[..., 4]`. + + Raises: + ValueError: If the shape of `angles` is invalid. + """ + angles = tf.convert_to_tensor(angles) + + if angles.shape[-1] != 3: + raise ValueError(f"angles must have shape `[..., 3]`, " + f"but got: {angles.shape}") + + half_angles = angles / 2.0 + cos_half_angles = tf.math.cos(half_angles) + sin_half_angles = tf.math.sin(half_angles) + return _build_quaternion_from_sines_and_cosines(sin_half_angles, + cos_half_angles) + + +def from_small_euler(angles): + """Converts small Euler angles to a quaternion. + + Args: + angles: A `tf.Tensor` of shape `[..., 3]`. + + Returns: + A `tf.Tensor` of shape `[..., 4]`. + + Raises: + ValueError: If the shape of `angles` is invalid. 
+ """ + angles = tf.convert_to_tensor(angles) + + if angles.shape[-1] != 3: + raise ValueError(f"angles must have shape `[..., 3]`, " + f"but got: {angles.shape}") + + half_angles = angles / 2.0 + cos_half_angles = 1.0 - 0.5 * half_angles * half_angles + sin_half_angles = half_angles + quaternion = _build_quaternion_from_sines_and_cosines( + sin_half_angles, cos_half_angles) + + # We need to normalize the quaternion due to the small angle approximation. + return tf.nn.l2_normalize(quaternion, axis=-1) + + +def _build_quaternion_from_sines_and_cosines(sin_half_angles, cos_half_angles): + """Builds a quaternion from sines and cosines of half Euler angles. + + Args: + sin_half_angles: A tensor of shape `[..., 3]`, where the last + dimension represents the sine of half Euler angles. + cos_half_angles: A tensor of shape `[..., 3]`, where the last + dimension represents the cosine of half Euler angles. + + Returns: + A `tf.Tensor` of shape `[..., 4]`, where the last dimension represents + a quaternion. + """ + c1, c2, c3 = tf.unstack(cos_half_angles, axis=-1) + s1, s2, s3 = tf.unstack(sin_half_angles, axis=-1) + w = c1 * c2 * c3 + s1 * s2 * s3 + x = -c1 * s2 * s3 + s1 * c2 * c3 + y = c1 * s2 * c3 + s1 * c2 * s3 + z = -s1 * s2 * c3 + c1 * c2 * s3 + return tf.stack((x, y, z, w), axis=-1) + + +def multiply(quaternion1, quaternion2): + """Multiplies two quaternions. + + Args: + quaternion1: A `tf.Tensor` of shape `[..., 4]`, where the last dimension + represents a quaternion. + quaternion2: A `tf.Tensor` of shape `[..., 4]`, where the last dimension + represents a quaternion. + + Returns: + A `tf.Tensor` of shape `[..., 4]` representing quaternions. + + Raises: + ValueError: If the shape of `quaternion1` or `quaternion2` is invalid. 
+ """ + quaternion1 = tf.convert_to_tensor(value=quaternion1) + quaternion2 = tf.convert_to_tensor(value=quaternion2) + + if quaternion1.shape[-1] != 4: + raise ValueError(f"quaternion1 must have shape `[..., 4]`, " + f"but got: {quaternion1.shape}") + if quaternion2.shape[-1] != 4: + raise ValueError(f"quaternion2 must have shape `[..., 4]`, " + f"but got: {quaternion2.shape}") + + x1, y1, z1, w1 = tf.unstack(quaternion1, axis=-1) + x2, y2, z2, w2 = tf.unstack(quaternion2, axis=-1) + x = x1 * w2 + y1 * z2 - z1 * y2 + w1 * x2 + y = -x1 * z2 + y1 * w2 + z1 * x2 + w1 * y2 + z = x1 * y2 - y1 * x2 + z1 * w2 + w1 * z2 + w = -x1 * x2 - y1 * y2 - z1 * z2 + w1 * w2 + return tf.stack((x, y, z, w), axis=-1) diff --git a/tensorflow_mri/python/geometry/rotation/rotation_matrix.py b/tensorflow_mri/python/geometry/rotation/rotation_matrix.py new file mode 100644 index 00000000..ebc34f2f --- /dev/null +++ b/tensorflow_mri/python/geometry/rotation/rotation_matrix.py @@ -0,0 +1,144 @@ +# Copyright 2022 The TensorFlow MRI Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +# Copyright 2020 The TensorFlow Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Rotation matrices.""" + +import tensorflow as tf + + +def rotate(n, point, matrix): + """Rotates an N-D point using rotation matrix. + + Args: + n: An `int`. The dimension of the point and matrix. + point: A `tf.Tensor` of shape `[..., N]`. + matrix: A `tf.Tensor` of shape `[..., N, N]`. + + Returns: + A `tf.Tensor` of shape `[..., N]`. + + Raises: + ValueError: If the shape of the point or matrix is invalid. + """ + point = tf.convert_to_tensor(point) + matrix = tf.convert_to_tensor(matrix) + + if point.shape[-1] != n: + raise ValueError( + f"point must have shape [..., {n}], but got: {point.shape}") + if matrix.shape[-1] != n or matrix.shape[-2] != n: + raise ValueError( + f"matrix must have shape [..., {n}, {n}], but got: {matrix.shape}") + try: + static_batch_shape = tf.broadcast_static_shape( + point.shape[:-1], matrix.shape[:-2]) + except ValueError as err: + raise ValueError( + f"The batch shapes of point and this rotation matrix do not " + f"broadcast: {point.shape[:-1]} vs. 
{matrix.shape[:-2]}") from err + + common_batch_shape = tf.broadcast_dynamic_shape( + tf.shape(point)[:-1], tf.shape(matrix)[:-2]) + point = tf.broadcast_to(point, tf.concat( + [common_batch_shape, [n]], 0)) + matrix = tf.broadcast_to(matrix, tf.concat( + [common_batch_shape, [n, n]], 0)) + + rotated_point = tf.linalg.matvec(matrix, point) + output_shape = static_batch_shape.concatenate([n]) + return tf.ensure_shape(rotated_point, output_shape) + + +def inverse(n, matrix): + """Inverts an N-D rotation matrix. + + Args: + n: An `int`. The dimension of the matrix. + matrix: A `tf.Tensor` of shape `[..., N, N]`. + + Returns: + A `tf.Tensor` of shape `[..., N, N]`. + + Raises: + ValueError: If the shape of the matrix is invalid. + """ + matrix = tf.convert_to_tensor(matrix) + + if matrix.shape[-1] != n or matrix.shape[-2] != n: + raise ValueError( + f"matrix must have shape [..., {n}, {n}], but got: {matrix.shape}") + + return tf.linalg.matrix_transpose(matrix) + + +def is_valid(n, matrix, atol=1e-3): + """Checks if an N-D rotation matrix is valid. + + Args: + n: An `int`. The dimension of the matrix. + matrix: A `tf.Tensor` of shape `[..., N, N]`. + atol: A `float`. The absolute tolerance for checking if the matrix is valid. + + Returns: + A boolean `tf.Tensor` of shape `[..., 1]`. + + Raises: + ValueError: If the shape of the matrix is invalid. + """ + matrix = tf.convert_to_tensor(matrix) + + if matrix.shape[-1] != n or matrix.shape[-2] != n: + raise ValueError( + f"matrix must have shape [..., {n}, {n}], but got: {matrix.shape}") + + # Compute how far the determinant of the matrix is from 1. + distance_determinant = tf.abs(tf.linalg.det(matrix) - 1.) + + # Computes how far the product of the transposed rotation matrix with itself + # is from the identity matrix. 
+ identity = tf.eye(n, dtype=matrix.dtype) + inverse_matrix = tf.linalg.matrix_transpose(matrix) + distance_identity = tf.matmul(inverse_matrix, matrix) - identity + distance_identity = tf.norm(distance_identity, axis=[-2, -1]) + + # Computes the mask of entries that satisfies all conditions. + mask = tf.math.logical_and(distance_determinant < atol, + distance_identity < atol) + return tf.expand_dims(mask, axis=-1) + + +def check_shape(n, matrix): + matrix = tf.convert_to_tensor(matrix) + if matrix.shape.rank is not None and matrix.shape.rank < 2: + raise ValueError( + f"matrix must have rank >= 2, but got: {matrix.shape}") + if matrix.shape[-2] != n or matrix.shape[-1] != n: + raise ValueError( + f"matrix must have shape [..., {n}, {n}], " + f"but got: {matrix.shape}") diff --git a/tensorflow_mri/python/geometry/rotation/rotation_matrix_2d.py b/tensorflow_mri/python/geometry/rotation/rotation_matrix_2d.py new file mode 100644 index 00000000..72b86655 --- /dev/null +++ b/tensorflow_mri/python/geometry/rotation/rotation_matrix_2d.py @@ -0,0 +1,139 @@ +# Copyright 2022 The TensorFlow MRI Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +# Copyright 2020 The TensorFlow Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""2D rotation matrices.""" + +import tensorflow as tf + +from tensorflow_mri.python.geometry.rotation import rotation_matrix + + +def from_euler(angle): + """Converts an angle to a 2D rotation matrix. + + Args: + angle: A `tf.Tensor` of shape `[..., 1]`. + + Returns: + A `tf.Tensor` of shape `[..., 2, 2]`. + + Raises: + ValueError: If the shape of `angle` is invalid. + """ + angle = tf.convert_to_tensor(angle) + + if angle.shape[-1] != 1: + raise ValueError( + f"angle must have shape `[..., 1]`, but got: {angle.shape}") + + cos_angle = tf.math.cos(angle) + sin_angle = tf.math.sin(angle) + matrix = tf.stack([cos_angle, -sin_angle, sin_angle, cos_angle], axis=-1) # pylint: disable=invalid-unary-operand-type + output_shape = tf.concat([tf.shape(angle)[:-1], [2, 2]], axis=-1) # pylint: disable=unexpected-keyword-arg,no-value-for-parameter + return tf.reshape(matrix, output_shape) + + +def from_small_euler(angle): + """Converts a small angle to a 2D rotation matrix. + + Args: + angle: A `tf.Tensor` of shape `[..., 1]`. + + Returns: + A `tf.Tensor` of shape `[..., 2, 2]`. + + Raises: + ValueError: If the shape of `angle` is invalid. 
+ """ + angle = tf.convert_to_tensor(angle) + + if angle.shape[-1] != 1: + raise ValueError( + f"angle must have shape `[..., 1]`, but got: {angle.shape}") + + cos_angle = 1.0 - 0.5 * angle * angle + sin_angle = angle + matrix = tf.stack([cos_angle, -sin_angle, sin_angle, cos_angle], axis=-1) + output_shape = tf.concat([tf.shape(angle)[:-1], [2, 2]], axis=-1) # pylint: disable=unexpected-keyword-arg,no-value-for-parameter + return tf.reshape(matrix, output_shape) + + +def inverse(matrix): + """Inverts a 2D rotation matrix. + + Args: + matrix: A `tf.Tensor` of shape `[..., 2, 2]`. + + Returns: + A `tf.Tensor` of shape `[..., 2, 2]`. + + Raises: + ValueError: If the shape of `matrix` is invalid. + """ + return rotation_matrix.inverse(2, matrix) + + +def is_valid(matrix, atol=1e-3): + """Checks if a 2D rotation matrix is valid. + + Args: + matrix: A `tf.Tensor` of shape `[..., 2, 2]`. + + Returns: + A `tf.Tensor` of shape `[..., 1]` indicating whether the matrix is valid. + """ + return rotation_matrix.is_valid(2, matrix, atol=atol) + + +def rotate(point, matrix): + """Rotates a 2D point using rotation matrix. + + Args: + point: A `tf.Tensor` of shape `[..., 2]`. + matrix: A `tf.Tensor` of shape `[..., 2, 2]`. + + Returns: + A `tf.Tensor` of shape `[..., 2]`. + + Raises: + ValueError: If the shape of `point` or `matrix` is invalid. + """ + return rotation_matrix.rotate(2, point, matrix) + + +def check_shape(matrix): + """Checks the shape of `point` and `matrix`. + + Args: + matrix: A `tf.Tensor` of shape `[..., 2, 2]`. + + Raises: + ValueError: If the shape of `matrix` is invalid. + """ + rotation_matrix.check_shape(2, matrix) diff --git a/tensorflow_mri/python/geometry/rotation/rotation_matrix_3d.py b/tensorflow_mri/python/geometry/rotation/rotation_matrix_3d.py new file mode 100644 index 00000000..a9adee2a --- /dev/null +++ b/tensorflow_mri/python/geometry/rotation/rotation_matrix_3d.py @@ -0,0 +1,261 @@ +# Copyright 2022 The TensorFlow MRI Authors. 
All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +# Copyright 2020 The TensorFlow Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""3D rotation matrices.""" + +import tensorflow as tf + +from tensorflow_mri.python.geometry.rotation import rotation_matrix + + +def from_euler(angles): + """Converts Euler angles to a 3D rotation matrix. + + Args: + angles: A `tf.Tensor` of shape `[..., 3]`. + + Returns: + A `tf.Tensor` of shape `[..., 3, 3]`. + + Raises: + ValueError: If the shape of `angles` is invalid. 
+ """ + angles = tf.convert_to_tensor(angles) + + if angles.shape[-1] != 3: + raise ValueError( + f"angles must have shape `[..., 3]`, but got: {angles.shape}") + + sin_angles = tf.math.sin(angles) + cos_angles = tf.math.cos(angles) + return _build_matrix_from_sines_and_cosines(sin_angles, cos_angles) + + +def from_small_euler(angles): + """Converts small Euler angles to a 3D rotation matrix. + + Args: + angles: A `tf.Tensor` of shape `[..., 3]`. + + Returns: + A `tf.Tensor` of shape `[..., 3, 3]`. + + Raises: + ValueError: If the shape of `angles` is invalid. + """ + angles = tf.convert_to_tensor(angles) + + if angles.shape[-1:] != 3: + raise ValueError( + f"angles must have shape `[..., 3]`, but got: {angles.shape}") + + sin_angles = angles + cos_angles = 1.0 - 0.5 * tf.math.square(angles) + return _build_matrix_from_sines_and_cosines(sin_angles, cos_angles) + + +def from_axis_angle(axis, angle): + """Converts an axis-angle to a 3D rotation matrix. + + Args: + axis: A `tf.Tensor` of shape `[..., 3]`. + angle: A `tf.Tensor` of shape `[..., 1]`. + + Returns: + A `tf.Tensor` of shape `[..., 3, 3]`. + + Raises: + ValueError: If the shape of `axis` or `angle` is invalid. + """ + axis = tf.convert_to_tensor(axis) + angle = tf.convert_to_tensor(angle) + + if axis.shape[-1] != 3: + raise ValueError( + f"axis must have shape `[..., 3]`, but got: {axis.shape}") + if angle.shape[-1:] != 1: + raise ValueError( + f"angle must have shape `[..., 1]`, but got: {angle.shape}") + + try: + _ = tf.broadcast_static_shape(axis.shape[:-1], angle.shape[:-1]) + except ValueError as err: + raise ValueError( + f"The batch shapes of axis and angle do not " + f"broadcast: {axis.shape[:-1]} vs. 
{angle.shape[:-1]}") from err + + sin_axis = tf.sin(angle) * axis + cos_angle = tf.cos(angle) + cos1_axis = (1.0 - cos_angle) * axis + _, axis_y, axis_z = tf.unstack(axis, axis=-1) + cos1_axis_x, cos1_axis_y, _ = tf.unstack(cos1_axis, axis=-1) + sin_axis_x, sin_axis_y, sin_axis_z = tf.unstack(sin_axis, axis=-1) + tmp = cos1_axis_x * axis_y + m01 = tmp - sin_axis_z + m10 = tmp + sin_axis_z + tmp = cos1_axis_x * axis_z + m02 = tmp + sin_axis_y + m20 = tmp - sin_axis_y + tmp = cos1_axis_y * axis_z + m12 = tmp - sin_axis_x + m21 = tmp + sin_axis_x + diag = cos1_axis * axis + cos_angle + diag_x, diag_y, diag_z = tf.unstack(diag, axis=-1) + matrix = tf.stack([diag_x, m01, m02, + m10, diag_y, m12, + m20, m21, diag_z], axis=-1) + output_shape = tf.concat([tf.shape(axis)[:-1], [3, 3]], axis=-1) # pylint: disable=unexpected-keyword-arg,no-value-for-parameter + return tf.reshape(matrix, output_shape) + + +def from_quaternion(quaternion): + """Converts a quaternion to a 3D rotation matrix. + + Args: + quaternion: A `tf.Tensor` of shape `[..., 4]`. + + Returns: + A `tf.Tensor` of shape `[..., 3, 3]`. + + Raises: + ValueError: If the shape of `quaternion` is invalid. 
+ """ + quaternion = tf.convert_to_tensor(quaternion) + + if quaternion.shape[-1] != 4: + raise ValueError(f"quaternion must have shape `[..., 4]`, " + f"but got: {quaternion.shape}") + + x, y, z, w = tf.unstack(quaternion, axis=-1) + tx = 2.0 * x + ty = 2.0 * y + tz = 2.0 * z + twx = tx * w + twy = ty * w + twz = tz * w + txx = tx * x + txy = ty * x + txz = tz * x + tyy = ty * y + tyz = tz * y + tzz = tz * z + matrix = tf.stack([1.0 - (tyy + tzz), txy - twz, txz + twy, + txy + twz, 1.0 - (txx + tzz), tyz - twx, + txz - twy, tyz + twx, 1.0 - (txx + tyy)], axis=-1) + output_shape = tf.concat([tf.shape(quaternion)[:-1], [3, 3]], axis=-1) # pylint: disable=unexpected-keyword-arg,no-value-for-parameter + return tf.reshape(matrix, output_shape) + + +def _build_matrix_from_sines_and_cosines(sin_angles, cos_angles): + """Builds a 3D rotation matrix from sines and cosines of Euler angles. + + Args: + sin_angles: A tensor of shape `[..., 3]`, where the last dimension + represents the sine of the Euler angles. + cos_angles: A tensor of shape `[..., 3]`, where the last dimension + represents the cosine of the Euler angles. + + Returns: + A `tf.Tensor` of shape `[..., 3, 3]`, where the last two dimensions + represent a 3D rotation matrix. + """ + sin_angles.shape.assert_is_compatible_with(cos_angles.shape) + + sx, sy, sz = tf.unstack(sin_angles, axis=-1) + cx, cy, cz = tf.unstack(cos_angles, axis=-1) + m00 = cy * cz + m01 = (sx * sy * cz) - (cx * sz) + m02 = (cx * sy * cz) + (sx * sz) + m10 = cy * sz + m11 = (sx * sy * sz) + (cx * cz) + m12 = (cx * sy * sz) - (sx * cz) + m20 = -sy + m21 = sx * cy + m22 = cx * cy + matrix = tf.stack([m00, m01, m02, + m10, m11, m12, + m20, m21, m22], + axis=-1) + output_shape = tf.concat([tf.shape(sin_angles)[:-1], [3, 3]], axis=-1) # pylint: disable=unexpected-keyword-arg,no-value-for-parameter + return tf.reshape(matrix, output_shape) + + +def inverse(matrix): + """Inverts a 3D rotation matrix. 
+ + Args: + matrix: A `tf.Tensor` of shape `[..., 3, 3]`. + + Returns: + A `tf.Tensor` of shape `[..., 3, 3]`. + + Raises: + ValueError: If the shape of `matrix` is invalid. + """ + return rotation_matrix.inverse(3, matrix) + + +def is_valid(matrix, atol=1e-3): + """Checks if a 3D rotation matrix is valid. + + Args: + matrix: A `tf.Tensor` of shape `[..., 3, 3]`. + + Returns: + A `tf.Tensor` of shape `[..., 1]` indicating whether the matrix is valid. + """ + return rotation_matrix.is_valid(3, matrix, atol=atol) + + +def rotate(point, matrix): + """Rotates a 3D point using rotation matrix. + + Args: + point: A `tf.Tensor` of shape `[..., 3]`. + matrix: A `tf.Tensor` of shape `[..., 3, 3]`. + + Returns: + A `tf.Tensor` of shape `[..., 3]`. + + Raises: + ValueError: If the shape of `point` or `matrix` is invalid. + """ + return rotation_matrix.rotate(3, point, matrix) + + +def check_shape(matrix): + """Checks the shape of `point` and `matrix`. + + Args: + matrix: A `tf.Tensor` of shape `[..., 3, 3]`. + + Raises: + ValueError: If the shape of `matrix` is invalid. + """ + rotation_matrix.check_shape(3, matrix) diff --git a/tensorflow_mri/python/geometry/rotation/test_data.py b/tensorflow_mri/python/geometry/rotation/test_data.py new file mode 100644 index 00000000..3e288c7f --- /dev/null +++ b/tensorflow_mri/python/geometry/rotation/test_data.py @@ -0,0 +1,136 @@ +# Copyright 2020 The TensorFlow Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+"""Module with test data for transformation tests.""" +# This file is copied from TensorFlow Graphics. + +import numpy as np + +ANGLE_0 = np.array((0.,)) +ANGLE_45 = np.array((np.pi / 4.,)) +ANGLE_90 = np.array((np.pi / 2.,)) +ANGLE_180 = np.array((np.pi,)) + +AXIS_2D_0 = np.array((0., 0.)) +AXIS_2D_X = np.array((1., 0.)) +AXIS_2D_Y = np.array((0., 1.)) + + +def _rotation_2d_x(angle): + """Creates a 2d rotation matrix. + + Args: + angle: The angle. + + Returns: + The 2d rotation matrix. + """ + angle = angle.item() + return np.array(((np.cos(angle), -np.sin(angle)), + (np.sin(angle), np.cos(angle)))) # pyformat: disable + + +MAT_2D_ID = np.eye(2) +MAT_2D_45 = _rotation_2d_x(ANGLE_45) +MAT_2D_90 = _rotation_2d_x(ANGLE_90) +MAT_2D_180 = _rotation_2d_x(ANGLE_180) + +AXIS_3D_0 = np.array((0., 0., 0.)) +AXIS_3D_X = np.array((1., 0., 0.)) +AXIS_3D_Y = np.array((0., 1., 0.)) +AXIS_3D_Z = np.array((0., 0., 1.)) + + +def _axis_angle_to_quaternion(axis, angle): + """Converts an axis-angle representation to a quaternion. + + Args: + axis: The axis of rotation. + angle: The angle. + + Returns: + The quaternion. + """ + quat = np.zeros(4) + quat[0:3] = axis * np.sin(0.5 * angle) + quat[3] = np.cos(0.5 * angle) + return quat + + +QUAT_ID = _axis_angle_to_quaternion(AXIS_3D_0, ANGLE_0) +QUAT_X_45 = _axis_angle_to_quaternion(AXIS_3D_X, ANGLE_45) +QUAT_X_90 = _axis_angle_to_quaternion(AXIS_3D_X, ANGLE_90) +QUAT_X_180 = _axis_angle_to_quaternion(AXIS_3D_X, ANGLE_180) +QUAT_Y_45 = _axis_angle_to_quaternion(AXIS_3D_Y, ANGLE_45) +QUAT_Y_90 = _axis_angle_to_quaternion(AXIS_3D_Y, ANGLE_90) +QUAT_Y_180 = _axis_angle_to_quaternion(AXIS_3D_Y, ANGLE_180) +QUAT_Z_45 = _axis_angle_to_quaternion(AXIS_3D_Z, ANGLE_45) +QUAT_Z_90 = _axis_angle_to_quaternion(AXIS_3D_Z, ANGLE_90) +QUAT_Z_180 = _axis_angle_to_quaternion(AXIS_3D_Z, ANGLE_180) + + +def _rotation_3d_x(angle): + """Creates a 3d rotation matrix around the x axis. + + Args: + angle: The angle. + + Returns: + The 3d rotation matrix. 
+ """ + angle = angle.item() + return np.array(((1., 0., 0.), + (0., np.cos(angle), -np.sin(angle)), + (0., np.sin(angle), np.cos(angle)))) # pyformat: disable + + +def _rotation_3d_y(angle): + """Creates a 3d rotation matrix around the y axis. + + Args: + angle: The angle. + + Returns: + The 3d rotation matrix. + """ + angle = angle.item() + return np.array(((np.cos(angle), 0., np.sin(angle)), + (0., 1., 0.), + (-np.sin(angle), 0., np.cos(angle)))) # pyformat: disable + + +def _rotation_3d_z(angle): + """Creates a 3d rotation matrix around the z axis. + + Args: + angle: The angle. + + Returns: + The 3d rotation matrix. + """ + angle = angle.item() + return np.array(((np.cos(angle), -np.sin(angle), 0.), + (np.sin(angle), np.cos(angle), 0.), + (0., 0., 1.))) # pyformat: disable + + +MAT_3D_ID = np.eye(3) +MAT_3D_X_45 = _rotation_3d_x(ANGLE_45) +MAT_3D_X_90 = _rotation_3d_x(ANGLE_90) +MAT_3D_X_180 = _rotation_3d_x(ANGLE_180) +MAT_3D_Y_45 = _rotation_3d_y(ANGLE_45) +MAT_3D_Y_90 = _rotation_3d_y(ANGLE_90) +MAT_3D_Y_180 = _rotation_3d_y(ANGLE_180) +MAT_3D_Z_45 = _rotation_3d_z(ANGLE_45) +MAT_3D_Z_90 = _rotation_3d_z(ANGLE_90) +MAT_3D_Z_180 = _rotation_3d_z(ANGLE_180) diff --git a/tensorflow_mri/python/geometry/rotation/test_helpers.py b/tensorflow_mri/python/geometry/rotation/test_helpers.py new file mode 100644 index 00000000..36ca83fa --- /dev/null +++ b/tensorflow_mri/python/geometry/rotation/test_helpers.py @@ -0,0 +1,263 @@ +# Copyright 2020 The TensorFlow Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +"""Test helpers for the transformation module.""" +# This file is copied from TensorFlow Graphics. + +import itertools +import math + +import numpy as np +from scipy import stats +from six.moves import range +import tensorflow as tf + +from tensorflow_mri.python.geometry.rotation import rotation_matrix_2d +from tensorflow_mri.python.geometry.rotation import rotation_matrix_3d +from tensorflow_mri.python.geometry.rotation import quaternion + + +def generate_preset_test_euler_angles(dimensions=3): + """Generates a permutation with duplicate of some classic euler angles.""" + permutations = itertools.product( + [0., np.pi, np.pi / 2., np.pi / 3., np.pi / 4., np.pi / 6.], + repeat=dimensions) + return np.array(list(permutations)) + + +def generate_preset_test_translations(dimensions=3): + """Generates a set of translations.""" + permutations = itertools.product([0.1, -0.2, 0.5, 0.7, 0.4, -0.1], + repeat=dimensions) + return np.array(list(permutations)) + + +def generate_preset_test_rotation_matrices_3d(): + """Generates pre-set test 3d rotation matrices.""" + angles = generate_preset_test_euler_angles() + preset_rotation_matrix = rotation_matrix_3d.from_euler(angles) + return preset_rotation_matrix + + +def generate_preset_test_rotation_matrices_2d(): + """Generates pre-set test 2d rotation matrices.""" + angles = generate_preset_test_euler_angles(dimensions=1) + preset_rotation_matrix = rotation_matrix_2d.from_euler(angles) + return preset_rotation_matrix + + +def generate_preset_test_quaternions(): + """Generates pre-set test quaternions.""" + angles = generate_preset_test_euler_angles() + preset_quaternion = quaternion.from_euler(angles) + return preset_quaternion + + +def generate_preset_test_dual_quaternions(): + """Generates pre-set test quaternions.""" + angles = generate_preset_test_euler_angles() + preset_quaternion_real = quaternion.from_euler(angles) + + 
translations = generate_preset_test_translations() + translations = np.concatenate( + (translations / 2.0, np.zeros((np.ma.size(translations, 0), 1))), axis=1) + preset_quaternion_translation = tf.convert_to_tensor(value=translations) + + preset_quaternion_dual = quaternion.multiply(preset_quaternion_translation, + preset_quaternion_real) + + preset_dual_quaternion = tf.concat( # pylint: disable=unexpected-keyword-arg,no-value-for-parameter + (preset_quaternion_real, preset_quaternion_dual), axis=-1) + + return preset_dual_quaternion + + +def generate_random_test_euler_angles_translations( + dimensions=3, + min_angle=-3.0 * np.pi, + max_angle=3.0 * np.pi, + min_translation=3.0, + max_translation=3.0): + """Generates random test random Euler angles and translations.""" + tensor_dimensions = np.random.randint(3) + tensor_tile = np.random.randint(1, 10, tensor_dimensions).tolist() + return (np.random.uniform(min_angle, max_angle, tensor_tile + [dimensions]), + np.random.uniform(min_translation, max_translation, + tensor_tile + [dimensions])) + + +def generate_random_test_dual_quaternions(): + """Generates random test dual quaternions.""" + angles = generate_random_test_euler_angles() + random_quaternion_real = quaternion.from_euler(angles) + + min_translation = -3.0 + max_translation = 3.0 + translations = np.random.uniform(min_translation, max_translation, + angles.shape) + + translations_quaternion_shape = np.asarray(translations.shape) + translations_quaternion_shape[-1] = 1 + translations = np.concatenate( + (translations / 2.0, np.zeros(translations_quaternion_shape)), axis=-1) + + random_quaternion_translation = tf.convert_to_tensor(value=translations) + + random_quaternion_dual = quaternion.multiply(random_quaternion_translation, + random_quaternion_real) + + random_dual_quaternion = tf.concat( # pylint: disable=unexpected-keyword-arg,no-value-for-parameter + (random_quaternion_real, random_quaternion_dual), axis=-1) + + return random_dual_quaternion + + +def 
generate_random_test_euler_angles(dimensions=3, + min_angle=-3. * np.pi, + max_angle=3. * np.pi): + """Generates random test random Euler angles.""" + tensor_dimensions = np.random.randint(3) + tensor_tile = np.random.randint(1, 10, tensor_dimensions).tolist() + return np.random.uniform(min_angle, max_angle, tensor_tile + [dimensions]) + + +def generate_random_test_quaternions(tensor_shape=None): # pylint: disable=missing-param-doc + """Generates random test quaternions.""" + if tensor_shape is None: + tensor_dimensions = np.random.randint(low=1, high=3) + tensor_shape = np.random.randint(1, 10, size=(tensor_dimensions)).tolist() + u1 = np.random.uniform(0.0, 1.0, tensor_shape) + u2 = np.random.uniform(0.0, 2.0 * math.pi, tensor_shape) + u3 = np.random.uniform(0.0, 2.0 * math.pi, tensor_shape) + a = np.sqrt(1.0 - u1) + b = np.sqrt(u1) + return np.stack((a * np.sin(u2), + a * np.cos(u2), + b * np.sin(u3), + b * np.cos(u3)), + axis=-1) # pyformat: disable + + +def generate_random_test_axis_angle(): + """Generates random test axis-angles.""" + tensor_dimensions = np.random.randint(3) + tensor_shape = np.random.randint(1, 10, size=(tensor_dimensions)).tolist() + random_axis = np.random.uniform(size=tensor_shape + [3]) + random_axis /= np.linalg.norm(random_axis, axis=-1, keepdims=True) + random_angle = np.random.uniform(size=tensor_shape + [1]) + return random_axis, random_angle + + +def generate_random_test_rotation_matrix_3d(): + """Generates random test 3d rotation matrices.""" + random_matrix = np.array( + [stats.special_ortho_group.rvs(3) for _ in range(20)]) + return np.reshape(random_matrix, [5, 4, 3, 3]) + + +def generate_random_test_rotation_matrix_2d(): + """Generates random test 2d rotation matrices.""" + random_matrix = np.array( + [stats.special_ortho_group.rvs(2) for _ in range(20)]) + return np.reshape(random_matrix, [5, 4, 2, 2]) + + +def generate_random_test_lbs_blend(): + """Generates random test for the linear blend skinning blend function.""" + 
tensor_dimensions = np.random.randint(3) + tensor_shape = np.random.randint(1, 10, size=(tensor_dimensions)).tolist() + random_points = np.random.uniform(size=tensor_shape + [3]) + num_weights = np.random.randint(2, 10) + random_weights = np.random.uniform(size=tensor_shape + [num_weights]) + random_weights /= np.sum(random_weights, axis=-1, keepdims=True) + + random_rotations = np.array( + [stats.special_ortho_group.rvs(3) for _ in range(num_weights)]) + random_rotations = np.reshape(random_rotations, [num_weights, 3, 3]) + random_translations = np.random.uniform(size=[num_weights, 3]) + return random_points, random_weights, random_rotations, random_translations + + +def generate_preset_test_lbs_blend(): + """Generates preset test for the linear blend skinning blend function.""" + points = np.array([[[1.0, 0.0, 0.0], [0.1, 0.2, 0.5]], + [[0.0, 1.0, 0.0], [0.3, -0.5, 0.2]], + [[-0.3, 0.1, 0.3], [0.1, -0.9, -0.4]]]) + weights = np.array([[[0.0, 1.0, 0.0, 0.0], [0.4, 0.2, 0.3, 0.1]], + [[0.6, 0.0, 0.4, 0.0], [0.2, 0.2, 0.1, 0.5]], + [[0.0, 0.1, 0.0, 0.9], [0.1, 0.2, 0.3, 0.4]]]) + rotations = np.array( + [[[[1.0, 0.0, 0.0], + [0.0, 1.0, 0.0], + [0.0, 0.0, 1.0]], + [[0.36, 0.48, -0.8], + [-0.8, 0.60, 0.00], + [0.48, 0.64, 0.60]], + [[0.0, 0.0, 1.0], + [1.0, 0.0, 0.0], + [0.0, 1.0, 0.0]], + [[0.0, 1.0, 0.0], + [1.0, 0.0, 0.0], + [0.0, 0.0, -1.0]]], + [[[-0.41554751, -0.42205085, -0.80572535], + [0.08028719, -0.89939186, 0.42970716], + [-0.9060211, 0.11387432, 0.40762533]], + [[-0.05240625, -0.24389111, 0.96838562], + [0.99123384, -0.13047444, 0.02078231], + [0.12128095, 0.96098572, 0.2485908]], + [[-0.32722936, -0.06793413, -0.94249981], + [-0.70574479, 0.68082693, 0.19595657], + [0.62836712, 0.72928708, -0.27073072]], + [[-0.22601332, -0.95393284, 0.19730719], + [-0.01189659, 0.20523618, 0.97864017], + [-0.97405157, 0.21883843, -0.05773466]]]]) # pyformat: disable + translations = np.array( + [[[0.1, -0.2, 0.5], + [-0.2, 0.7, 0.7], + [0.8, -0.2, 0.4], + [-0.1, 0.2, 
-0.3]], + [[0.5, 0.6, 0.9], + [-0.1, -0.3, -0.7], + [0.4, -0.2, 0.8], + [0.7, 0.8, -0.4]]]) # pyformat: disable + blended_points = np.array([[[[0.16, -0.1, 1.18], [0.3864, 0.148, 0.7352]], + [[0.38, 0.4, 0.86], [-0.2184, 0.152, 0.0088]], + [[-0.05, 0.01, -0.46], [-0.3152, -0.004, + -0.1136]]], + [[[-0.15240625, 0.69123384, -0.57871905], + [0.07776242, 0.33587402, 0.55386645]], + [[0.17959584, 0.01269566, 1.22003942], + [0.71406514, 0.6187734, -0.43794053]], + [[0.67662743, 0.94549789, -0.14946982], + [0.88587099, -0.09324637, -0.45012815]]]]) + + return points, weights, rotations, translations, blended_points + + +def generate_random_test_axis_angle_translation(): + """Generates random test angles, axes, translations.""" + tensor_dimensions = np.random.randint(3) + tensor_shape = np.random.randint(1, 10, size=(tensor_dimensions)).tolist() + random_axis = np.random.uniform(size=tensor_shape + [3]) + random_axis /= np.linalg.norm(random_axis, axis=-1, keepdims=True) + random_angle = np.random.uniform(size=tensor_shape + [1]) + random_translation = np.random.uniform(size=tensor_shape + [3]) + return random_axis, random_angle, random_translation + + +def generate_random_test_points(): + """Generates random 3D points.""" + tensor_dimensions = np.random.randint(3) + tensor_shape = np.random.randint(1, 10, size=(tensor_dimensions)).tolist() + random_point = np.random.uniform(size=tensor_shape + [3]) + return random_point diff --git a/tensorflow_mri/python/geometry/rotation_2d.py b/tensorflow_mri/python/geometry/rotation_2d.py new file mode 100644 index 00000000..e6a96d71 --- /dev/null +++ b/tensorflow_mri/python/geometry/rotation_2d.py @@ -0,0 +1,420 @@ +# Copyright 2022 The TensorFlow MRI Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""2D rotation.""" + +import tensorflow as tf + +from tensorflow_mri.python.geometry.rotation import euler_2d +from tensorflow_mri.python.geometry.rotation import rotation_matrix_2d +from tensorflow_mri.python.util import api_util + + +@api_util.export("geometry.Rotation2D") +class Rotation2D(tf.experimental.BatchableExtensionType): # pylint: disable=abstract-method + """Represents a rotation in 2D space (or a batch thereof). + + A `Rotation2D` contains all the information needed to represent a rotation + in 2D space (or a multidimensional array of rotations) and provides + convenient methods to work with rotations. + + ## Initialization + + You can initialize a `Rotation2D` object using one of the `from_*` class + methods: + + - `from_matrix`, to initialize using a + [rotation matrix](https://en.wikipedia.org/wiki/Rotation_matrix). + - `from_euler`, to initialize using an angle (in radians). + - `from_small_euler`, to initialize using an angle which is small enough + to fall under the [small angle approximation](https://en.wikipedia.org/wiki/Small-angle_approximation). + + All of the above methods accept batched inputs, in which case the returned + `Rotation2D` object will represent a batch of rotations. + + ## Methods + + Once initialized, `Rotation2D` objects expose several methods to operate + easily with rotations. These methods are all used in the same way regardless + of how the `Rotation2D` was originally initialized. 
+ + - `rotate` rotates a point or a batch of points. The batch shapes of the + point and this rotation will be broadcasted. + - `inverse` returns a new `Rotation2D` object representing the inverse of + the current rotation. + - `is_valid` can be used to check if the rotation is valid. + + ## Conversion to other representations + + The `as_*` methods can be used to obtain an explicit representation + of this rotation as a standard `tf.Tensor`. + + - `as_matrix` returns the corresponding rotation matrix. + - `as_euler` returns the corresponding angle (in radians). + + ## Shape and dtype + + `Rotation2D` objects have a shape and a dtype, accessible via the `shape` and + `dtype` properties. Because this operator acts like a rotation matrix, its + shape corresponds to the shape of the rotation matrix. In other words, + `rot.shape` is equal to `rot.as_matrix().shape`. + + ```{note} + As with `tf.Tensor`s, the `shape` attribute contains the static shape + as a `tf.TensorShape` and may not be fully defined outside eager execution. + To obtain the dynamic shape of a `Rotation2D` object, use `tf.shape`. + ``` + + ## Operators + + `Rotation2D` objects also override a few operators for concise and intuitive + use. + + - `==` (equality operator) can be used to check if two `Rotation2D` objects + are equal. This checks if the rotations are equivalent, regardless of how + they were defined (`rot1 == rot2`). + - `@` (matrix multiplication operator) can be used to compose two rotations + (`rot = rot1 @ rot2`). + + ## Compatibility with TensorFlow APIs + + Some TensorFlow APIs are explicitly overriden to operate with `Rotation2D` + objects. These include: + + ```{list-table} + --- + header-rows: 1 + --- + + * - API + - Description + - Notes + * - `tf.convert_to_tensor` + - Converts a `Rotation2D` to a `tf.Tensor` containing the corresponding + rotation matrix. + - `tf.convert_to_tensor(rot)` is equivalent to `rot.as_matrix()`. 
+ * - `tf.linalg.matmul` + - Composes two `Rotation2D` objects. + - `tf.linalg.matmul(rot1, rot2)` is equivalent to `rot1 @ rot2`. + * - `tf.linalg.matvec` + - Rotates a point or a batch of points. + - `tf.linalg.matvec(rot, point)` is equivalent to `rot.rotate(point)`. + * - `tf.shape` + - Returns the dynamic shape of a `Rotation2D` object. + - + ``` + + ```{tip} + In general, a `Rotation2D` object behaves like a rotation matrix, although + its internal representation may differ. + ``` + + ```{warning} + While other TensorFlow APIs may also work as expected when passed a + `Rotation2D`, this is not supported and their behavior may change in the + future. + ``` + + Example: + + >>> # Initialize a rotation object using a rotation matrix. + >>> rot = tfmri.geometry.Rotation2D.from_matrix([[0.0, -1.0], [1.0, 0.0]]) + >>> print(rot) + tfmri.geometry.Rotation2D(shape=(2, 2), dtype=float32) + >>> # Rotate a point. + >>> point = tf.constant([1.0, 0.0], dtype=tf.float32) + >>> rotated = rot.rotate(point) + >>> print(rotated) + tf.Tensor([0. 1.], shape=(2,), dtype=float32) + >>> # Rotate the point back using the inverse rotation. + >>> inv_rot = rot.inverse() + >>> restored = inv_rot.rotate(rotated) + >>> print(restored) + tf.Tensor([1. 0.], shape=(2,), dtype=float32) + >>> # Get the rotation matrix for the inverse rotation. + >>> print(inv_rot.as_matrix()) + tf.Tensor( + [[ 0. 1.] + [-1. 0.]], shape=(2, 2), dtype=float32) + >>> # You can also initialize a rotation using an angle: + >>> rot2 = tfmri.geometry.Rotation2D.from_euler([np.pi / 2]) + >>> rotated2 = rot.rotate(point) + >>> np.allclose(rotated2, rotated) + True + + """ + __name__ = "tfmri.geometry.Rotation2D" + _matrix: tf.Tensor + + @classmethod + def from_matrix(cls, matrix, name=None): + r"""Creates a 2D rotation from a rotation matrix. + + Args: + matrix: A `tf.Tensor` of shape `[..., 2, 2]`, where the last two + dimensions represent a rotation matrix. + name: A name for this op. 
Defaults to `"rotation_2d/from_matrix"`. + + Returns: + A `Rotation2D`. + """ + with tf.name_scope(name or "rotation_2d/from_matrix"): + return cls(_matrix=matrix) + + @classmethod + def from_euler(cls, angle, name=None): + r"""Creates a 2D rotation from an angle. + + The resulting rotation acts like the following rotation matrix: + + $$ + \mathbf{R} = + \begin{bmatrix} + \cos(\theta) & -\sin(\theta) \\ + \sin(\theta) & \cos(\theta) + \end{bmatrix}. + $$ + + ```{note} + The resulting rotation rotates points in the $xy$-plane counterclockwise. + ``` + + Args: + angle: A `tf.Tensor` of shape `[..., 1]`, where the last dimension + represents an angle in radians. + name: A name for this op. Defaults to `"rotation_2d/from_euler"`. + + Returns: + A `Rotation2D`. + + Raises: + ValueError: If the shape of `angle` is invalid. + """ + with tf.name_scope(name or "rotation_2d/from_euler"): + return cls(_matrix=rotation_matrix_2d.from_euler(angle)) + + @classmethod + def from_small_euler(cls, angle, name=None): + r"""Creates a 2D rotation from a small angle. + + Uses the small angle approximation to compute the rotation. Under the + small angle assumption, $\sin(x)$$ and $$\cos(x)$ can be approximated by + their second order Taylor expansions, where $\sin(x) \approx x$ and + $\cos(x) \approx 1 - \frac{x^2}{2}$. + + The resulting rotation acts like the following rotation matrix: + + $$ + \mathbf{R} = + \begin{bmatrix} + 1.0 - 0.5\theta^2 & -\theta \\ + \theta & 1.0 - 0.5\theta^2 + \end{bmatrix}. + $$ + + ```{note} + The resulting rotation rotates points in the $xy$-plane counterclockwise. + ``` + + ```{note} + This function does not verify the smallness of the angles. + ``` + + Args: + angle: A `tf.Tensor` of shape `[..., 1]`, where the last dimension + represents an angle in radians. + name: A name for this op. Defaults to "rotation_2d/from_small_euler". + + Returns: + A `Rotation2D`. + + Raises: + ValueError: If the shape of `angle` is invalid. 
+ """ + with tf.name_scope(name or "rotation_2d/from_small_euler"): + return cls(_matrix=rotation_matrix_2d.from_small_euler(angle)) + + def as_matrix(self, name=None): + r"""Returns a rotation matrix representation of this rotation. + + Args: + name: A name for this op. Defaults to `"rotation_2d/as_matrix"`. + + Returns: + A `tf.Tensor` of shape `[..., 2, 2]`, where the last two dimensions + represent a rotation matrix. + """ + with tf.name_scope(name or "rotation_2d/as_matrix"): + return tf.identity(self._matrix) + + def as_euler(self, name=None): + r"""Returns an angle representation of this rotation. + + Args: + name: A name for this op. Defaults to `"rotation_2d/as_euler"`. + + Returns: + A `tf.Tensor` of shape `[..., 1]`, where the last dimension represents an + angle in radians. + """ + with tf.name_scope(name or "rotation_2d/as_euler"): + return euler_2d.from_matrix(self._matrix) + + def inverse(self, name=None): + r"""Computes the inverse of this rotation. + + Args: + name: A name for this op. Defaults to `"rotation_2d/inverse"`. + + Returns: + A `Rotation2D` representing the inverse of this rotation. + """ + with tf.name_scope(name or "rotation_2d/inverse"): + return Rotation2D(_matrix=rotation_matrix_2d.inverse(self._matrix)) + + def is_valid(self, atol=1e-3, name=None): + r"""Determines if this is a valid rotation. + + A rotation matrix $\mathbf{R}$ is a valid rotation matrix if + $\mathbf{R}^T\mathbf{R} = \mathbf{I}$ and $\det(\mathbf{R}) = 1$. + + Args: + atol: A `float`. The absolute tolerance parameter. + name: A name for this op. Defaults to `"rotation_2d/is_valid"`. + + Returns: + A boolean `tf.Tensor` with shape `[..., 1]`, `True` if the corresponding + matrix is valid and `False` otherwise. + """ + with tf.name_scope(name or "rotation_2d/is_valid"): + return rotation_matrix_2d.is_valid(self._matrix, atol=atol) + + def rotate(self, point, name=None): + r"""Rotates a 2D point. 
+ + Args: + point: A `tf.Tensor` of shape `[..., 2]`, where the last dimension + represents a 2D point and `...` represents any number of batch + dimensions, which must be broadcastable with the batch shape of this + rotation. + name: A name for this op. Defaults to `"rotation_2d/rotate"`. + + Returns: + A `tf.Tensor` of shape `[..., 2]`, where the last dimension represents + a 2D point and `...` is the result of broadcasting the batch shapes of + `point` and this rotation matrix. + + Raises: + ValueError: If the shape of `point` is invalid. + """ + with tf.name_scope(name or "rotation_2d/rotate"): + return rotation_matrix_2d.rotate(point, self._matrix) + + def __eq__(self, other): + """Returns true if this rotation is equivalent to the other rotation.""" + return tf.math.reduce_all( + tf.math.equal(self._matrix, other._matrix), axis=[-2, -1]) + + def __matmul__(self, other): + """Composes this rotation with another rotation.""" + if isinstance(other, Rotation2D): + return Rotation2D(_matrix=tf.matmul(self._matrix, other._matrix)) + raise ValueError( + f"Cannot compose a `Rotation2D` with a `{type(other).__name__}`.") + + def __repr__(self): + """Returns a string representation of this rotation.""" + name = self.__name__ + return f"<{name}(shape={str(self.shape)}, dtype={self.dtype.name})>" + + def __str__(self): + """Returns a string representation of this rotation.""" + return self.__repr__()[1:-1] + + def __validate__(self): + """Checks that this rotation is a valid rotation. + + Only performs static checks. + """ + rotation_matrix_2d.check_shape(self._matrix) + + @property + def shape(self): + """Returns the shape of this rotation. + + Returns: + A `tf.TensorShape`. + """ + return self._matrix.shape + + @property + def dtype(self): + """Returns the dtype of this rotation. + + Returns: + A `tf.dtypes.DType`. 
+ """ + return self._matrix.dtype + + +@tf.experimental.dispatch_for_api(tf.convert_to_tensor, {'value': Rotation2D}) +def convert_to_tensor(value, dtype=None, dtype_hint=None, name=None): + """Overrides `tf.convert_to_tensor` for `Rotation2D` objects.""" + return tf.convert_to_tensor( + value.as_matrix(), dtype=dtype, dtype_hint=dtype_hint, name=name) + + +@tf.experimental.dispatch_for_api( + tf.linalg.matmul, {'a': Rotation2D, 'b': Rotation2D}) +def matmul(a, b, # pylint: disable=missing-param-doc + transpose_a=False, + transpose_b=False, + adjoint_a=False, + adjoint_b=False, + a_is_sparse=False, + b_is_sparse=False, + output_type=None, + name=None): + """Overrides `tf.linalg.matmul` for `Rotation2D` objects.""" + if a_is_sparse or b_is_sparse: + raise ValueError("Rotation2D does not support sparse matmul.") + return Rotation2D(_matrix=tf.linalg.matmul(a.as_matrix(), b.as_matrix(), + transpose_a=transpose_a, + transpose_b=transpose_b, + adjoint_a=adjoint_a, + adjoint_b=adjoint_b, + output_type=output_type, + name=name)) + + +@tf.experimental.dispatch_for_api(tf.linalg.matvec, {'a': Rotation2D}) +def matvec(a, b, # pylint: disable=missing-param-doc + transpose_a=False, + adjoint_a=False, + a_is_sparse=False, + b_is_sparse=False, + name=None): + """Overrides `tf.linalg.matvec` for `Rotation2D` objects.""" + if a_is_sparse or b_is_sparse: + raise ValueError("Rotation2D does not support sparse matvec.") + return tf.linalg.matvec(a.as_matrix(), b, + transpose_a=transpose_a, + adjoint_a=adjoint_a, + name=name) + + +@tf.experimental.dispatch_for_api(tf.shape, {'input': Rotation2D}) +def shape(input, out_type=tf.int32, name=None): # pylint: disable=redefined-builtin + """Overrides `tf.shape` for `Rotation2D` objects.""" + return tf.shape(input.as_matrix(), out_type=out_type, name=name) diff --git a/tensorflow_mri/python/geometry/rotation_2d_test.py b/tensorflow_mri/python/geometry/rotation_2d_test.py new file mode 100644 index 00000000..132de2e7 --- /dev/null +++ 
b/tensorflow_mri/python/geometry/rotation_2d_test.py @@ -0,0 +1,178 @@ +# Copyright 2022 The TensorFlow MRI Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +# Copyright 2020 The TensorFlow Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Tests for 2D rotation.""" +# This file is partly inspired by TensorFlow Graphics. 
+# pylint: disable=missing-param-doc + +from absl.testing import parameterized +import numpy as np +import tensorflow as tf + +from tensorflow_mri.python.geometry.rotation import test_data as td +from tensorflow_mri.python.geometry.rotation import test_helpers +from tensorflow_mri.python.geometry.rotation_2d import Rotation2D +from tensorflow_mri.python.util import test_util + + +class Rotation2DTest(test_util.TestCase): + """Tests for `Rotation2D`.""" + def test_shape(self): + """Tests shape.""" + rot = Rotation2D.from_euler([0.0]) + self.assertAllEqual([2, 2], rot.shape) + self.assertAllEqual([2, 2], tf.shape(rot)) + + rot = Rotation2D.from_euler([[0.0], [np.pi]]) + self.assertAllEqual([2, 2, 2], rot.shape) + self.assertAllEqual([2, 2, 2], tf.shape(rot)) + + def test_equal(self): + """Tests equality operator.""" + rot1 = Rotation2D.from_euler([0.0]) + rot2 = Rotation2D.from_euler([0.0]) + self.assertAllEqual(True, rot1 == rot2) + + rot1 = Rotation2D.from_euler([0.0]) + rot2 = Rotation2D.from_euler([np.pi]) + self.assertAllEqual(False, rot1 == rot2) + + rot1 = Rotation2D.from_euler([[0.0], [np.pi]]) + rot2 = Rotation2D.from_euler([[0.0], [np.pi]]) + self.assertAllEqual([True, True], rot1 == rot2) + + rot1 = Rotation2D.from_euler([[0.0], [0.0]]) + rot2 = Rotation2D.from_euler([[0.0], [np.pi]]) + self.assertAllEqual([True, False], rot1 == rot2) + + def test_repr(self): + """Tests that repr works.""" + expected = "" + rot = Rotation2D.from_euler([0.0]) + self.assertEqual(expected, repr(rot)) + self.assertEqual(expected[1:-1], str(rot)) + + def test_matmul(self): + """Tests that matmul works.""" + rot = Rotation2D.from_euler([np.pi]) + composed = rot @ rot + self.assertAllClose(np.eye(2), composed.as_matrix()) + + composed = tf.linalg.matmul(rot, rot) + self.assertAllClose(np.eye(2), composed.as_matrix()) + + def test_matvec(self): + """Tests that matvec works.""" + rot = Rotation2D.from_euler([np.pi]) + vec = tf.constant([1.0, -1.0]) + 
self.assertAllClose(rot.rotate(vec), tf.linalg.matvec(rot, vec)) + + def test_convert_to_tensor(self): + """Tests that conversion to tensor works.""" + rot = Rotation2D.from_euler([0.0]) + self.assertIsInstance(tf.convert_to_tensor(rot), tf.Tensor) + self.assertAllClose(np.eye(2), tf.convert_to_tensor(rot)) + + @parameterized.named_parameters( + ("0", [0.0]), + ("45", [np.pi / 4]), + ("90", [np.pi / 2]), + ("135", [np.pi * 3 / 4]), + ("-45", [-np.pi / 4]), + ("-90", [-np.pi / 2]), + ("-135", [-np.pi * 3 / 4]) + ) + def test_as_euler(self, angle): # pylint: disable=missing-param-doc + """Tests that `as_euler` returns the correct angle.""" + rot = Rotation2D.from_euler(angle) + self.assertAllClose(angle, rot.as_euler()) + + def test_from_matrix(self): + """Tests that rotation can be initialized from matrix.""" + rot = Rotation2D.from_matrix(np.eye(2)) + self.assertAllClose(np.eye(2), rot.as_matrix()) + + def test_from_euler_normalized(self): + """Tests that an angle maps to correct matrix.""" + euler_angles = test_helpers.generate_preset_test_euler_angles(dimensions=1) + + rot = Rotation2D.from_euler(euler_angles) + self.assertAllEqual(np.ones(euler_angles.shape[0:-1] + (1,), dtype=bool), + rot.is_valid()) + + @parameterized.named_parameters( + ("0", td.ANGLE_0, td.MAT_2D_ID), + ("45", td.ANGLE_45, td.MAT_2D_45), + ("90", td.ANGLE_90, td.MAT_2D_90), + ("180", td.ANGLE_180, td.MAT_2D_180), + ) + def test_from_euler(self, angle, expected): + """Tests that an angle maps to correct matrix.""" + self.assertAllClose(expected, Rotation2D.from_euler(angle).as_matrix()) + + def test_from_euler_with_small_angles_approximation_random(self): + """Tests small angles approximation by comparing to exact calculation.""" + # Only generate small angles. For a test tolerance of 1e-3, 0.17 was found + # empirically to be the range where the small angle approximation works. 
+ random_euler_angles = test_helpers.generate_random_test_euler_angles( + min_angle=-0.17, max_angle=0.17, dimensions=1) + + exact_rot = Rotation2D.from_euler(random_euler_angles) + approx_rot = Rotation2D.from_small_euler(random_euler_angles) + + self.assertAllClose(exact_rot.as_matrix(), approx_rot.as_matrix(), + atol=1e-3) + + def test_inverse_random(self): + """Checks that inverting rotated points results in no transformation.""" + random_euler_angles = test_helpers.generate_random_test_euler_angles( + dimensions=1) + tensor_shape = random_euler_angles.shape[:-1] + + random_rot = Rotation2D.from_euler(random_euler_angles) + random_point = np.random.normal(size=tensor_shape + (2,)) + rotated_random_points = random_rot.rotate(random_point) + predicted_invert_random_matrix = random_rot.inverse() + predicted_invert_rotated_random_points = ( + predicted_invert_random_matrix.rotate(rotated_random_points)) + + self.assertAllClose(random_point, predicted_invert_rotated_random_points) + + @parameterized.named_parameters( + ("preset1", td.AXIS_2D_0, td.ANGLE_90, td.AXIS_2D_0), + ("preset2", td.AXIS_2D_X, td.ANGLE_90, td.AXIS_2D_Y), + ) + def test_rotate(self, point, angle, expected): + """Tests that the rotate function correctly rotates points.""" + result = Rotation2D.from_euler(angle).rotate(point) + self.assertAllClose(expected, result) + + +if __name__ == "__main__": + tf.test.main() diff --git a/tensorflow_mri/python/geometry/rotation_3d.py b/tensorflow_mri/python/geometry/rotation_3d.py new file mode 100644 index 00000000..b1a95850 --- /dev/null +++ b/tensorflow_mri/python/geometry/rotation_3d.py @@ -0,0 +1,302 @@ +# Copyright 2022 The TensorFlow MRI Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""3D rotation.""" + +import tensorflow as tf + +from tensorflow_mri.python.geometry.rotation import rotation_matrix_3d + + +class Rotation3D(tf.experimental.BatchableExtensionType): # pylint: disable=abstract-method + """Represents a rotation in 3D space (or a batch thereof).""" + __name__ = "tfmri.geometry.Rotation3D" + _matrix: tf.Tensor + + @classmethod + def from_matrix(cls, matrix, name=None): + r"""Creates a 3D rotation from a rotation matrix. + + Args: + matrix: A `tf.Tensor` of shape `[..., 3, 3]`, where the last two + dimensions represent a rotation matrix. + name: A name for this op. Defaults to `"rotation_3d/from_matrix"`. + + Returns: + A `Rotation3D`. + """ + with tf.name_scope(name or "rotation_3d/from_matrix"): + return cls(_matrix=matrix) + + @classmethod + def from_euler(cls, angles, name=None): + r"""Creates a 3D rotation from Euler angles. + + The resulting rotation acts like the rotation matrix + $\mathbf{R} = \mathbf{R}_z\mathbf{R}_y\mathbf{R}_x$. + + ```{note} + Uses the $z$-$y$-$x$ rotation convention (Tait-Bryan angles). + ``` + + Args: + angles: A `tf.Tensor` of shape `[..., 3]`, where the last dimension + represents the three Euler angles in radians. `angles[..., 0]` + is the angles about `x`, `angles[..., 1]` is the angles about `y`, + and `angles[..., 2]` is the angles about `z`. + name: A name for this op. Defaults to `"rotation_3d/from_euler"`. + + Returns: + A `Rotation3D`. + + Raises: + ValueError: If the shape of `angles` is invalid. 
+ """ + with tf.name_scope(name or "rotation_3d/from_euler"): + return cls(_matrix=rotation_matrix_3d.from_euler(angles)) + + @classmethod + def from_small_euler(cls, angles, name=None): + r"""Creates a 3D rotation from small Euler angles. + + The resulting rotation acts like the rotation matrix + $\mathbf{R} = \mathbf{R}_z\mathbf{R}_y\mathbf{R}_x$. + + Uses the small angle approximation to compute the rotation. Under the + small angle assumption, $\sin(x)$$ and $$\cos(x)$ can be approximated by + their second order Taylor expansions, where $\sin(x) \approx x$ and + $\cos(x) \approx 1 - \frac{x^2}{2}$. + + ```{note} + Uses the $z$-$y$-$x$ rotation convention (Tait-Bryan angles). + ``` + + ```{note} + This function does not verify the smallness of the angles. + ``` + + Args: + angles: A `tf.Tensor` of shape `[..., 3]`, where the last dimension + represents the three Euler angles in radians. `angles[..., 0]` + is the angles about `x`, `angles[..., 1]` is the angles about `y`, + and `angles[..., 2]` is the angles about `z`. + name: A name for this op. Defaults to "rotation_3d/from_small_euler". + + Returns: + A `Rotation3D`. + + Raises: + ValueError: If the shape of `angles` is invalid. + """ + with tf.name_scope(name or "rotation_3d/from_small_euler"): + return cls(_matrix=rotation_matrix_3d.from_small_euler(angles)) + + @classmethod + def from_axis_angle(cls, axis, angle, name=None): + """Creates a 3D rotation from an axis-angle representation. + + Args: + axis: A `tf.Tensor` of shape `[..., 3]`, where the last dimension + represents a normalized axis. + angle: A `tf.Tensor` of shape `[..., 1]`, where the last dimension + represents a normalized axis. + name: A name for this op. Defaults to "rotation_3d/from_axis_angle". + + Returns: + A `Rotation3D`. + + Raises: + ValueError: If the shape of `axis` or `angle` is invalid. 
+ """ + with tf.name_scope(name or "rotation_3d/from_axis_angle"): + return cls(_matrix=rotation_matrix_3d.from_axis_angle(axis, angle)) + + @classmethod + def from_quaternion(cls, quaternion, name=None): + """Creates a 3D rotation from a quaternion. + + Args: + quaternion: A `tf.Tensor` of shape `[..., 4]`, where the last dimension + represents a normalized quaternion. + name: A name for this op. Defaults to `"rotation_3d/from_quaternion"`. + + Returns: + A `Rotation3D`. + + Raises: + ValueError: If the shape of `quaternion` is invalid. + """ + with tf.name_scope(name or "rotation_3d/from_quaternion"): + return cls(_matrix=rotation_matrix_3d.from_quaternion(quaternion)) + + def as_matrix(self, name=None): + r"""Returns a rotation matrix representation of this rotation. + + Args: + name: A name for this op. Defaults to `"rotation_3d/as_matrix"`. + + Returns: + A `tf.Tensor` of shape `[..., 3, 3]`, where the last two dimensions + represent a rotation matrix. + """ + with tf.name_scope(name or "rotation_3d/as_matrix"): + return tf.identity(self._matrix) + + def inverse(self, name=None): + r"""Computes the inverse of this rotation. + + Args: + name: A name for this op. Defaults to `"rotation_3d/inverse"`. + + Returns: + A `Rotation3D` representing the inverse of this rotation. + """ + with tf.name_scope(name or "rotation_3d/inverse"): + return Rotation3D(_matrix=rotation_matrix_3d.inverse(self._matrix)) + + def is_valid(self, atol=1e-3, name=None): + r"""Determines if this is a valid rotation. + + A rotation matrix $\mathbf{R}$ is a valid rotation matrix if + $\mathbf{R}^T\mathbf{R} = \mathbf{I}$ and $\det(\mathbf{R}) = 1$. + + Args: + atol: A `float`. The absolute tolerance parameter. + name: A name for this op. Defaults to `"rotation_3d/is_valid"`. + + Returns: + A boolean `tf.Tensor` with shape `[..., 1]`, `True` if the corresponding + matrix is valid and `False` otherwise. 
+ """ + with tf.name_scope(name or "rotation_3d/is_valid"): + return rotation_matrix_3d.is_valid(self._matrix, atol=atol) + + def rotate(self, point, name=None): + r"""Rotates a 3D point. + + Args: + point: A `tf.Tensor` of shape `[..., 3]`, where the last dimension + represents a 3D point and `...` represents any number of batch + dimensions, which must be broadcastable with the batch shape of this + rotation. + name: A name for this op. Defaults to `"rotation_3d/rotate"`. + + Returns: + A `tf.Tensor` of shape `[..., 3]`, where the last dimension represents + a 3D point and `...` is the result of broadcasting the batch shapes of + `point` and this rotation matrix. + + Raises: + ValueError: If the shape of `point` is invalid. + """ + with tf.name_scope(name or "rotation_3d/rotate"): + return rotation_matrix_3d.rotate(point, self._matrix) + + def __eq__(self, other): + """Returns true if this rotation is equivalent to the other rotation.""" + return tf.math.reduce_all( + tf.math.equal(self._matrix, other._matrix), axis=[-2, -1]) + + def __matmul__(self, other): + """Composes this rotation with another rotation.""" + if isinstance(other, Rotation3D): + return Rotation3D(_matrix=tf.matmul(self._matrix, other._matrix)) + raise ValueError( + f"Cannot compose a `Rotation2D` with a `{type(other).__name__}`.") + + def __repr__(self): + """Returns a string representation of this rotation.""" + name = self.__name__ + return f"<{name}(shape={str(self.shape)}, dtype={self.dtype.name})>" + + def __str__(self): + """Returns a string representation of this rotation.""" + return self.__repr__()[1:-1] + + def __validate__(self): + """Checks that this rotation is a valid rotation. + + Only performs static checks. + """ + rotation_matrix_3d.check_shape(self._matrix) + + @property + def shape(self): + """Returns the shape of this rotation. + + Returns: + A `tf.TensorShape`. + """ + return self._matrix.shape + + @property + def dtype(self): + """Returns the dtype of this rotation. 
+ + Returns: + A `tf.dtypes.DType`. + """ + return self._matrix.dtype + + +@tf.experimental.dispatch_for_api(tf.convert_to_tensor, {'value': Rotation3D}) +def convert_to_tensor(value, dtype=None, dtype_hint=None, name=None): + """Overrides `tf.convert_to_tensor` for `Rotation3D` objects.""" + return tf.convert_to_tensor( + value.as_matrix(), dtype=dtype, dtype_hint=dtype_hint, name=name) + + +@tf.experimental.dispatch_for_api( + tf.linalg.matmul, {'a': Rotation3D, 'b': Rotation3D}) +def matmul(a, b, # pylint: disable=missing-param-doc + transpose_a=False, + transpose_b=False, + adjoint_a=False, + adjoint_b=False, + a_is_sparse=False, + b_is_sparse=False, + output_type=None, + name=None): + """Overrides `tf.linalg.matmul` for `Rotation3D` objects.""" + if a_is_sparse or b_is_sparse: + raise ValueError("Rotation3D does not support sparse matmul.") + return Rotation3D(_matrix=tf.linalg.matmul(a.as_matrix(), b.as_matrix(), + transpose_a=transpose_a, + transpose_b=transpose_b, + adjoint_a=adjoint_a, + adjoint_b=adjoint_b, + output_type=output_type, + name=name)) + + +@tf.experimental.dispatch_for_api(tf.linalg.matvec, {'a': Rotation3D}) +def matvec(a, b, # pylint: disable=missing-param-doc + transpose_a=False, + adjoint_a=False, + a_is_sparse=False, + b_is_sparse=False, + name=None): + """Overrides `tf.linalg.matvec` for `Rotation3D` objects.""" + if a_is_sparse or b_is_sparse: + raise ValueError("Rotation3D does not support sparse matvec.") + return tf.linalg.matvec(a.as_matrix(), b, + transpose_a=transpose_a, + adjoint_a=adjoint_a, + name=name) + + +@tf.experimental.dispatch_for_api(tf.shape, {'input': Rotation3D}) +def shape(input, out_type=tf.int32, name=None): # pylint: disable=redefined-builtin + """Overrides `tf.shape` for `Rotation3D` objects.""" + return tf.shape(input.as_matrix(), out_type=out_type, name=name) diff --git a/tensorflow_mri/python/geometry/rotation_3d_test.py b/tensorflow_mri/python/geometry/rotation_3d_test.py new file mode 100644 index 
00000000..93ce456f --- /dev/null +++ b/tensorflow_mri/python/geometry/rotation_3d_test.py @@ -0,0 +1,280 @@ +# Copyright 2022 The TensorFlow MRI Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +# Copyright 2020 The TensorFlow Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Tests for 3D rotation.""" +# This file is partly inspired by TensorFlow Graphics. 
+# pylint: disable=missing-param-doc
+
+from absl.testing import parameterized
+import numpy as np
+import tensorflow as tf
+
+from tensorflow_mri.python.geometry.rotation import test_data as td
+from tensorflow_mri.python.geometry.rotation import test_helpers
+from tensorflow_mri.python.geometry.rotation_3d import Rotation3D
+from tensorflow_mri.python.util import test_util
+
+
+class Rotation3DTest(test_util.TestCase):
+  """Tests for `Rotation3D`."""
+  def test_shape(self):
+    """Tests shape."""
+    rot = Rotation3D.from_euler([0.0, 0.0, 0.0])
+    self.assertAllEqual([3, 3], rot.shape)
+    self.assertAllEqual([3, 3], tf.shape(rot))
+
+    rot = Rotation3D.from_euler([[0.0, 0.0, 0.0], [np.pi, 0.0, 0.0]])
+    self.assertAllEqual([2, 3, 3], rot.shape)
+    self.assertAllEqual([2, 3, 3], tf.shape(rot))
+
+  def test_equal(self):
+    """Tests equality operator."""
+    rot1 = Rotation3D.from_euler([0.0, 0.0, 0.0])
+    rot2 = Rotation3D.from_euler([0.0, 0.0, 0.0])
+    self.assertAllEqual(True, rot1 == rot2)
+
+    rot1 = Rotation3D.from_euler([0.0, 0.0, 0.0])
+    rot2 = Rotation3D.from_euler([np.pi, 0.0, 0.0])
+    self.assertAllEqual(False, rot1 == rot2)
+
+    rot1 = Rotation3D.from_euler([[0.0, 0.0, 0.0], [np.pi, 0.0, 0.0]])
+    rot2 = Rotation3D.from_euler([[0.0, 0.0, 0.0], [np.pi, 0.0, 0.0]])
+    self.assertAllEqual([True, True], rot1 == rot2)
+
+    rot1 = Rotation3D.from_euler([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]])
+    rot2 = Rotation3D.from_euler([[0.0, 0.0, 0.0], [np.pi, 0.0, 0.0]])
+    self.assertAllEqual([True, False], rot1 == rot2)
+
+  def test_repr(self):
+    rot = Rotation3D.from_euler([0.0, 0.0, 0.0])
+    self.assertEqual(
+        "<Rotation3D(shape=(3, 3), dtype=float32)>", repr(rot))
+
+  def test_convert_to_tensor(self):
+    """Tests that conversion to tensor works."""
+    rot = Rotation3D.from_euler([0.0, 0.0, 0.0])
+    self.assertIsInstance(tf.convert_to_tensor(rot), tf.Tensor)
+    self.assertAllClose(np.eye(3), tf.convert_to_tensor(rot))
+
+  def test_from_axis_angle_normalized_random(self):
+    """Tests that axis-angles can be converted to rotation
matrices.""" + tensor_shape = np.random.randint(1, 10, size=np.random.randint(3)).tolist() + random_axis = np.random.normal(size=tensor_shape + [3]) + random_axis /= np.linalg.norm(random_axis, axis=-1, keepdims=True) + random_angle = np.random.normal(size=tensor_shape + [1]) + + rotation = Rotation3D.from_axis_angle(random_axis, random_angle) + + self.assertAllEqual(rotation.is_valid(), np.ones(tensor_shape + [1])) + + @parameterized.named_parameters( + ("preset0", td.AXIS_3D_X, td.ANGLE_45, td.MAT_3D_X_45), + ("preset1", td.AXIS_3D_Y, td.ANGLE_45, td.MAT_3D_Y_45), + ("preset2", td.AXIS_3D_Z, td.ANGLE_45, td.MAT_3D_Z_45), + ("preset3", td.AXIS_3D_X, td.ANGLE_90, td.MAT_3D_X_90), + ("preset4", td.AXIS_3D_Y, td.ANGLE_90, td.MAT_3D_Y_90), + ("preset5", td.AXIS_3D_Z, td.ANGLE_90, td.MAT_3D_Z_90), + ("preset6", td.AXIS_3D_X, td.ANGLE_180, td.MAT_3D_X_180), + ("preset7", td.AXIS_3D_Y, td.ANGLE_180, td.MAT_3D_Y_180), + ("preset8", td.AXIS_3D_Z, td.ANGLE_180, td.MAT_3D_Z_180) + ) + def test_from_axis_angle(self, axis, angle, matrix): + """Tests that an axis-angle maps to correct matrix.""" + self.assertAllClose( + matrix, Rotation3D.from_axis_angle(axis, angle).as_matrix()) + + def test_from_axis_angle_random(self): + """Tests conversion to matrix.""" + tensor_shape = np.random.randint(1, 10, size=np.random.randint(3)).tolist() + random_axis = np.random.normal(size=tensor_shape + [3]) + random_axis /= np.linalg.norm(random_axis, axis=-1, keepdims=True) + random_angle = np.random.normal(size=tensor_shape + [1]) + + rotation = Rotation3D.from_axis_angle(random_axis, random_angle) + + # Checks that resulting rotation matrices are normalized. 
+ self.assertAllEqual(rotation.is_valid(), np.ones(tensor_shape + [1])) + + @parameterized.named_parameters( + ("preset0", td.AXIS_3D_X, td.ANGLE_90, td.AXIS_3D_X, td.AXIS_3D_X), + ("preset1", td.AXIS_3D_X, td.ANGLE_90, td.AXIS_3D_Y, td.AXIS_3D_Z), + ("preset2", td.AXIS_3D_X, -td.ANGLE_90, td.AXIS_3D_Z, td.AXIS_3D_Y), + ("preset3", td.AXIS_3D_Y, -td.ANGLE_90, td.AXIS_3D_X, td.AXIS_3D_Z), + ("preset4", td.AXIS_3D_Y, td.ANGLE_90, td.AXIS_3D_Y, td.AXIS_3D_Y), + ("preset5", td.AXIS_3D_Y, td.ANGLE_90, td.AXIS_3D_Z, td.AXIS_3D_X), + ("preset6", td.AXIS_3D_Z, td.ANGLE_90, td.AXIS_3D_X, td.AXIS_3D_Y), + ("preset7", td.AXIS_3D_Z, -td.ANGLE_90, td.AXIS_3D_Y, td.AXIS_3D_X), + ("preset8", td.AXIS_3D_Z, td.ANGLE_90, td.AXIS_3D_Z, td.AXIS_3D_Z), + ) + def test_from_axis_angle_rotate_vector_preset( + self, axis, angle, point, expected): + """Tests the directionality of axis-angle rotations.""" + self.assertAllClose( + expected, Rotation3D.from_axis_angle(axis, angle).rotate(point)) + + def test_from_euler_normalized_preset(self): + """Tests that euler angles can be converted to rotation matrices.""" + euler_angles = test_helpers.generate_preset_test_euler_angles() + + matrix = Rotation3D.from_euler(euler_angles) + self.assertAllEqual( + matrix.is_valid(), np.ones(euler_angles.shape[0:-1] + (1,))) + + def test_from_euler_normalized_random(self): + """Tests that euler angles can be converted to rotation matrices.""" + random_euler_angles = test_helpers.generate_random_test_euler_angles() + + matrix = Rotation3D.from_euler(random_euler_angles) + self.assertAllEqual( + matrix.is_valid(), np.ones(random_euler_angles.shape[0:-1] + (1,))) + + @parameterized.named_parameters( + ("preset0", td.AXIS_3D_0, td.MAT_3D_ID), + ("preset1", td.ANGLE_45 * td.AXIS_3D_X, td.MAT_3D_X_45), + ("preset2", td.ANGLE_45 * td.AXIS_3D_Y, td.MAT_3D_Y_45), + ("preset3", td.ANGLE_45 * td.AXIS_3D_Z, td.MAT_3D_Z_45), + ("preset4", td.ANGLE_90 * td.AXIS_3D_X, td.MAT_3D_X_90), + ("preset5", td.ANGLE_90 * 
td.AXIS_3D_Y, td.MAT_3D_Y_90), + ("preset6", td.ANGLE_90 * td.AXIS_3D_Z, td.MAT_3D_Z_90), + ("preset7", td.ANGLE_180 * td.AXIS_3D_X, td.MAT_3D_X_180), + ("preset8", td.ANGLE_180 * td.AXIS_3D_Y, td.MAT_3D_Y_180), + ("preset9", td.ANGLE_180 * td.AXIS_3D_Z, td.MAT_3D_Z_180), + ) + def test_from_euler(self, angle, expected): + """Tests that Euler angles create the expected matrix.""" + rotation = Rotation3D.from_euler(angle) + self.assertAllClose(expected, rotation.as_matrix()) + + def test_from_euler_random(self): + """Tests that Euler angles produce the same result as axis-angle.""" + angles = test_helpers.generate_random_test_euler_angles() + matrix = Rotation3D.from_euler(angles) + tensor_tile = angles.shape[:-1] + + x_axis = np.tile(td.AXIS_3D_X, tensor_tile + (1,)) + y_axis = np.tile(td.AXIS_3D_Y, tensor_tile + (1,)) + z_axis = np.tile(td.AXIS_3D_Z, tensor_tile + (1,)) + x_angle = np.expand_dims(angles[..., 0], axis=-1) + y_angle = np.expand_dims(angles[..., 1], axis=-1) + z_angle = np.expand_dims(angles[..., 2], axis=-1) + x_rotation = Rotation3D.from_axis_angle(x_axis, x_angle) + y_rotation = Rotation3D.from_axis_angle(y_axis, y_angle) + z_rotation = Rotation3D.from_axis_angle(z_axis, z_angle) + expected_matrix = z_rotation @ (y_rotation @ x_rotation) + + self.assertAllClose(expected_matrix.as_matrix(), matrix.as_matrix(), + rtol=1e-3) + + def test_from_quaternion_normalized_random(self): + """Tests that random quaternions can be converted to rotation matrices.""" + random_quaternion = test_helpers.generate_random_test_quaternions() + tensor_shape = random_quaternion.shape[:-1] + + random_rot = Rotation3D.from_quaternion(random_quaternion) + + self.assertAllEqual( + random_rot.is_valid(), + np.ones(tensor_shape + (1,))) + + def test_from_quaternion(self): + """Tests that a quaternion maps to correct matrix.""" + preset_quaternions = test_helpers.generate_preset_test_quaternions() + + preset_matrices = test_helpers.generate_preset_test_rotation_matrices_3d() + + 
self.assertAllClose( + preset_matrices, + Rotation3D.from_quaternion(preset_quaternions).as_matrix()) + + def test_inverse_normalized_random(self): + """Checks that inverted rotation matrices are valid rotations.""" + random_euler_angle = test_helpers.generate_random_test_euler_angles() + tensor_tile = random_euler_angle.shape[:-1] + + random_rot = Rotation3D.from_euler(random_euler_angle) + predicted_invert_random_rot = random_rot.inverse() + + self.assertAllEqual( + predicted_invert_random_rot.is_valid(), + np.ones(tensor_tile + (1,))) + + def test_inverse_random(self): + """Checks that inverting rotated points results in no transformation.""" + random_euler_angle = test_helpers.generate_random_test_euler_angles() + tensor_tile = random_euler_angle.shape[:-1] + random_rot = Rotation3D.from_euler(random_euler_angle) + random_point = np.random.normal(size=tensor_tile + (3,)) + + rotated_random_points = random_rot.rotate(random_point) + inv_random_rot = random_rot.inverse() + inv_rotated_random_points = inv_random_rot.rotate(rotated_random_points) + + self.assertAllClose(random_point, inv_rotated_random_points, rtol=1e-6) + + def test_is_valid_random(self): + """Tests that is_valid works as intended.""" + random_euler_angle = test_helpers.generate_random_test_euler_angles() + tensor_tile = random_euler_angle.shape[:-1] + + rotation = Rotation3D.from_euler(random_euler_angle) + pred_normalized = rotation.is_valid() + + with self.subTest(name="all_normalized"): + self.assertAllEqual(pred_normalized, + np.ones(shape=tensor_tile + (1,), dtype=bool)) + + with self.subTest(name="non_orthonormal"): + test_matrix = np.array([[2., 0., 0.], [0., 0.5, 0], [0., 0., 1.]]) + rotation = Rotation3D.from_matrix(test_matrix) + pred_normalized = rotation.is_valid() + self.assertAllEqual(pred_normalized, np.zeros(shape=(1,), dtype=bool)) + + with self.subTest(name="negative_orthonormal"): + test_matrix = np.array([[1., 0., 0.], [0., -1., 0.], [0., 0., 1.]]) + rotation = 
Rotation3D.from_matrix(test_matrix) + pred_normalized = rotation.is_valid() + self.assertAllEqual(pred_normalized, np.zeros(shape=(1,), dtype=bool)) + + @parameterized.named_parameters( + ("preset0", td.ANGLE_90 * td.AXIS_3D_X, td.AXIS_3D_X, td.AXIS_3D_X), + ("preset1", td.ANGLE_90 * td.AXIS_3D_X, td.AXIS_3D_Y, td.AXIS_3D_Z), + ("preset2", -td.ANGLE_90 * td.AXIS_3D_X, td.AXIS_3D_Z, td.AXIS_3D_Y), + ("preset3", -td.ANGLE_90 * td.AXIS_3D_Y, td.AXIS_3D_X, td.AXIS_3D_Z), + ("preset4", td.ANGLE_90 * td.AXIS_3D_Y, td.AXIS_3D_Y, td.AXIS_3D_Y), + ("preset5", td.ANGLE_90 * td.AXIS_3D_Y, td.AXIS_3D_Z, td.AXIS_3D_X), + ("preset6", td.ANGLE_90 * td.AXIS_3D_Z, td.AXIS_3D_X, td.AXIS_3D_Y), + ("preset7", -td.ANGLE_90 * td.AXIS_3D_Z, td.AXIS_3D_Y, td.AXIS_3D_X), + ("preset8", td.ANGLE_90 * td.AXIS_3D_Z, td.AXIS_3D_Z, td.AXIS_3D_Z), + ) + def test_rotate_vector_preset(self, angles, point, expected): + """Tests that the rotate function produces the expected results.""" + self.assertAllClose(expected, Rotation3D.from_euler(angles).rotate(point)) + + +if __name__ == "__main__": + tf.test.main() diff --git a/tensorflow_mri/python/initializers/__init__.py b/tensorflow_mri/python/initializers/__init__.py index 33ca9575..ef834c19 100644 --- a/tensorflow_mri/python/initializers/__init__.py +++ b/tensorflow_mri/python/initializers/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2022 University College London. All Rights Reserved. +# Copyright 2022 The TensorFlow MRI Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -14,7 +14,12 @@ # ============================================================================== """Keras initializers.""" +import inspect + +import keras + from tensorflow_mri.python.initializers import initializers +from tensorflow_mri.python.util import api_util TFMRI_INITIALIZERS = { @@ -33,3 +38,100 @@ 'lecun_normal': initializers.LecunNormal, 'lecun_uniform': initializers.LecunUniform, } + + +@api_util.export("initializers.serialize") +def serialize(initializer): + """Serialize a Keras initializer. + + ```{note} + This function is a drop-in replacement for `tf.keras.initializers.serialize`. + ``` + + Args: + initializer: A Keras initializer. + + Returns: + A serialized Keras initializer. + """ + return keras.initializers.serialize(initializer) + + +@api_util.export("initializers.deserialize") +def deserialize(config, custom_objects=None): + """Deserialize a Keras initializer. + + ```{note} + This function is a drop-in replacement for + `tf.keras.initializers.deserialize`. The only difference is that this function + has built-in knowledge of TFMRI initializers. Where a TFMRI initializer exists + that replaces the corresponding Keras initializer, this function prefers the + TFMRI initializer. + ``` + + Args: + config: A Keras initializer configuration. + custom_objects: Optional dictionary mapping names (strings) to custom + classes or functions to be considered during deserialization. + + Returns: + A Keras initializer. + """ + custom_objects = {**TFMRI_INITIALIZERS, **(custom_objects or {})} + return keras.initializers.deserialize(config, custom_objects) + + +@api_util.export("initializers.get") +def get(identifier): + """Retrieve a Keras initializer by the identifier. + + ```{note} + This function is a drop-in replacement for + `tf.keras.initializers.get`. The only difference is that this function + has built-in knowledge of TFMRI initializers. 
Where a TFMRI initializer exists + that replaces the corresponding Keras initializer, this function prefers the + TFMRI initializer. + ``` + + The `identifier` may be the string name of a initializers function or class ( + case-sensitively). + + >>> identifier = 'Ones' + >>> tfmri.initializers.deserialize(identifier) + <...keras.initializers.initializers_v2.Ones...> + + You can also specify `config` of the initializer to this function by passing + dict containing `class_name` and `config` as an identifier. Also note that the + `class_name` must map to a `Initializer` class. + + >>> cfg = {'class_name': 'Ones', 'config': {}} + >>> tfmri.initializers.deserialize(cfg) + <...keras.initializers.initializers_v2.Ones...> + + In the case that the `identifier` is a class, this method will return a new + instance of the class by its constructor. + + Args: + identifier: A `str` or `dict` containing the initializer name or + configuration. + + Returns: + An initializer instance based on the input identifier. + + Raises: + ValueError: If the input identifier is not a supported type or in a bad + format. + """ + if identifier is None: + return None + if isinstance(identifier, dict): + return deserialize(identifier) + if isinstance(identifier, str): + identifier = str(identifier) + return deserialize(identifier) + if callable(identifier): + if inspect.isclass(identifier): + identifier = identifier() + return identifier + raise ValueError('Could not interpret initializer identifier: ' + + str(identifier)) diff --git a/tensorflow_mri/python/initializers/initializers.py b/tensorflow_mri/python/initializers/initializers.py index e8216f3c..6b3ff6c1 100644 --- a/tensorflow_mri/python/initializers/initializers.py +++ b/tensorflow_mri/python/initializers/initializers.py @@ -1,4 +1,4 @@ -# Copyright 2022 University College London. All Rights Reserved. +# Copyright 2022 The TensorFlow MRI Authors. All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -48,13 +48,12 @@ EXTENSION_NOTE = string.Template(""" - .. note:: - This initializer can be used as a drop-in replacement for - `tf.keras.initializers.${name}`_. However, this one also supports - initialization of complex-valued weights. Simply pass `dtype='complex64'` - or `dtype='complex128'` to its `__call__` method. - - .. _tf.keras.initializers.${name}: https://www.tensorflow.org/api_docs/python/tf/keras/initializers/${name} + ```{note} + This initializer can be used as a drop-in replacement for + `tf.keras.initializers.${name}`. However, this one also supports + initialization of complex-valued weights. Simply pass `dtype='complex64'` + or `dtype='complex128'` to its `__call__` method. + ``` """) diff --git a/tensorflow_mri/python/initializers/initializers_test.py b/tensorflow_mri/python/initializers/initializers_test.py index 511c7280..cefe1a8c 100644 --- a/tensorflow_mri/python/initializers/initializers_test.py +++ b/tensorflow_mri/python/initializers/initializers_test.py @@ -1,4 +1,4 @@ -# Copyright 2022 University College London. All Rights Reserved. +# Copyright 2022 The TensorFlow MRI Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tensorflow_mri/python/io/__init__.py b/tensorflow_mri/python/io/__init__.py index 44032b6c..3b19357b 100644 --- a/tensorflow_mri/python/io/__init__.py +++ b/tensorflow_mri/python/io/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2021 University College London. All Rights Reserved. +# Copyright 2021 The TensorFlow MRI Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/tensorflow_mri/python/io/image_io.py b/tensorflow_mri/python/io/image_io.py index 885214e6..eff8451c 100644 --- a/tensorflow_mri/python/io/image_io.py +++ b/tensorflow_mri/python/io/image_io.py @@ -1,4 +1,4 @@ -# Copyright 2021 University College London. All Rights Reserved. +# Copyright 2021 The TensorFlow MRI Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tensorflow_mri/python/io/image_io_test.py b/tensorflow_mri/python/io/image_io_test.py index a4d16783..f8c62568 100644 --- a/tensorflow_mri/python/io/image_io_test.py +++ b/tensorflow_mri/python/io/image_io_test.py @@ -1,4 +1,4 @@ -# Copyright 2021 University College London. All Rights Reserved. +# Copyright 2021 The TensorFlow MRI Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tensorflow_mri/python/io/twix_io.py b/tensorflow_mri/python/io/twix_io.py index f322135f..936f508c 100644 --- a/tensorflow_mri/python/io/twix_io.py +++ b/tensorflow_mri/python/io/twix_io.py @@ -1,4 +1,4 @@ -# Copyright 2022 University College London. All Rights Reserved. +# Copyright 2022 The TensorFlow MRI Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -39,34 +39,35 @@ def parse_twix(contents): """Parses the contents of a TWIX RAID file (Siemens raw data). - .. warning:: + ```{warning} This function does not support graph execution. + ``` Example: >>> # Read bytes from file. - >>> contents = tf.io.read_file("/path/to/file.dat") + >>> contents = tf.io.read_file("/path/to/file.dat") # doctest: +SKIP >>> # Parse the contents. 
- >>> twix = tfmri.io.parse_twix(contents) + >>> twix = tfmri.io.parse_twix(contents) # doctest: +SKIP >>> # Access the first measurement. - >>> meas = twix.measurements[0] + >>> meas = twix.measurements[0] # doctest: +SKIP >>> # Get the protocol... - >>> protocol = meas.protocol + >>> protocol = meas.protocol # doctest: +SKIP >>> # You can index the protocol to access any of the protocol buffers, >>> # e.g., the measurement protocol. - >>> meas_prot = protocol['Meas'] + >>> meas_prot = protocol['Meas'] # doctest: +SKIP >>> # Protocol buffers are nested structures accessible with "dot notation" >>> # or "bracket notation". The following are equivalent: - >>> base_res = meas_prot.MEAS.sKSpace.lBaseResolution.value - >>> base_res = meas_prot['MEAS']['sKSpace']['lBaseResolution'].value + >>> base_res = meas_prot.MEAS.sKSpace.lBaseResolution.value # doctest: +SKIP + >>> base_res = meas_prot['MEAS']['sKSpace']['lBaseResolution'].value # doctest: +SKIP >>> # The measurement object also contains the scan data. - >>> scans = meas.scans + >>> scans = meas.scans # doctest: +SKIP >>> # Each scan has a header and the list of channels. - >>> scan_header = scans[0].header - >>> channels = scans[0].channels + >>> scan_header = scans[0].header # doctest: +SKIP + >>> channels = scans[0].channels # doctest: +SKIP >>> # Each channel also has its own header as well as the raw measurement >>> # data. - >>> channel_header = channels[0].header - >>> data = channels[0].data + >>> channel_header = channels[0].header # doctest: +SKIP + >>> data = channels[0].data # doctest: +SKIP Args: contents: A scalar `tf.Tensor` of type `string`. The encoded contents of a diff --git a/tensorflow_mri/python/io/twix_io_test.py b/tensorflow_mri/python/io/twix_io_test.py index a3e850db..4adb9270 100644 --- a/tensorflow_mri/python/io/twix_io_test.py +++ b/tensorflow_mri/python/io/twix_io_test.py @@ -1,4 +1,4 @@ -# Copyright 2022 University College London. All Rights Reserved. 
+# Copyright 2022 The TensorFlow MRI Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tensorflow_mri/python/layers/__init__.py b/tensorflow_mri/python/layers/__init__.py index 14bbaba9..d97fe263 100644 --- a/tensorflow_mri/python/layers/__init__.py +++ b/tensorflow_mri/python/layers/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2021 University College London. All Rights Reserved. +# Copyright 2022 The TensorFlow MRI Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,9 +14,13 @@ # ============================================================================== """Keras layers.""" +from tensorflow_mri.python.layers import coil_sensitivities +from tensorflow_mri.python.layers import concatenate from tensorflow_mri.python.layers import convolutional -from tensorflow_mri.python.layers import conv_blocks -from tensorflow_mri.python.layers import conv_endec +from tensorflow_mri.python.layers import data_consistency +from tensorflow_mri.python.layers import normalization from tensorflow_mri.python.layers import pooling from tensorflow_mri.python.layers import preproc_layers +from tensorflow_mri.python.layers import recon_adjoint +from tensorflow_mri.python.layers import reshaping from tensorflow_mri.python.layers import signal_layers diff --git a/tensorflow_mri/python/layers/coil_sensitivities.py b/tensorflow_mri/python/layers/coil_sensitivities.py new file mode 100644 index 00000000..04f4465a --- /dev/null +++ b/tensorflow_mri/python/layers/coil_sensitivities.py @@ -0,0 +1,152 @@ +# Copyright 2022 The TensorFlow MRI Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Coil sensitivity estimation layer.""" + +import string + +import tensorflow as tf + +from tensorflow_mri.python.coils import coil_sensitivities +from tensorflow_mri.python.ops import math_ops +from tensorflow_mri.python.util import api_util +from tensorflow_mri.python.util import doc_util +from tensorflow_mri.python.util import model_util + + +class CoilSensitivityEstimation(tf.keras.layers.Layer): + r"""${rank}D coil sensitivity estimation layer. + + This layer extracts a calibration region and estimates the coil sensitivity + maps. + """ + def __init__(self, + rank, + calib_fn=None, + algorithm='walsh', + algorithm_kwargs=None, + refine_sensitivities=False, + refinement_network=None, + normalize_sensitivities=True, + expand_channel_dim=False, + reinterpret_complex=False, + **kwargs): + super().__init__(**kwargs) + self.rank = rank + self.calib_fn = calib_fn + self.algorithm = algorithm + self.algorithm_kwargs = algorithm_kwargs or {} + self.refine_sensitivities = refine_sensitivities + self.refinement_network = refinement_network + self.normalize_sensitivities = normalize_sensitivities + self.expand_channel_dim = expand_channel_dim + self.reinterpret_complex = reinterpret_complex + + if self.refine_sensitivities and self.refinement_network is None: + # Default map refinement network. 
+      dtype = tf.as_dtype(self.dtype)
+      network_class = model_util.get_nd_model('UNet', rank)
+      network_kwargs = dict(
+          filters=[32, 64, 128],
+          kernel_size=3,
+          activation=('relu' if self.reinterpret_complex else 'complex_relu'),
+          output_filters=2 if self.reinterpret_complex else 1,
+          dtype=dtype.real_dtype if self.reinterpret_complex else dtype)
+      self.refinement_network = tf.keras.layers.TimeDistributed(
+          network_class(**network_kwargs))
+
+  def call(self, inputs):  # pylint: disable=arguments-differ
+    data, operator, calib_data = parse_inputs(inputs)
+
+    # Compute coil sensitivities.
+    maps = coil_sensitivities.estimate_sensitivities_universal(
+        data,
+        operator,
+        calib_data=calib_data,
+        calib_fn=self.calib_fn,
+        algorithm=self.algorithm,
+        **self.algorithm_kwargs)
+
+    # Maybe refine coil sensitivities.
+    if self.refine_sensitivities:
+      maps = tf.expand_dims(maps, axis=-1)
+      if self.reinterpret_complex:
+        maps = math_ops.view_as_real(maps, stacked=False)
+      maps = self.refinement_network(maps)
+      if self.reinterpret_complex:
+        maps = math_ops.view_as_complex(maps, stacked=False)
+      maps = tf.squeeze(maps, axis=-1)
+
+    # Maybe normalize coil sensitivities.
+    if self.normalize_sensitivities:
+      coil_axis = -(self.rank + 1)
+      maps = math_ops.normalize_no_nan(maps, axis=coil_axis)
+
+    # # Post-processing.
+ # if self.expand_channel_dim: + # maps = tf.expand_dims(maps, axis=-1) + # if self.reinterpret_complex and maps.dtype.is_complex: + # maps = math_ops.view_as_real(maps, stacked=False) + + return maps + + def get_config(self): + base_config = super().get_config() + config = { + 'calib_fn': self.calib_fn, + 'algorithm': self.algorithm, + 'algorithm_kwargs': self.algorithm_kwargs, + 'refine_sensitivities': self.refine_sensitivities, + 'refinement_network': self.refinement_network, + 'normalize_sensitivities': self.normalize_sensitivities, + 'expand_channel_dim': self.expand_channel_dim, + 'reinterpret_complex': self.reinterpret_complex, + } + return {**base_config, **config} + + +def parse_inputs(inputs): + def _parse_inputs(data, operator, calib_data=None): + return data, operator, calib_data + if isinstance(inputs, tuple): + return _parse_inputs(*inputs) + elif isinstance(inputs, dict): + return _parse_inputs(**inputs) + raise ValueError('inputs must be a tuple or dict') + + +@api_util.export("layers.CoilSensitivityEstimation2D") +@tf.keras.utils.register_keras_serializable(package='MRI') +class CoilSensitivityEstimation2D(CoilSensitivityEstimation): + def __init__(self, *args, **kwargs): + super().__init__(2, *args, **kwargs) + + +@api_util.export("layers.CoilSensitivityEstimation3D") +@tf.keras.utils.register_keras_serializable(package='MRI') +class CoilSensitivityEstimation3D(CoilSensitivityEstimation): + def __init__(self, *args, **kwargs): + super().__init__(3, *args, **kwargs) + + +CoilSensitivityEstimation2D.__doc__ = string.Template( + CoilSensitivityEstimation.__doc__).safe_substitute(rank=2) +CoilSensitivityEstimation3D.__doc__ = string.Template( + CoilSensitivityEstimation.__doc__).safe_substitute(rank=3) + + +CoilSensitivityEstimation2D.__signature__ = doc_util.get_nd_layer_signature( + CoilSensitivityEstimation) +CoilSensitivityEstimation3D.__signature__ = doc_util.get_nd_layer_signature( + CoilSensitivityEstimation) diff --git 
a/tensorflow_mri/python/layers/concatenate.py b/tensorflow_mri/python/layers/concatenate.py new file mode 100644 index 00000000..d852dd2e --- /dev/null +++ b/tensorflow_mri/python/layers/concatenate.py @@ -0,0 +1,67 @@ +# Copyright 2022 The TensorFlow MRI Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Resize and concatenate layer.""" + +import tensorflow as tf + +from tensorflow_mri.python.ops import array_ops + + +@tf.keras.utils.register_keras_serializable(package="MRI") +class ResizeAndConcatenate(tf.keras.layers.Layer): + """Resizes and concatenates a list of inputs. + + Similar to `tf.keras.layers.Concatenate`, but if the inputs have different + shapes, they are resized to match the shape of the first input. + + Args: + axis: Axis along which to concatenate. + """ + def __init__(self, axis=-1, **kwargs): + super().__init__(**kwargs) + self.axis = axis + + def call(self, inputs): # pylint: disable=missing-function-docstring,arguments-differ + if not isinstance(inputs, (list, tuple)): + raise ValueError( + f"Layer {self.__class__.__name__} expects a list of inputs. " + f"Received: {inputs}") + + rank = inputs[0].shape.rank + if rank is None: + raise ValueError( + f"Layer {self.__class__.__name__} expects inputs with known rank. 
" + f"Received: {inputs}") + if self.axis >= rank or self.axis < -rank: + raise ValueError( + f"Layer {self.__class__.__name__} expects `axis` to be in the range " + f"[-{rank}, {rank}) for an input of rank {rank}. " + f"Received: {self.axis}") + # Canonical axis (always positive). + axis = self.axis % rank + + # Resize inputs. + shape = tf.tensor_scatter_nd_update(tf.shape(inputs[0]), [[axis]], [-1]) + resized = [array_ops.resize_with_crop_or_pad(tensor, shape) + for tensor in inputs[1:]] + + # Set the static shape for each resized tensor. + for i, tensor in enumerate(resized): + static_shape = inputs[0].shape.as_list() + static_shape[axis] = inputs[i + 1].shape.as_list()[axis] + static_shape = tf.TensorShape(static_shape) + resized[i] = tf.ensure_shape(tensor, static_shape) + + return tf.concat(inputs[:1] + resized, axis=self.axis) # pylint: disable=unexpected-keyword-arg,no-value-for-parameter diff --git a/tensorflow_mri/python/layers/concatenate_test.py b/tensorflow_mri/python/layers/concatenate_test.py new file mode 100644 index 00000000..4b0e341d --- /dev/null +++ b/tensorflow_mri/python/layers/concatenate_test.py @@ -0,0 +1,52 @@ +# Copyright 2022 The TensorFlow MRI Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Tests for `ResizeAndConcatenate` layers.""" + +import tensorflow as tf + +from tensorflow_mri.python.layers import concatenate +from tensorflow_mri.python.util import test_util + + +class ResizeAndConcatenateTest(test_util.TestCase): + """Tests for layer `ResizeAndConcatenate`.""" + def test_resize_and_concatenate(self): + """Test `ResizeAndConcatenate` layer.""" + # Test data. + x1 = tf.constant([[1.0, 2.0], [3.0, 4.0]]) + x2 = tf.constant([[5.0], [6.0]]) + + # Test concatenation along axis 1. + layer = concatenate.ResizeAndConcatenate(axis=-1) + + result = layer([x1, x2]) + self.assertAllClose([[1.0, 2.0, 5.0], [3.0, 4.0, 6.0]], result) + + result = layer([x2, x1]) + self.assertAllClose([[5.0, 1.0, 2.0], [6.0, 3.0, 4.0]], result) + + # Test concatenation along axis 0. + layer = concatenate.ResizeAndConcatenate(axis=0) + + result = layer([x1, x2]) + self.assertAllClose( + [[1.0, 2.0], [3.0, 4.0], [5.0, 0.0], [6.0, 0.0]], result) + + result = layer([x2, x1]) + self.assertAllClose([[5.0], [6.0], [1.0], [3.0]], result) + + +if __name__ == '__main__': + tf.test.main() diff --git a/tensorflow_mri/python/layers/conv_blocks.py b/tensorflow_mri/python/layers/conv_blocks.py deleted file mode 100644 index 3efa492d..00000000 --- a/tensorflow_mri/python/layers/conv_blocks.py +++ /dev/null @@ -1,244 +0,0 @@ -# Copyright 2021 University College London. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -# Copyright 2021 The TensorFlow Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Convolutional neural network blocks.""" - -import itertools - -import tensorflow as tf - -from tensorflow_mri.python.util import api_util -from tensorflow_mri.python.util import deprecation -from tensorflow_mri.python.util import check_util -from tensorflow_mri.python.util import layer_util - - -@api_util.export("layers.ConvBlock") -@tf.keras.utils.register_keras_serializable(package='MRI') -@deprecation.deprecated( - date=deprecation.REMOVAL_DATE['0.20.0'], - instructions='Use `tfmri.models.ConvBlockND` instead.') -class ConvBlock(tf.keras.layers.Layer): - """A basic convolution block. - - A Conv + BN + Activation block. The number of convolutional layers is - determined by `filters`. BN and activation are optional. - - Args: - filters: A list of `int` numbers or an `int` number of filters. Given an - `int` input, a single convolution is applied; otherwise a series of - convolutions are applied. - kernel_size: An integer or tuple/list of `rank` integers, specifying the - size of the convolution window. Can be a single integer to specify the - same value for all spatial dimensions. 
- strides: An integer or tuple/list of `rank` integers, specifying the strides - of the convolution along each spatial dimension. Can be a single integer - to specify the same value for all spatial dimensions. - rank: An integer specifying the number of spatial dimensions. Defaults to 2. - activation: A callable or a Keras activation identifier. The activation to - use in all layers. Defaults to `'relu'`. - out_activation: A callable or a Keras activation identifier. The activation - to use in the last layer. Defaults to `'same'`, in which case we use the - same activation as in previous layers as defined by `activation`. - use_bias: A `boolean`, whether the block's layers use bias vectors. Defaults - to `True`. - kernel_initializer: A `tf.keras.initializers.Initializer` or a Keras - initializer identifier. Initializer for convolutional kernels. Defaults to - `'VarianceScaling'`. - bias_initializer: A `tf.keras.initializers.Initializer` or a Keras - initializer identifier. Initializer for bias terms. Defaults to `'Zeros'`. - kernel_regularizer: A `tf.keras.initializers.Regularizer` or a Keras - regularizer identifier. Regularizer for convolutional kernels. Defaults to - `None`. - bias_regularizer: A `tf.keras.initializers.Regularizer` or a Keras - regularizer identifier. Regularizer for bias terms. Defaults to `None`. - use_batch_norm: If `True`, use batch normalization. Defaults to `False`. - use_sync_bn: If `True`, use synchronised batch normalization. Defaults to - `False`. - bn_momentum: A `float`. Momentum for the moving average in batch - normalization. - bn_epsilon: A `float`. Small float added to variance to avoid dividing by - zero during batch normalization. - use_residual: A `boolean`. If `True`, the input is added to the outputs to - create a residual learning block. Defaults to `False`. - use_dropout: A `boolean`. If `True`, a dropout layer is inserted after - each activation. Defaults to `False`. - dropout_rate: A `float`. The dropout rate. 
Only relevant if `use_dropout` is - `True`. Defaults to 0.3. - dropout_type: A `str`. The dropout type. Must be one of `'standard'` or - `'spatial'`. Standard dropout drops individual elements from the feature - maps, whereas spatial dropout drops entire feature maps. Only relevant if - `use_dropout` is `True`. Defaults to `'standard'`. - **kwargs: Additional keyword arguments to be passed to base class. - """ - def __init__(self, - filters, - kernel_size, - strides=1, - rank=2, - activation='relu', - out_activation='same', - use_bias=True, - kernel_initializer='VarianceScaling', - bias_initializer='Zeros', - kernel_regularizer=None, - bias_regularizer=None, - use_batch_norm=False, - use_sync_bn=False, - bn_momentum=0.99, - bn_epsilon=0.001, - use_residual=False, - use_dropout=False, - dropout_rate=0.3, - dropout_type='standard', - **kwargs): - """Create a basic convolution block.""" - super().__init__(**kwargs) - - self._filters = [filters] if isinstance(filters, int) else filters - self._kernel_size = kernel_size - self._strides = strides - self._rank = rank - self._activation = activation - self._out_activation = out_activation - self._use_bias = use_bias - self._kernel_initializer = kernel_initializer - self._bias_initializer = bias_initializer - self._kernel_regularizer = kernel_regularizer - self._bias_regularizer = bias_regularizer - self._use_batch_norm = use_batch_norm - self._use_sync_bn = use_sync_bn - self._bn_momentum = bn_momentum - self._bn_epsilon = bn_epsilon - self._use_residual = use_residual - self._use_dropout = use_dropout - self._dropout_rate = dropout_rate - self._dropout_type = check_util.validate_enum( - dropout_type, {'standard', 'spatial'}, 'dropout_type') - self._num_layers = len(self._filters) - - conv = layer_util.get_nd_layer('Conv', self._rank) - - if self._use_batch_norm: - if self._use_sync_bn: - bn = tf.keras.layers.experimental.SyncBatchNormalization - else: - bn = tf.keras.layers.BatchNormalization - - if self._use_dropout: - 
if self._dropout_type == 'standard': - dropout = tf.keras.layers.Dropout - elif self._dropout_type == 'spatial': - dropout = layer_util.get_nd_layer('SpatialDropout', self._rank) - - if tf.keras.backend.image_data_format() == 'channels_last': - self._channel_axis = -1 - else: - self._channel_axis = 1 - - self._convs = [] - self._norms = [] - self._dropouts = [] - for num_filters in self._filters: - self._convs.append( - conv(filters=num_filters, - kernel_size=self._kernel_size, - strides=self._strides, - padding='same', - data_format=None, - activation=None, - use_bias=self._use_bias, - kernel_initializer=self._kernel_initializer, - bias_initializer=self._bias_initializer, - kernel_regularizer=self._kernel_regularizer, - bias_regularizer=self._bias_regularizer)) - if self._use_batch_norm: - self._norms.append( - bn(axis=self._channel_axis, - momentum=self._bn_momentum, - epsilon=self._bn_epsilon)) - if self._use_dropout: - self._dropouts.append(dropout(rate=self._dropout_rate)) - - self._activation_fn = tf.keras.activations.get(self._activation) - if self._out_activation == 'same': - self._out_activation_fn = self._activation_fn - else: - self._out_activation_fn = tf.keras.activations.get(self._out_activation) - - def call(self, inputs, training=None): # pylint: disable=unused-argument, missing-param-doc - """Runs forward pass on the input tensor.""" - x = inputs - - for i, (conv, norm, dropout) in enumerate( - itertools.zip_longest(self._convs, self._norms, self._dropouts)): - # Convolution. - x = conv(x) - # Batch normalization. - if self._use_batch_norm: - x = norm(x, training=training) - # Activation. - if i == self._num_layers - 1: # Last layer. - x = self._out_activation_fn(x) - else: - x = self._activation_fn(x) - # Dropout. - if self._use_dropout: - x = dropout(x, training=training) - - # Residual connection. 
- if self._use_residual: - x += inputs - return x - - def get_config(self): - """Gets layer configuration.""" - config = { - 'filters': self._filters, - 'kernel_size': self._kernel_size, - 'strides': self._strides, - 'rank': self._rank, - 'activation': self._activation, - 'out_activation': self._out_activation, - 'use_bias': self._use_bias, - 'kernel_initializer': self._kernel_initializer, - 'bias_initializer': self._bias_initializer, - 'kernel_regularizer': self._kernel_regularizer, - 'bias_regularizer': self._bias_regularizer, - 'use_batch_norm': self._use_batch_norm, - 'use_sync_bn': self._use_sync_bn, - 'bn_momentum': self._bn_momentum, - 'bn_epsilon': self._bn_epsilon, - 'use_residual': self._use_residual, - 'use_dropout': self._use_dropout, - 'dropout_rate': self._dropout_rate, - 'dropout_type': self._dropout_type - } - base_config = super().get_config() - return {**base_config, **config} diff --git a/tensorflow_mri/python/layers/conv_blocks_test.py b/tensorflow_mri/python/layers/conv_blocks_test.py deleted file mode 100644 index dd8a8039..00000000 --- a/tensorflow_mri/python/layers/conv_blocks_test.py +++ /dev/null @@ -1,75 +0,0 @@ -# Copyright 2021 University College London. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== -"""Tests for module `conv_blocks`.""" - -from absl.testing import parameterized -import tensorflow as tf - -from tensorflow_mri.python.layers import conv_blocks -from tensorflow_mri.python.util import test_util - - -class ConvBlockTest(test_util.TestCase): - """Tests for `ConvBlock`.""" - @parameterized.parameters((64, 3, 2), (32, 3, 3)) - @test_util.run_in_graph_and_eager_modes - def test_conv_block_creation(self, filters, kernel_size, rank): # pylint: disable=missing-param-doc - """Test object creation.""" - inputs = tf.keras.Input( - shape=(128,) * rank + (32,), batch_size=1) - - block = conv_blocks.ConvBlock( - filters=filters, kernel_size=kernel_size) - - features = block(inputs) - - self.assertAllEqual(features.shape, [1] + [128] * rank + [filters]) - - - def test_serialize_deserialize(self): - """Test de/serialization.""" - config = dict( - filters=[32], - kernel_size=3, - strides=1, - rank=2, - activation='tanh', - out_activation='linear', - use_bias=False, - kernel_initializer='ones', - bias_initializer='ones', - kernel_regularizer='l2', - bias_regularizer='l1', - use_batch_norm=True, - use_sync_bn=True, - bn_momentum=0.98, - bn_epsilon=0.002, - use_residual=True, - use_dropout=True, - dropout_rate=0.5, - dropout_type='spatial', - name='conv_block', - dtype='float32', - trainable=True) - - block = conv_blocks.ConvBlock(**config) - self.assertEqual(block.get_config(), config) - - block2 = conv_blocks.ConvBlock.from_config(block.get_config()) - self.assertAllEqual(block.get_config(), block2.get_config()) - - -if __name__ == '__main__': - tf.test.main() diff --git a/tensorflow_mri/python/layers/conv_endec.py b/tensorflow_mri/python/layers/conv_endec.py deleted file mode 100644 index 65030aac..00000000 --- a/tensorflow_mri/python/layers/conv_endec.py +++ /dev/null @@ -1,274 +0,0 @@ -# Copyright 2021 University College London. All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Convolutional encoder-decoder layers.""" - -import tensorflow as tf - -from tensorflow_mri.python.layers import conv_blocks -from tensorflow_mri.python.util import api_util -from tensorflow_mri.python.util import check_util -from tensorflow_mri.python.util import deprecation -from tensorflow_mri.python.util import layer_util - - -@api_util.export("layers.UNet") -@tf.keras.utils.register_keras_serializable(package='MRI') -@deprecation.deprecated( - date=deprecation.REMOVAL_DATE['0.20.0'], - instructions='Use `tfmri.models.UNetND` instead.') -class UNet(tf.keras.layers.Layer): - """A UNet layer. - - Args: - scales: The number of scales. `scales - 1` pooling layers will be added to - the model. Lowering the depth may reduce the amount of memory required for - training. - base_filters: The number of filters that the first layer in the - convolution network will have. The number of filters in following layers - will be calculated from this number. Lowering this number may reduce the - amount of memory required for training. - kernel_size: An integer or tuple/list of `rank` integers, specifying the - size of the convolution window. Can be a single integer to specify the - same value for all spatial dimensions. - pool_size: The pooling size for the pooling operations. Defaults to 2. 
- block_depth: The number of layers in each convolutional block. Defaults to - 2. - use_deconv: If `True`, transpose convolution (deconvolution) will be used - instead of up-sampling. This increases the amount memory required during - training. Defaults to `False`. - rank: An integer specifying the number of spatial dimensions. Defaults to 2. - activation: A callable or a Keras activation identifier. Defaults to - `'relu'`. - kernel_initializer: A `tf.keras.initializers.Initializer` or a Keras - initializer identifier. Initializer for convolutional kernels. Defaults to - `'VarianceScaling'`. - bias_initializer: A `tf.keras.initializers.Initializer` or a Keras - initializer identifier. Initializer for bias terms. Defaults to `'Zeros'`. - kernel_regularizer: A `tf.keras.initializers.Regularizer` or a Keras - regularizer identifier. Regularizer for convolutional kernels. Defaults to - `None`. - bias_regularizer: A `tf.keras.initializers.Regularizer` or a Keras - regularizer identifier. Regularizer for bias terms. Defaults to `None`. - use_batch_norm: If `True`, use batch normalization. Defaults to `False`. - use_sync_bn: If `True`, use synchronised batch normalization. Defaults to - `False`. - bn_momentum: A `float`. Momentum for the moving average in batch - normalization. - bn_epsilon: A `float`. Small float added to variance to avoid dividing by - zero during batch normalization. - out_channels: An `int`. The number of output channels. - out_activation: A callable or a Keras activation identifier. The output - activation. Defaults to `None`. - use_global_residual: A `boolean`. If `True`, adds a global residual - connection to create a residual learning network. Defaults to `False`. - use_dropout: A `boolean`. If `True`, a dropout layer is inserted after - each activation. Defaults to `False`. - dropout_rate: A `float`. The dropout rate. Only relevant if `use_dropout` is - `True`. Defaults to 0.3. - dropout_type: A `str`. The dropout type. 
Must be one of `'standard'` or - `'spatial'`. Standard dropout drops individual elements from the feature - maps, whereas spatial dropout drops entire feature maps. Only relevant if - `use_dropout` is `True`. Defaults to `'standard'`. - **kwargs: Additional keyword arguments to be passed to base class. - """ - def __init__(self, - scales, - base_filters, - kernel_size, - pool_size=2, - rank=2, - block_depth=2, - use_deconv=False, - activation='relu', - kernel_initializer='VarianceScaling', - bias_initializer='Zeros', - kernel_regularizer=None, - bias_regularizer=None, - use_batch_norm=False, - use_sync_bn=False, - bn_momentum=0.99, - bn_epsilon=0.001, - out_channels=None, - out_activation=None, - use_global_residual=False, - use_dropout=False, - dropout_rate=0.3, - dropout_type='standard', - **kwargs): - """Creates a UNet layer.""" - self._scales = scales - self._base_filters = base_filters - self._kernel_size = kernel_size - self._pool_size = pool_size - self._rank = rank - self._block_depth = block_depth - self._use_deconv = use_deconv - self._activation = activation - self._kernel_initializer = kernel_initializer - self._bias_initializer = bias_initializer - self._kernel_regularizer = kernel_regularizer - self._bias_regularizer = bias_regularizer - self._use_batch_norm = use_batch_norm - self._use_sync_bn = use_sync_bn - self._bn_momentum = bn_momentum - self._bn_epsilon = bn_epsilon - self._out_channels = out_channels - self._out_activation = out_activation - self._use_global_residual = use_global_residual - self._use_dropout = use_dropout - self._dropout_rate = dropout_rate - self._dropout_type = check_util.validate_enum( - dropout_type, {'standard', 'spatial'}, 'dropout_type') - - block_config = dict( - filters=None, # To be filled for each scale. 
- kernel_size=self._kernel_size, - strides=1, - rank=self._rank, - activation=self._activation, - kernel_initializer=self._kernel_initializer, - bias_initializer=self._bias_initializer, - kernel_regularizer=self._kernel_regularizer, - bias_regularizer=self._bias_regularizer, - use_batch_norm=self._use_batch_norm, - use_sync_bn=self._use_sync_bn, - bn_momentum=self._bn_momentum, - bn_epsilon=self._bn_epsilon, - use_dropout=self._use_dropout, - dropout_rate=self._dropout_rate, - dropout_type=self._dropout_type) - - pool = layer_util.get_nd_layer('MaxPool', self._rank) - if use_deconv: - upsamp = layer_util.get_nd_layer('ConvTranspose', self._rank) - upsamp_config = dict( - filters=None, # To be filled for each scale. - kernel_size=self._kernel_size, - strides=self._pool_size, - padding='same', - activation=None, - kernel_initializer=self._kernel_initializer, - bias_initializer=self._bias_initializer, - kernel_regularizer=self._kernel_regularizer, - bias_regularizer=self._bias_regularizer) - else: - upsamp = layer_util.get_nd_layer('UpSampling', self._rank) - upsamp_config = dict( - size=self._pool_size) - - if tf.keras.backend.image_data_format() == 'channels_last': - self._channel_axis = -1 - else: - self._channel_axis = 1 - - self._enc_blocks = [] - self._dec_blocks = [] - self._pools = [] - self._upsamps = [] - self._concats = [] - - # Configure backbone and decoder. 
- for scale in range(self._scales): - num_filters = base_filters * (2 ** scale) - block_config['filters'] = [num_filters] * self._block_depth - self._enc_blocks.append(conv_blocks.ConvBlock(**block_config)) - - if scale < self._scales - 1: - self._pools.append(pool( - pool_size=self._pool_size, - strides=self._pool_size, - padding='same')) - if use_deconv: - upsamp_config['filters'] = num_filters - self._upsamps.append(upsamp(**upsamp_config)) - self._concats.append(tf.keras.layers.Concatenate( - axis=self._channel_axis)) - self._dec_blocks.append(conv_blocks.ConvBlock(**block_config)) - - # Configure output block. - if self._out_channels is not None: - block_config['filters'] = self._out_channels - # If network is residual, the activation is performed after the residual - # addition. - if self._use_global_residual: - block_config['activation'] = None - else: - block_config['activation'] = self._out_activation - self._out_block = conv_blocks.ConvBlock(**block_config) - - # Configure residual addition, if requested. - if self._use_global_residual: - self._add = tf.keras.layers.Add() - self._out_activation_fn = tf.keras.activations.get(self._out_activation) - - super().__init__(**kwargs) - - def call(self, inputs, training=None): # pylint: disable=missing-param-doc,unused-argument - """Runs forward pass on the input tensors.""" - x = inputs - - # Backbone. - cache = [None] * (self._scales - 1) # For skip connections to decoder. - for scale in range(self._scales - 1): - cache[scale] = self._enc_blocks[scale](x) - x = self._pools[scale](cache[scale]) - x = self._enc_blocks[-1](x) - - # Decoder. - for scale in range(self._scales - 2, -1, -1): - x = self._upsamps[scale](x) - x = self._concats[scale]([x, cache[scale]]) - x = self._dec_blocks[scale](x) - - # Head. - if self._out_channels is not None: - x = self._out_block(x) - - # Global residual connection. 
- if self._use_global_residual: - x = self._add([x, inputs]) - if self._out_activation is not None: - x = self._out_activation_fn(x) - - return x - - def get_config(self): - """Gets layer configuration.""" - config = { - 'scales': self._scales, - 'base_filters': self._base_filters, - 'kernel_size': self._kernel_size, - 'pool_size': self._pool_size, - 'rank': self._rank, - 'block_depth': self._block_depth, - 'use_deconv': self._use_deconv, - 'activation': self._activation, - 'kernel_initializer': self._kernel_initializer, - 'bias_initializer': self._bias_initializer, - 'kernel_regularizer': self._kernel_regularizer, - 'bias_regularizer': self._bias_regularizer, - 'use_batch_norm': self._use_batch_norm, - 'use_sync_bn': self._use_sync_bn, - 'bn_momentum': self._bn_momentum, - 'bn_epsilon': self._bn_epsilon, - 'out_channels': self._out_channels, - 'out_activation': self._out_activation, - 'use_global_residual': self._use_global_residual, - 'use_dropout': self._use_dropout, - 'dropout_rate': self._dropout_rate, - 'dropout_type': self._dropout_type - } - base_config = super().get_config() - return {**base_config, **config} diff --git a/tensorflow_mri/python/layers/conv_endec_test.py b/tensorflow_mri/python/layers/conv_endec_test.py deleted file mode 100644 index 65c53310..00000000 --- a/tensorflow_mri/python/layers/conv_endec_test.py +++ /dev/null @@ -1,96 +0,0 @@ -# Copyright 2021 University College London. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Tests for module `conv_endec`.""" - -from absl.testing import parameterized -import tensorflow as tf - -from tensorflow_mri.python.layers import conv_endec -from tensorflow_mri.python.util import test_util - - -class UNetTest(test_util.TestCase): - """U-Net tests.""" - @parameterized.parameters((3, 16, 3, 2, None, True, False), - (2, 4, 3, 3, None, False, False), - (2, 8, 5, 2, 2, False, False), - (2, 8, 5, 2, 16, False, True)) - @test_util.run_in_graph_and_eager_modes - def test_unet_creation(self, # pylint: disable=missing-param-doc - scales, - base_filters, - kernel_size, - rank, - out_channels, - use_deconv, - use_global_residual): - """Test object creation.""" - inputs = tf.keras.Input( - shape=(128,) * rank + (16,), batch_size=1) - - network = conv_endec.UNet( - scales=scales, - base_filters=base_filters, - kernel_size=kernel_size, - rank=rank, - use_deconv=use_deconv, - out_channels=out_channels, - use_global_residual=use_global_residual) - - features = network(inputs) - if out_channels is None: - out_channels = base_filters - - self.assertAllEqual(features.shape, [1] + [128] * rank + [out_channels]) - - - def test_serialize_deserialize(self): - """Test de/serialization.""" - config = dict( - scales=3, - base_filters=16, - kernel_size=2, - pool_size=2, - rank=2, - block_depth=2, - use_deconv=True, - activation='tanh', - kernel_initializer='ones', - bias_initializer='ones', - kernel_regularizer='l2', - bias_regularizer='l1', - use_batch_norm=True, - use_sync_bn=True, - bn_momentum=0.98, - bn_epsilon=0.002, - out_channels=1, - out_activation='relu', - use_global_residual=True, - use_dropout=True, - dropout_rate=0.5, - dropout_type='spatial', - name='conv_block', - dtype='float32', - trainable=True) - - block = conv_endec.UNet(**config) - 
self.assertEqual(block.get_config(), config) - - block2 = conv_endec.UNet.from_config(block.get_config()) - self.assertAllEqual(block.get_config(), block2.get_config()) - - -if __name__ == '__main__': - tf.test.main() diff --git a/tensorflow_mri/python/layers/convolutional.py b/tensorflow_mri/python/layers/convolutional.py index 14228afe..207ab35f 100644 --- a/tensorflow_mri/python/layers/convolutional.py +++ b/tensorflow_mri/python/layers/convolutional.py @@ -1,4 +1,4 @@ -# Copyright 2022 University College London. All Rights Reserved. +# Copyright 2022 The TensorFlow MRI Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -18,19 +18,18 @@ import tensorflow as tf -from tensorflow_mri.python.initializers import TFMRI_INITIALIZERS +from tensorflow_mri.python import initializers from tensorflow_mri.python.util import api_util EXTENSION_NOTE = string.Template(""" - .. note:: - This layer can be used as a drop-in replacement for - `tf.keras.layers.${name}`_. However, this one also supports complex-valued - convolutions. Simply pass `dtype='complex64'` or `dtype='complex128'` to - the layer constructor. - - .. _tf.keras.layers.${name}: https://www.tensorflow.org/api_docs/python/tf/keras/layers/${name} + ```{tip} + This layer can be used as a drop-in replacement for + `tf.keras.layers.${name}`, but unlike the core Keras layer, this one also + supports complex-valued convolutions. Simply pass `dtype='complex64'` or + `dtype='complex128'` to the layer constructor. + ``` """) @@ -64,18 +63,12 @@ def complex_conv(base): f'`tf.keras.layers.ConvND`, but got {base}.') def __init__(self, *args, **kwargs): # pylint: disable=invalid-name - # If the requested initializer is one of those provided by TFMRI, prefer - # the TFMRI version. 
- kernel_initializer = kwargs.get('kernel_initializer', 'glorot_uniform') - if (isinstance(kernel_initializer, str) and - kernel_initializer in TFMRI_INITIALIZERS): - kwargs['kernel_initializer'] = TFMRI_INITIALIZERS[kernel_initializer]() - - bias_initializer = kwargs.get('bias_initializer', 'zeros') - if (isinstance(bias_initializer, str) and - bias_initializer in TFMRI_INITIALIZERS): - kwargs['bias_initializer'] = TFMRI_INITIALIZERS[bias_initializer]() - + # Make sure we parse the initializers here to use the TFMRI initializers + # which support complex numbers. + kwargs['kernel_initializer'] = initializers.get( + kwargs.get('kernel_initializer', 'glorot_uniform')) + kwargs['bias_initializer'] = initializers.get( + kwargs.get('bias_initializer', 'zeros')) return base.__init__(self, *args, **kwargs) def convolution_op(self, inputs, kernel): diff --git a/tensorflow_mri/python/layers/convolutional_test.py b/tensorflow_mri/python/layers/convolutional_test.py index ed0c1e79..091c7976 100644 --- a/tensorflow_mri/python/layers/convolutional_test.py +++ b/tensorflow_mri/python/layers/convolutional_test.py @@ -1,4 +1,4 @@ -# Copyright 2022 University College London. All Rights Reserved. +# Copyright 2022 The TensorFlow MRI Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tensorflow_mri/python/layers/data_consistency.py b/tensorflow_mri/python/layers/data_consistency.py new file mode 100644 index 00000000..645c4896 --- /dev/null +++ b/tensorflow_mri/python/layers/data_consistency.py @@ -0,0 +1,112 @@ +# Copyright 2022 The TensorFlow MRI Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Data consistency layers.""" + +import string + +import tensorflow as tf + +from tensorflow_mri.python.ops import math_ops +from tensorflow_mri.python.util import api_util +from tensorflow_mri.python.util import doc_util + + +class LeastSquaresGradientDescent(tf.keras.layers.Layer): + """Least squares gradient descent layer for ${rank}-D images. + """ + def __init__(self, + rank, + scale_initializer=1.0, + expand_channel_dim=False, + reinterpret_complex=False, + **kwargs): + super().__init__(**kwargs) + self.rank = rank + if isinstance(scale_initializer, (float, int)): + self.scale_initializer = tf.keras.initializers.Constant(scale_initializer) + else: + self.scale_initializer = tf.keras.initializers.get(scale_initializer) + self.expand_channel_dim = expand_channel_dim + self.reinterpret_complex = reinterpret_complex + + def build(self, input_shape): + super().build(input_shape) + self.scale = self.add_weight( + name='scale', + shape=(), + dtype=tf.as_dtype(self.dtype).real_dtype, + initializer=self.scale_initializer, + trainable=self.trainable, + constraint=tf.keras.constraints.NonNeg()) + + def call(self, inputs): # pylint: disable=missing-function-docstring,arguments-differ + image, data, operator = parse_inputs(inputs) + if self.reinterpret_complex: + image = math_ops.view_as_complex(image, stacked=False) + if self.expand_channel_dim: + image = tf.squeeze(image, axis=-1) + image -= tf.cast(self.scale, image.dtype) * operator.transform( + operator.transform(image) - 
data, adjoint=True) + if self.expand_channel_dim: + image = tf.expand_dims(image, axis=-1) + if self.reinterpret_complex: + image = math_ops.view_as_real(image, stacked=False) + return image + + def get_config(self): + base_config = super().get_config() + config = { + 'scale_initializer': tf.keras.initializers.serialize( + self.scale_initializer), + 'expand_channel_dim': self.expand_channel_dim, + 'reinterpret_complex': self.reinterpret_complex + } + return {**base_config, **config} + + +def parse_inputs(inputs): + def _parse_inputs(image, data, operator): + return image, data, operator + if isinstance(inputs, tuple): + return _parse_inputs(*inputs) + if isinstance(inputs, dict): + return _parse_inputs(**inputs) + raise ValueError('inputs must be a tuple or dict') + + +@api_util.export("layers.LeastSquaresGradientDescent2D") +@tf.keras.utils.register_keras_serializable(package='MRI') +class LeastSquaresGradientDescent2D(LeastSquaresGradientDescent): + def __init__(self, *args, **kwargs): + super().__init__(2, *args, **kwargs) + + +@api_util.export("layers.LeastSquaresGradientDescent3D") +@tf.keras.utils.register_keras_serializable(package='MRI') +class LeastSquaresGradientDescent3D(LeastSquaresGradientDescent): + def __init__(self, *args, **kwargs): + super().__init__(3, *args, **kwargs) + + +LeastSquaresGradientDescent2D.__doc__ = string.Template( + LeastSquaresGradientDescent.__doc__).safe_substitute(rank=2) +LeastSquaresGradientDescent3D.__doc__ = string.Template( + LeastSquaresGradientDescent.__doc__).safe_substitute(rank=3) + + +LeastSquaresGradientDescent2D.__signature__ = doc_util.get_nd_layer_signature( + LeastSquaresGradientDescent) +LeastSquaresGradientDescent3D.__signature__ = doc_util.get_nd_layer_signature( + LeastSquaresGradientDescent) diff --git a/tensorflow_mri/python/layers/normalization.py b/tensorflow_mri/python/layers/normalization.py new file mode 100644 index 00000000..4c909ee0 --- /dev/null +++ b/tensorflow_mri/python/layers/normalization.py 
@@ -0,0 +1,66 @@ +# Copyright 2022 The TensorFlow MRI Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Normalization layers.""" + +import tensorflow as tf + +from tensorflow_mri.python.util import api_util + + +@api_util.export("layers.Normalized") +@tf.keras.utils.register_keras_serializable(package='MRI') +class Normalized(tf.keras.layers.Wrapper): + r"""Applies the wrapped layer with normalized inputs. + + This layer shifts and scales the inputs into a distribution centered around 0 + with a standard deviation of 1 before passing them to the wrapped layer. + + $$ + x = \frac{x - \mu}{\sigma} + $$ + + After applying the wrapped layer, the outputs are scaled back to the original + distribution. + + $$ + y = \sigma y + \mu + $$ + + Args: + layer: A `tf.keras.layers.Layer`. The wrapped layer. + axis: An `int` or a `list` thereof. The axis or axes to normalize across. + Typically this is the features axis/axes. The left-out axes are typically + the batch axis/axes. Defaults to -1, the last dimension in the input. + **kwargs: Additional keyword arguments to be passed to the base class. 
+ """ + def __init__(self, layer, axis=-1, **kwargs): + super().__init__(layer, **kwargs) + self.axis = axis + + def compute_output_shape(self, input_shape): + return self.layer.compute_output_shape(input_shape) + + def call(self, inputs, *args, **kwargs): + mean, variance = tf.nn.moments(inputs, axes=self.axis, keepdims=True) + std = tf.math.maximum(tf.math.sqrt(variance), tf.keras.backend.epsilon()) + inputs = (inputs - mean) / std + outputs = self.layer(inputs, *args, **kwargs) + outputs = outputs * std + mean + return outputs + + def get_config(self): + base_config = super().get_config() + config = {'axis': self.axis} + return {**base_config, **config} diff --git a/tensorflow_mri/python/layers/normalization_test.py b/tensorflow_mri/python/layers/normalization_test.py new file mode 100644 index 00000000..036fbd36 --- /dev/null +++ b/tensorflow_mri/python/layers/normalization_test.py @@ -0,0 +1,56 @@ +# Copyright 2022 The TensorFlow MRI Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Tests for normalization layers.""" + +import numpy as np +import tensorflow as tf + +from tensorflow_mri.python.layers import normalization +from tensorflow_mri.python.util import test_util + + +class NormalizedTest(test_util.TestCase): + """Tests for `Normalized` layer.""" + @test_util.run_all_execution_modes + def test_normalized_dense(self): + """Tests `Normalized` layer wrapping a `Dense` layer.""" + layer = normalization.Normalized( + tf.keras.layers.Dense(2, bias_initializer='random_uniform')) + layer.build((None, 4)) + + input_data = np.random.uniform(size=(2, 4)) + + def _compute_output(input_data, normalized=False): + if normalized: + mean = input_data.mean(axis=-1, keepdims=True) + std = input_data.std(axis=-1, keepdims=True) + input_data = (input_data - mean) / std + output_data = layer.layer(input_data) + if normalized: + output_data = output_data * std + mean + return output_data + + expected_unnorm = _compute_output(input_data, normalized=False) + expected_norm = _compute_output(input_data, normalized=True) + + result_unnorm = layer.layer(input_data) + result_norm = layer(input_data) + + self.assertAllClose(expected_unnorm, result_unnorm) + self.assertAllClose(expected_norm, result_norm) + + +if __name__ == '__main__': + tf.test.main() diff --git a/tensorflow_mri/python/layers/padding.py b/tensorflow_mri/python/layers/padding.py new file mode 100644 index 00000000..0689b5f0 --- /dev/null +++ b/tensorflow_mri/python/layers/padding.py @@ -0,0 +1,85 @@ +# Copyright 2021 The TensorFlow MRI Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Padding layers.""" + +import tensorflow as tf + + +class DivisorPadding(tf.keras.layers.Layer): + """Divisor padding layer. + + This layer pads the input tensor so that its spatial dimensions are a multiple + of the specified divisor. + + Args: + divisor: An `int` or a `tuple` of `int`. The divisor used to compute the + output shape. + """ + def __init__(self, rank, divisor=2, **kwargs): + super().__init__(**kwargs) + self.rank = rank + if isinstance(divisor, int): + self.divisor = (divisor,) * rank + elif hasattr(divisor, '__len__'): + if len(divisor) != rank: + raise ValueError(f'`divisor` should have {rank} elements. ' + f'Received: {divisor}') + self.divisor = divisor + else: + raise ValueError(f'`divisor` should be either an int or a ' + f'a tuple of {rank} ints. 
' + f'Received: {divisor}') + self.input_spec = tf.keras.layers.InputSpec(ndim=rank + 2) + + def call(self, inputs): # pylint: disable=missing-function-docstring,arguments-differ + static_input_shape = inputs.shape + static_output_shape = tuple( + ((s + d - 1) // d) * d if s is not None else None for s, d in zip( + static_input_shape[1:-1].as_list(), self.divisor)) + static_output_shape = static_input_shape[:1].concatenate( + static_output_shape).concatenate(static_input_shape[-1:]) + + input_shape = tf.shape(inputs)[1:-1] + output_shape = (((input_shape + self.divisor - 1) // self.divisor) * + self.divisor) + left_paddings = (output_shape - input_shape) // 2 + right_paddings = (output_shape - input_shape + 1) // 2 + paddings = tf.stack([left_paddings, right_paddings], axis=-1) + paddings = tf.pad(paddings, [[1, 1], [0, 0]]) # pylint: disable=no-value-for-parameter + + return tf.ensure_shape(tf.pad(inputs, paddings), static_output_shape) # pylint: disable=no-value-for-parameter + + def get_config(self): + config = {'divisor': self.divisor} + base_config = super().get_config() + return {**config, **base_config} + + +@tf.keras.utils.register_keras_serializable(package='MRI') +class DivisorPadding1D(DivisorPadding): + def __init__(self, *args, **kwargs): + super().__init__(1, *args, **kwargs) + + +@tf.keras.utils.register_keras_serializable(package='MRI') +class DivisorPadding2D(DivisorPadding): + def __init__(self, *args, **kwargs): + super().__init__(2, *args, **kwargs) + + +@tf.keras.utils.register_keras_serializable(package='MRI') +class DivisorPadding3D(DivisorPadding): + def __init__(self, *args, **kwargs): + super().__init__(3, *args, **kwargs) diff --git a/tensorflow_mri/python/layers/pooling.py b/tensorflow_mri/python/layers/pooling.py index de93a90d..ee953d86 100644 --- a/tensorflow_mri/python/layers/pooling.py +++ b/tensorflow_mri/python/layers/pooling.py @@ -1,4 +1,4 @@ -# Copyright 2022 University College London. All Rights Reserved. 
+# Copyright 2022 The TensorFlow MRI Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== -"""Convolutional layers.""" +"""Pooling layers.""" import string @@ -23,13 +23,12 @@ EXTENSION_NOTE = string.Template(""" - .. note:: + ```{note} This layer can be used as a drop-in replacement for - `tf.keras.layers.${name}`_. However, this one also supports complex-valued + `tf.keras.layers.${name}`. However, this one also supports complex-valued pooling. Simply pass `dtype='complex64'` or `dtype='complex128'` to the layer constructor. - - .. _tf.keras.layers.${name}: https://www.tensorflow.org/api_docs/python/tf/keras/layers/${name} + ``` """) @@ -53,7 +52,7 @@ def complex_pool(base): if issubclass(base, (tf.keras.layers.AveragePooling1D, tf.keras.layers.AveragePooling2D, tf.keras.layers.AveragePooling3D)): - def call(self, inputs): + def call(self, inputs): # pylint: arguments-differ if tf.as_dtype(self.dtype).is_complex: return tf.dtypes.complex( base.call(self, tf.math.real(inputs)), @@ -65,7 +64,7 @@ def call(self, inputs): elif issubclass(base, (tf.keras.layers.MaxPooling1D, tf.keras.layers.MaxPooling2D, tf.keras.layers.MaxPooling3D)): - def call(self, inputs): + def call(self, inputs): # pylint: arguments-differ if tf.as_dtype(self.dtype).is_complex: # For complex numbers the max is computed according to the magnitude # or absolute value of the complex input. 
To do this we rely on diff --git a/tensorflow_mri/python/layers/pooling_test.py b/tensorflow_mri/python/layers/pooling_test.py index 2ddf4001..590c6070 100644 --- a/tensorflow_mri/python/layers/pooling_test.py +++ b/tensorflow_mri/python/layers/pooling_test.py @@ -1,4 +1,4 @@ -# Copyright 2022 University College London. All Rights Reserved. +# Copyright 2022 The TensorFlow MRI Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tensorflow_mri/python/layers/preproc_layers.py b/tensorflow_mri/python/layers/preproc_layers.py index de96d79b..eedc40fc 100644 --- a/tensorflow_mri/python/layers/preproc_layers.py +++ b/tensorflow_mri/python/layers/preproc_layers.py @@ -1,4 +1,4 @@ -# Copyright 2021 University College London. All Rights Reserved. +# Copyright 2021 The TensorFlow MRI Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -31,7 +31,7 @@ class AddChannelDimension(tf.keras.layers.Layer): Args: **kwargs: Additional keyword arguments to be passed to base class. """ - def call(self, inputs): + def call(self, inputs): # pylint: arguments-differ """Runs forward pass on the input tensor.""" return tf.expand_dims(inputs, -1) @@ -43,7 +43,7 @@ class Cast(tf.keras.layers.Layer): Args: **kwargs: Additional keyword arguments to be passed to base class. 
""" - def call(self, inputs): + def call(self, inputs): # pylint: arguments-differ """Runs forward pass on the input tensor.""" return tf.cast(inputs, self.dtype) @@ -62,7 +62,7 @@ def __init__(self, axis, **kwargs): super().__init__(**kwargs) self._axis = axis - def call(self, inputs): + def call(self, inputs): # pylint: arguments-differ """Runs forward pass on the input tensor.""" return tf.expand_dims(inputs, self._axis) @@ -377,7 +377,7 @@ def __init__(self, repeats, **kwargs): super().__init__(**kwargs) self._repeats = repeats - def call(self, inputs): + def call(self, inputs): # pylint: arguments-differ """Runs forward pass on the input tensor.""" return [inputs] * self._repeats @@ -412,7 +412,7 @@ def __init__(self, shape, padding_mode='constant', **kwargs): self._shape_internal += [-1] self._padding_mode = padding_mode - def call(self, inputs): + def call(self, inputs): # pylint: arguments-differ """Runs forward pass on the input tensor.""" return array_ops.resize_with_crop_or_pad(inputs, self._shape_internal, padding_mode=self._padding_mode) @@ -441,7 +441,7 @@ def __init__(self, output_min=0.0, output_max=1.0, **kwargs): self._output_min = output_min self._output_max = output_max - def call(self, inputs): + def call(self, inputs): # pylint: arguments-differ """Runs forward pass on the input tensor.""" return math_ops.scale_by_min_max(inputs, self._output_min, self._output_max) @@ -468,7 +468,7 @@ def __init__(self, perm=None, conjugate=False, **kwargs): self._perm = perm self._conjugate = conjugate - def call(self, inputs): + def call(self, inputs): # pylint: arguments-differ """Runs forward pass on the input tensor.""" return tf.transpose(inputs, self._perm, conjugate=self._conjugate) diff --git a/tensorflow_mri/python/layers/preproc_layers_test.py b/tensorflow_mri/python/layers/preproc_layers_test.py index e90ce215..1d89725d 100644 --- a/tensorflow_mri/python/layers/preproc_layers_test.py +++ b/tensorflow_mri/python/layers/preproc_layers_test.py @@ -1,4 
+1,4 @@ -# Copyright 2021 University College London. All Rights Reserved. +# Copyright 2021 The TensorFlow MRI Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tensorflow_mri/python/layers/recon_adjoint.py b/tensorflow_mri/python/layers/recon_adjoint.py new file mode 100644 index 00000000..18599a2e --- /dev/null +++ b/tensorflow_mri/python/layers/recon_adjoint.py @@ -0,0 +1,140 @@ +# Copyright 2022 The TensorFlow MRI Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Adjoint reconstruction layer.""" + +import string + +import tensorflow as tf + +from tensorflow_mri.python.ops import math_ops +from tensorflow_mri.python.recon import recon_adjoint +from tensorflow_mri.python.util import api_util +from tensorflow_mri.python.util import doc_util + + +class ReconAdjoint(tf.keras.layers.Layer): + r"""${rank}D adjoint reconstruction layer. + + This layer reconstructs a signal using the adjoint of the specified system + operator. + + Given measurement data $b$ generated by a linear system $A$ such that + $Ax = b$, this function estimates the corresponding signal $x$ as + $x = A^H b$, where $A$ is the specified linear operator. 
+ + ```{note} + This function is part of the family of + [universal operators](https://mrphys.github.io/tensorflow-mri/guide/universal/), + a set of functions and classes designed to work flexibly with any linear + system. + ``` + + ```{seealso} + This is the Keras layer equivalent of `tfmri.recon.adjoint_universal`. + ``` + + ## Inputs + + This layer's `call` method expects the following inputs: + + - data: A `tf.Tensor` of real or complex dtype. The measurement data $b$. + Its shape must be compatible with `operator.range_shape`. + - operator: A `tfmri.linalg.LinearOperator` representing the system operator + $A$. Its range shape must be compatible with `data.shape`. + + ```{attention} + Both `data` and `operator` should be passed as part of the first positional + `inputs` argument, either as as a `tuple` or as a `dict`, in order to take + advantage of this argument's special rules. For more information, see + https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer#call. + ``` + + ## Outputs + + This layer's `call` method returns a `tf.Tensor` containing the reconstructed + signal. Has the same dtype as `data` and shape + `batch_shape + operator.domain_shape`. `batch_shape` is the result of + broadcasting the batch shapes of `data` and `operator`. + + Args: + expand_channel_dim: A `boolean`. Whether to expand the channel dimension. + If `True`, output has shape `batch_shape + operator.domain_shape + [1]`. + If `False`, output has shape `batch_shape + operator.domain_shape`. + Defaults to `True`. + reinterpret_complex: A `boolean`. Whether to reinterpret a complex-valued + output image as a dual-channel real image. Defaults to `False`. + **kwargs: Keyword arguments to be passed to base layer + `tf.keras.layers.Layer`. + """ + def __init__(self, + rank, + expand_channel_dim=False, + reinterpret_complex=False, + **kwargs): + super().__init__(**kwargs) + self.rank = rank # Currently unused. 
+ self.expand_channel_dim = expand_channel_dim + self.reinterpret_complex = reinterpret_complex + + def call(self, inputs): # pylint: arguments-differ + data, operator = parse_inputs(inputs) + image = recon_adjoint.recon_adjoint(data, operator) + if self.expand_channel_dim: + image = tf.expand_dims(image, axis=-1) + if self.reinterpret_complex and image.dtype.is_complex: + image = math_ops.view_as_real(image, stacked=False) + return image + + def get_config(self): + base_config = super().get_config() + config = { + 'expand_channel_dim': self.expand_channel_dim, + 'reinterpret_complex': self.reinterpret_complex + } + return {**base_config, **config} + + +def parse_inputs(inputs): + def _parse_inputs(data, operator): + return data, operator + if isinstance(inputs, tuple): + return _parse_inputs(*inputs) + if isinstance(inputs, dict): + return _parse_inputs(**inputs) + raise ValueError('inputs must be a tuple or dict') + + +@api_util.export("layers.ReconAdjoint2D") +@tf.keras.utils.register_keras_serializable(package='MRI') +class ReconAdjoint2D(ReconAdjoint): + def __init__(self, *args, **kwargs): + super().__init__(2, *args, **kwargs) + + +@api_util.export("layers.ReconAdjoint3D") +@tf.keras.utils.register_keras_serializable(package='MRI') +class ReconAdjoint3D(ReconAdjoint): + def __init__(self, *args, **kwargs): + super().__init__(3, *args, **kwargs) + + +ReconAdjoint2D.__doc__ = string.Template( + ReconAdjoint.__doc__).safe_substitute(rank=2) +ReconAdjoint3D.__doc__ = string.Template( + ReconAdjoint.__doc__).safe_substitute(rank=3) + + +ReconAdjoint2D.__signature__ = doc_util.get_nd_layer_signature(ReconAdjoint) +ReconAdjoint3D.__signature__ = doc_util.get_nd_layer_signature(ReconAdjoint) diff --git a/tensorflow_mri/python/layers/recon_adjoint_test.py b/tensorflow_mri/python/layers/recon_adjoint_test.py new file mode 100644 index 00000000..5e8f170e --- /dev/null +++ b/tensorflow_mri/python/layers/recon_adjoint_test.py @@ -0,0 +1,79 @@ +# Copyright 2022 The 
TensorFlow MRI Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Tests for module `recon_adjoint`.""" +# pylint: disable=missing-param-doc + +import os +import tempfile + +from absl.testing import parameterized +import tensorflow as tf + +from tensorflow_mri.python.linalg import linear_operator_mri +from tensorflow_mri.python.layers import recon_adjoint as recon_adjoint_layer +from tensorflow_mri.python.recon import recon_adjoint +from tensorflow_mri.python.util import test_util + + +class ReconAdjointTest(test_util.TestCase): + """Tests for `ReconAdjoint` layer.""" + @parameterized.product(expand_channel_dim=[True, False]) + def test_recon_adjoint(self, expand_channel_dim): + """Test `ReconAdjoint` layer.""" + # Create layer. + layer = recon_adjoint_layer.ReconAdjoint( + expand_channel_dim=expand_channel_dim) + + # Generate k-space data. + image_shape = tf.constant([4, 4]) + kspace = tf.dtypes.complex( + tf.random.stateless_normal(shape=image_shape, seed=[11, 22]), + tf.random.stateless_normal(shape=image_shape, seed=[12, 34])) + + # Reconstruct image. + expected = recon_adjoint.recon_adjoint_mri(kspace, image_shape) + if expand_channel_dim: + expected = tf.expand_dims(expected, axis=-1) + + operator = linear_operator_mri.LinearOperatorMRI(image_shape) + + # Test with tuple inputs. 
+ input_data = (kspace, operator) + result = layer(input_data) + self.assertAllClose(expected, result) + + # Test with dict inputs. + input_data = {'data': kspace, 'operator': operator} + result = layer(input_data) + self.assertAllClose(expected, result) + + # Test (de)serialization. + layer = recon_adjoint_layer.ReconAdjoint.from_config(layer.get_config()) + result = layer(input_data) + self.assertAllClose(expected, result) + + # Test in model. + inputs = {k: tf.keras.Input(type_spec=tf.type_spec_from_value(v)) + for k, v in input_data.items()} + model = tf.keras.Model(inputs, layer(inputs)) + result = model(input_data) + self.assertAllClose(expected, result) + + # Test saving/loading. + saved_model = os.path.join(tempfile.mkdtemp(), 'saved_model') + model.save(saved_model) + model = tf.keras.models.load_model(saved_model) + result = model(input_data) + self.assertAllClose(expected, result) diff --git a/tensorflow_mri/python/layers/reshaping.py b/tensorflow_mri/python/layers/reshaping.py new file mode 100644 index 00000000..e9c918f4 --- /dev/null +++ b/tensorflow_mri/python/layers/reshaping.py @@ -0,0 +1,97 @@ +# Copyright 2022 The TensorFlow MRI Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Reshaping layers.""" + +import string + +import tensorflow as tf + +from tensorflow_mri.python.util import api_util + + +EXTENSION_NOTE = string.Template(""" + + ```{note} + This layer can be used as a drop-in replacement for + `tf.keras.layers.${name}`. However, this one also supports complex-valued + operations. Simply pass `dtype='complex64'` or `dtype='complex128'` to the + layer constructor. + ``` + +""") + + +def complex_reshape(base): + """Adds complex-valued support to a Keras reshaping layer. + + We need the init method in the pooling layer to replace the `pool_function` + attribute with a function that supports complex inputs. + + Args: + base: The base class to be extended. + + Returns: + A subclass of `base` that supports complex-valued pooling. + + Raises: + ValueError: If `base` is not one of the supported base classes. + """ + if issubclass(base, (tf.keras.layers.UpSampling1D, + tf.keras.layers.UpSampling2D, + tf.keras.layers.UpSampling3D)): + def call(self, inputs): # pylint: arguments-differ + if tf.as_dtype(self.dtype).is_complex: + return tf.dtypes.complex( + base.call(self, tf.math.real(inputs)), + base.call(self, tf.math.imag(inputs))) + + # For real values, we can just use the regular reshape function. + return base.call(self, inputs) + + else: + raise ValueError(f'Unexpected base class: {base}') + + # Dynamically create a subclass of `base` with the same name as `base` and + # with the overriden `convolution_op` method. + subclass = type(base.__name__, (base,), {'call': call}) + + # Copy docs from the base class, adding the extra note. + docstring = base.__doc__ + doclines = docstring.split('\n') + doclines[1:1] = EXTENSION_NOTE.substitute(name=base.__name__).splitlines() + subclass.__doc__ = '\n'.join(doclines) + + return subclass + + +# Define the complex-valued pooling layers. We use a composition of three +# decorators: +# 1. 
`complex_reshape`: Adds complex-valued support to a Keras reshape layer. +# 2. `register_keras_serializable`: Registers the new layer with the Keras +# serialization framework. +# 3. `export`: Exports the new layer to the TFMRI API. +UpSampling1D = api_util.export("layers.UpSampling1D")( + tf.keras.utils.register_keras_serializable(package='MRI')( + complex_reshape(tf.keras.layers.UpSampling1D))) + + +UpSampling2D = api_util.export("layers.UpSampling2D")( + tf.keras.utils.register_keras_serializable(package='MRI')( + complex_reshape(tf.keras.layers.UpSampling2D))) + + +UpSampling3D = api_util.export("layers.UpSampling3D")( + tf.keras.utils.register_keras_serializable(package='MRI')( + complex_reshape(tf.keras.layers.UpSampling3D))) diff --git a/tensorflow_mri/python/ops/geom_ops_test.py b/tensorflow_mri/python/layers/reshaping_test.py similarity index 86% rename from tensorflow_mri/python/ops/geom_ops_test.py rename to tensorflow_mri/python/layers/reshaping_test.py index 6721663d..35a7ce75 100644 --- a/tensorflow_mri/python/ops/geom_ops_test.py +++ b/tensorflow_mri/python/layers/reshaping_test.py @@ -1,4 +1,4 @@ -# Copyright 2021 University College London. All Rights Reserved. +# Copyright 2022 The TensorFlow MRI Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,4 +12,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== -"""Tests for module `geom_ops`.""" +"""Tests for reshaping layers.""" diff --git a/tensorflow_mri/python/layers/signal_layers.py b/tensorflow_mri/python/layers/signal_layers.py index 95317912..a4762cc4 100644 --- a/tensorflow_mri/python/layers/signal_layers.py +++ b/tensorflow_mri/python/layers/signal_layers.py @@ -1,4 +1,4 @@ -# Copyright 2022 University College London. All Rights Reserved. 
+# Copyright 2022 The TensorFlow MRI Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -96,7 +96,7 @@ def __init__(self, rank, inverse, wavelet, mode, format_dict=True, **kwargs): else: raise NotImplementedError('rank must be 1, 2, or 3') - def call(self, inputs): # pylint: disable=missing-function-docstring + def call(self, inputs): # pylint: disable=missing-function-docstring,arguments-differ # If not using dict format, convert input to dict. if self.inverse and not self.format_dict: if not isinstance(inputs, (list, tuple)): diff --git a/tensorflow_mri/python/layers/signal_layers_test.py b/tensorflow_mri/python/layers/signal_layers_test.py index cf281358..ec59fde7 100644 --- a/tensorflow_mri/python/layers/signal_layers_test.py +++ b/tensorflow_mri/python/layers/signal_layers_test.py @@ -1,4 +1,4 @@ -# Copyright 2022 University College London. All Rights Reserved. +# Copyright 2022 The TensorFlow MRI Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tensorflow_mri/python/linalg/__init__.py b/tensorflow_mri/python/linalg/__init__.py new file mode 100644 index 00000000..8954c374 --- /dev/null +++ b/tensorflow_mri/python/linalg/__init__.py @@ -0,0 +1,28 @@ +# Copyright 2021 The TensorFlow MRI Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Linear algebra operators.""" + +from tensorflow_mri.python.linalg import conjugate_gradient +from tensorflow_mri.python.linalg import linear_operator_addition +from tensorflow_mri.python.linalg import linear_operator_adjoint +from tensorflow_mri.python.linalg import linear_operator_composition +from tensorflow_mri.python.linalg import linear_operator_diag +from tensorflow_mri.python.linalg import linear_operator_finite_difference +from tensorflow_mri.python.linalg import linear_operator_gram_matrix +from tensorflow_mri.python.linalg import linear_operator_identity +from tensorflow_mri.python.linalg import linear_operator_mri +from tensorflow_mri.python.linalg import linear_operator_nufft +from tensorflow_mri.python.linalg import linear_operator_wavelet +from tensorflow_mri.python.linalg import linear_operator diff --git a/tensorflow_mri/python/linalg/conjugate_gradient.py b/tensorflow_mri/python/linalg/conjugate_gradient.py new file mode 100644 index 00000000..fb31c732 --- /dev/null +++ b/tensorflow_mri/python/linalg/conjugate_gradient.py @@ -0,0 +1,234 @@ +# Copyright 2021 The TensorFlow MRI Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Conjugate gradient solver.""" + +import collections + +import tensorflow as tf + +from tensorflow_mri.python.util import api_util +from tensorflow_mri.python.linalg import linear_operator + + +@api_util.export("linalg.conjugate_gradient") +def conjugate_gradient(operator, + rhs, + preconditioner=None, + x=None, + tol=1e-5, + max_iterations=20, + bypass_gradient=False, + name=None): + r"""Conjugate gradient solver. + + Solves a linear system of equations $Ax = b$ for self-adjoint, positive + definite matrix $A$ and right-hand side vector $b$, using an + iterative, matrix-free algorithm where the action of the matrix $A$ is + represented by `operator`. The iteration terminates when either the number of + iterations exceeds `max_iterations` or when the residual norm has been reduced + to `tol` times its initial value, i.e. + $(\left\| b - A x_k \right\| <= \mathrm{tol} \left\| b \right\|\\)$. + + ```{note} + This function is similar to + `tf.linalg.experimental.conjugate_gradient`, except it adds support for + complex-valued linear systems and for imaging operators. + ``` + + Args: + operator: A `LinearOperator` that is self-adjoint and positive definite. + rhs: A `tf.Tensor` of shape `[..., N]`. 
The right hand-side of the linear + system. + preconditioner: A `LinearOperator` that approximates the inverse of `A`. + An efficient preconditioner could dramatically improve the rate of + convergence. If `preconditioner` represents matrix `M`(`M` approximates + `A^{-1}`), the algorithm uses `preconditioner.apply(x)` to estimate + `A^{-1}x`. For this to be useful, the cost of applying `M` should be + much lower than computing `A^{-1}` directly. + x: A `tf.Tensor` of shape `[..., N]`. The initial guess for the solution. + tol: A float scalar convergence tolerance. + max_iterations: An `int` giving the maximum number of iterations. + bypass_gradient: A `boolean`. If `True`, the gradient with respect to `rhs` + will be computed by applying the inverse of `operator` to the upstream + gradient with respect to `x` (through CG iteration), instead of relying + on TensorFlow's automatic differentiation. This may reduce memory usage + when training neural networks, but `operator` must not have any trainable + parameters. If `False`, gradients are computed normally. For more details, + see ref. [1]. + name: A name scope for the operation. + + Returns: + A `namedtuple` representing the final state with fields + + - i: A scalar `int32` `tf.Tensor`. Number of iterations executed. + - x: A rank-1 `tf.Tensor` of shape `[..., N]` containing the computed + solution. + - r: A rank-1 `tf.Tensor` of shape `[.., M]` containing the residual vector. + - p: A rank-1 `tf.Tensor` of shape `[..., N]`. `A`-conjugate basis vector. + - gamma: \\(r \dot M \dot r\\), equivalent to \\(||r||_2^2\\) when + `preconditioner=None`. + + Raises: + ValueError: If `operator` is not self-adjoint and positive definite. + + References: + 1. Aggarwal, H. K., Mani, M. P., & Jacob, M. (2018). MoDL: Model-based + deep learning architecture for inverse problems. IEEE transactions on + medical imaging, 38(2), 394-405. 
+ """ + if bypass_gradient: + if preconditioner is not None: + raise ValueError( + "preconditioner is not supported when bypass_gradient is True.") + if x is not None: + raise ValueError("x is not supported when bypass_gradient is True.") + + def _conjugate_gradient_simple(rhs): + return _conjugate_gradient_internal(operator, rhs, + tol=tol, + max_iterations=max_iterations, + name=name) + + @tf.custom_gradient + def _conjugate_gradient_internal_grad(rhs): + result = _conjugate_gradient_simple(rhs) + + def grad(*upstream_grads): + # upstream_grads has the upstream gradient for each element of the + # output tuple (i, x, r, p, gamma). + _, dx, _, _, _ = upstream_grads + return _conjugate_gradient_simple(dx).x + + return result, grad + + return _conjugate_gradient_internal_grad(rhs) + + return _conjugate_gradient_internal(operator, rhs, + preconditioner=preconditioner, + x=x, + tol=tol, + max_iterations=max_iterations, + name=name) + + +def _conjugate_gradient_internal(operator, + rhs, + preconditioner=None, + x=None, + tol=1e-5, + max_iterations=20, + name=None): + """Implementation of `conjugate_gradient`. + + For the parameters, see `conjugate_gradient`. 
+ """ + if isinstance(operator, linear_operator.LinearOperatorMixin): + rhs = operator.flatten_domain_shape(rhs) + + if not (operator.is_self_adjoint and operator.is_positive_definite): + raise ValueError('Expected a self-adjoint, positive definite operator.') + + cg_state = collections.namedtuple('CGState', ['i', 'x', 'r', 'p', 'gamma']) + + def stopping_criterion(i, state): + return tf.math.logical_and( + i < max_iterations, + tf.math.reduce_any( + tf.math.real(tf.norm(state.r, axis=-1)) > tf.math.real(tol))) + + def dot(x, y): + return tf.squeeze( + tf.linalg.matvec( + x[..., tf.newaxis], + y, adjoint_a=True), axis=-1) + + def cg_step(i, state): # pylint: disable=missing-docstring + z = tf.linalg.matvec(operator, state.p) + alpha = state.gamma / dot(state.p, z) + x = state.x + alpha[..., tf.newaxis] * state.p + r = state.r - alpha[..., tf.newaxis] * z + if preconditioner is None: + q = r + else: + q = preconditioner.matvec(r) + gamma = dot(r, q) + beta = gamma / state.gamma + p = q + beta[..., tf.newaxis] * state.p + return i + 1, cg_state(i + 1, x, r, p, gamma) + + # We now broadcast initial shapes so that we have fixed shapes per iteration. 
+ + with tf.name_scope(name or 'conjugate_gradient'): + broadcast_shape = tf.broadcast_dynamic_shape( + tf.shape(rhs)[:-1], + operator.batch_shape_tensor()) + static_broadcast_shape = tf.broadcast_static_shape( + rhs.shape[:-1], + operator.batch_shape) + if preconditioner is not None: + broadcast_shape = tf.broadcast_dynamic_shape( + broadcast_shape, + preconditioner.batch_shape_tensor()) + static_broadcast_shape = tf.broadcast_static_shape( + static_broadcast_shape, + preconditioner.batch_shape) + broadcast_rhs_shape = tf.concat([broadcast_shape, [tf.shape(rhs)[-1]]], -1) + static_broadcast_rhs_shape = static_broadcast_shape.concatenate( + [rhs.shape[-1]]) + r0 = tf.broadcast_to(rhs, broadcast_rhs_shape) + tol *= tf.norm(r0, axis=-1) + + if x is None: + x = tf.zeros( + broadcast_rhs_shape, dtype=rhs.dtype.base_dtype) + x = tf.ensure_shape(x, static_broadcast_rhs_shape) + else: + r0 = rhs - tf.linalg.matvec(operator, x) + if preconditioner is None: + p0 = r0 + else: + p0 = tf.linalg.matvec(preconditioner, r0) + gamma0 = dot(r0, p0) + i = tf.constant(0, dtype=tf.int32) + state = cg_state(i=i, x=x, r=r0, p=p0, gamma=gamma0) + _, state = tf.while_loop( + stopping_criterion, cg_step, [i, state]) + + if isinstance(operator, linear_operator.LinearOperatorMixin): + x = operator.expand_range_dimension(state.x) + else: + x = state.x + + return cg_state( + state.i, + x=x, + r=state.r, + p=state.p, + gamma=state.gamma) diff --git a/tensorflow_mri/python/linalg/conjugate_gradient_test.py b/tensorflow_mri/python/linalg/conjugate_gradient_test.py new file mode 100755 index 00000000..c1604758 --- /dev/null +++ b/tensorflow_mri/python/linalg/conjugate_gradient_test.py @@ -0,0 +1,161 @@ +# Copyright 2021 The TensorFlow MRI Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Tests for module `conjugate_gradient`.""" +# pylint: disable=missing-class-docstring,missing-function-docstring + +from absl.testing import parameterized +import numpy as np +import tensorflow as tf + +from tensorflow_mri.python.linalg import conjugate_gradient +from tensorflow_mri.python.util import test_util + + +@test_util.run_all_in_graph_and_eager_modes +class ConjugateGradientTest(test_util.TestCase): + """Tests for op `conjugate_gradient`.""" + @parameterized.product(dtype=[np.float32, np.float64], + shape=[[1, 1], [4, 4], [10, 10]], + use_static_shape=[True, False]) + def test_conjugate_gradient(self, dtype, shape, use_static_shape): # pylint: disable=missing-param-doc + """Test CG method.""" + np.random.seed(1) + a_np = np.random.uniform( + low=-1.0, high=1.0, size=np.prod(shape)).reshape(shape).astype(dtype) + # Make a self-adjoint, positive definite. + a_np = np.dot(a_np.T, a_np) + # jacobi preconditioner + jacobi_np = np.zeros_like(a_np) + jacobi_np[range(a_np.shape[0]), range(a_np.shape[1])] = ( + 1.0 / a_np.diagonal()) + rhs_np = np.random.uniform( + low=-1.0, high=1.0, size=shape[0]).astype(dtype) + x_np = np.zeros_like(rhs_np) + tol = 1e-6 if dtype == np.float64 else 1e-3 + max_iterations = 20 + + if use_static_shape: + a = tf.constant(a_np) + rhs = tf.constant(rhs_np) + x = tf.constant(x_np) + jacobi = tf.constant(jacobi_np) + else: + a = tf.compat.v1.placeholder_with_default(a_np, shape=None) + rhs = tf.compat.v1.placeholder_with_default(rhs_np, shape=None) + x = tf.compat.v1.placeholder_with_default(x_np, shape=None) + jacobi = tf.compat.v1.placeholder_with_default(jacobi_np, shape=None) + + operator = tf.linalg.LinearOperatorFullMatrix( + a, is_positive_definite=True, is_self_adjoint=True) + preconditioners = [ + None, + # Preconditioner that does nothing beyond change shape. 
+ tf.linalg.LinearOperatorIdentity( + a_np.shape[-1], + dtype=a_np.dtype, + is_positive_definite=True, + is_self_adjoint=True), + # Jacobi preconditioner. + tf.linalg.LinearOperatorFullMatrix( + jacobi, + is_positive_definite=True, + is_self_adjoint=True), + ] + cg_results = [] + for preconditioner in preconditioners: + cg_graph = conjugate_gradient.conjugate_gradient( + operator, + rhs, + preconditioner=preconditioner, + x=x, + tol=tol, + max_iterations=max_iterations) + cg_val = self.evaluate(cg_graph) + norm_r0 = np.linalg.norm(rhs_np) + norm_r = np.linalg.norm(cg_val.r) + self.assertLessEqual(norm_r, tol * norm_r0) + # Validate that we get an equally small residual norm with numpy + # using the computed solution. + r_np = rhs_np - np.dot(a_np, cg_val.x) + norm_r_np = np.linalg.norm(r_np) + self.assertLessEqual(norm_r_np, tol * norm_r0) + cg_results.append(cg_val) + + # Validate that we get same results using identity_preconditioner + # and None + self.assertEqual(cg_results[0].i, cg_results[1].i) + self.assertAlmostEqual(cg_results[0].gamma, cg_results[1].gamma) + self.assertAllClose(cg_results[0].r, cg_results[1].r, rtol=tol) + self.assertAllClose(cg_results[0].x, cg_results[1].x, rtol=tol) + self.assertAllClose(cg_results[0].p, cg_results[1].p, rtol=tol) + + def test_bypass_gradient(self): + """Tests the `bypass_gradient` argument.""" + dtype = np.float32 + shape = [4, 4] + np.random.seed(1) + a_np = np.random.uniform( + low=-1.0, high=1.0, size=np.prod(shape)).reshape(shape).astype(dtype) + # Make a self-adjoint, positive definite. 
+ a_np = np.dot(a_np.T, a_np) + + rhs_np = np.random.uniform( + low=-1.0, high=1.0, size=shape[0]).astype(dtype) + + tol = 1e-3 + max_iterations = 20 + + a = tf.constant(a_np) + rhs = tf.constant(rhs_np) + operator = tf.linalg.LinearOperatorFullMatrix( + a, is_positive_definite=True, is_self_adjoint=True) + + with tf.GradientTape(persistent=True) as tape: + tape.watch(rhs) + result = conjugate_gradient.conjugate_gradient( + operator, + rhs, + tol=tol, + max_iterations=max_iterations) + result_bypass = conjugate_gradient.conjugate_gradient( + operator, + rhs, + tol=tol, + max_iterations=max_iterations, + bypass_gradient=True) + + grad = tape.gradient(result.x, rhs) + grad_bypass = tape.gradient(result_bypass.x, rhs) + self.assertAllClose(result, result_bypass) + self.assertAllClose(grad, grad_bypass, rtol=tol) + + +if __name__ == '__main__': + tf.test.main() diff --git a/tensorflow_mri/python/linalg/linear_operator.py b/tensorflow_mri/python/linalg/linear_operator.py new file mode 100644 index 00000000..9ad6bc3c --- /dev/null +++ b/tensorflow_mri/python/linalg/linear_operator.py @@ -0,0 +1,679 @@ +# Copyright 2021 The TensorFlow MRI Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Base linear operator.""" + +import abc +import functools + +import tensorflow as tf +from tensorflow.python.framework import type_spec +from tensorflow.python.ops.linalg import linear_operator as tf_linear_operator + +from tensorflow_mri.python.util import api_util +from tensorflow_mri.python.util import tensor_util + + +class LinearOperatorMixin(tf.linalg.LinearOperator): + """Mixin for linear operators meant to operate on images.""" + def transform(self, x, adjoint=False, name="transform"): + """Transforms a batch of inputs. + + Applies this operator to a batch of non-vectorized inputs `x`. + + Args: + x: A `tf.Tensor` with compatible shape and same dtype as `self`. + adjoint: A `boolean`. If `True`, transforms the input using the adjoint + of the operator, instead of the operator itself. + name: A name for this operation. + + Returns: + The transformed `tf.Tensor` with the same `dtype` as `self`. + """ + with self._name_scope(name): # pylint: disable=not-callable + x = tf.convert_to_tensor(x, name="x") + self._check_input_dtype(x) + input_shape = self.range_shape if adjoint else self.domain_shape + input_shape.assert_is_compatible_with(x.shape[-input_shape.rank:]) # pylint: disable=invalid-unary-operand-type + return self._transform(x, adjoint=adjoint) + + def preprocess(self, x, adjoint=False, name="preprocess"): + """Preprocesses a batch of inputs. + + This method should be called **before** applying the operator via + `transform`, `matvec` or `matmul`. The `adjoint` flag should be set to the + same value as the `adjoint` flag passed to `transform`, `matvec` or + `matmul`. + + Args: + x: A `tf.Tensor` with compatible shape and same dtype as `self`. + adjoint: A `boolean`. If `True`, preprocesses the input in preparation + for applying the adjoint. + name: A name for this operation. + + Returns: + The preprocessed `tf.Tensor` with the same `dtype` as `self`. 
+ """ + with self._name_scope(name): # pylint: disable=not-callable + x = tf.convert_to_tensor(x, name="x") + self._check_input_dtype(x) + input_shape = self.range_shape if adjoint else self.domain_shape + input_shape.assert_is_compatible_with(x.shape[-input_shape.rank:]) # pylint: disable=invalid-unary-operand-type + return self._preprocess(x, adjoint=adjoint) + + def postprocess(self, x, adjoint=False, name="postprocess"): + """Postprocesses a batch of inputs. + + This method should be called **after** applying the operator via + `transform`, `matvec` or `matmul`. The `adjoint` flag should be set to the + same value as the `adjoint` flag passed to `transform`, `matvec` or + `matmul`. + + Args: + x: A `tf.Tensor` with compatible shape and same dtype as `self`. + adjoint: A `boolean`. If `True`, postprocesses the input after applying + the adjoint. + name: A name for this operation. + + Returns: + The preprocessed `tf.Tensor` with the same `dtype` as `self`. + """ + with self._name_scope(name): # pylint: disable=not-callable + x = tf.convert_to_tensor(x, name="x") + self._check_input_dtype(x) + input_shape = self.domain_shape if adjoint else self.range_shape + input_shape.assert_is_compatible_with(x.shape[-input_shape.rank:]) # pylint: disable=invalid-unary-operand-type + return self._postprocess(x, adjoint=adjoint) + + @property + def domain_shape(self): + """Domain shape of this linear operator.""" + return self._domain_shape() + + @property + def range_shape(self): + """Range shape of this linear operator.""" + return self._range_shape() + + def domain_shape_tensor(self, name="domain_shape_tensor"): + """Domain shape of this linear operator, determined at runtime.""" + with self._name_scope(name): # pylint: disable=not-callable + # Prefer to use statically defined shape if available. 
+ if self.domain_shape.is_fully_defined(): + return tensor_util.convert_shape_to_tensor(self.domain_shape.as_list()) + return self._domain_shape_tensor() + + def range_shape_tensor(self, name="range_shape_tensor"): + """Range shape of this linear operator, determined at runtime.""" + with self._name_scope(name): # pylint: disable=not-callable + # Prefer to use statically defined shape if available. + if self.range_shape.is_fully_defined(): + return tensor_util.convert_shape_to_tensor(self.range_shape.as_list()) + return self._range_shape_tensor() + + def batch_shape_tensor(self, name="batch_shape_tensor"): + """Batch shape of this linear operator, determined at runtime.""" + with self._name_scope(name): # pylint: disable=not-callable + if self.batch_shape.is_fully_defined(): + return tensor_util.convert_shape_to_tensor(self.batch_shape.as_list()) + return self._batch_shape_tensor() + + def adjoint(self, name="adjoint"): + """Returns the adjoint of this linear operator. + + The returned operator is a valid `LinearOperatorMixin` instance. + + Calling `self.adjoint()` and `self.H` are equivalent. + + Args: + name: A name for this operation. + + Returns: + A `LinearOperator` derived from `LinearOperatorMixin`, which + represents the adjoint of this linear operator. + """ + if self.is_self_adjoint: + return self + with self._name_scope(name): # pylint: disable=not-callable + return LinearOperatorAdjoint(self) + + H = property(adjoint, None) + + @abc.abstractmethod + def _transform(self, x, adjoint=False): + # Subclasses must override this method. + raise NotImplementedError("Method `_transform` is not implemented.") + + def _preprocess(self, x, adjoint=False): + # Subclasses may override this method. + return x + + def _postprocess(self, x, adjoint=False): + # Subclasses may override this method. + return x + + def _matvec(self, x, adjoint=False): + # Default implementation of `_matvec` for imaging operator. 
The vectorized + # input `x` is first expanded to the its full shape, then transformed, then + # vectorized again. Typically subclasses should not need to override this + # method. + x = self.expand_range_dimension(x) if adjoint else \ + self.expand_domain_dimension(x) + x = self._transform(x, adjoint=adjoint) + x = self.flatten_domain_shape(x) if adjoint else \ + self.flatten_range_shape(x) + return x + + def _matmul(self, x, adjoint=False, adjoint_arg=False): + # Default implementation of `matmul` for imaging operator. Basically we + # just call `matvec` for each column of `x` (or for each row, if + # `adjoint_arg` is `True`). `tf.einsum` is used to transpose the input arg, + # moving the column/row dimension to be the leading batch dimension to be + # unpacked by `tf.map_fn`. Typically subclasses should not need to override + # this method. + batch_shape = tf.broadcast_static_shape(x.shape[:-2], self.batch_shape) + output_dim = self.domain_dimension if adjoint else self.range_dimension + if adjoint_arg and x.dtype.is_complex: + x = tf.math.conj(x) + x = tf.einsum('...ij->i...j' if adjoint_arg else '...ij->j...i', x) + y = tf.map_fn(functools.partial(self.matvec, adjoint=adjoint), x, + fn_output_signature=tf.TensorSpec( + shape=batch_shape + [output_dim], + dtype=x.dtype)) + y = tf.einsum('i...j->...ji' if adjoint_arg else 'j...i->...ij', y) + return y + + @abc.abstractmethod + def _domain_shape(self): + # Users must override this method. + return tf.TensorShape(None) + + @abc.abstractmethod + def _range_shape(self): + # Users must override this method. + return tf.TensorShape(None) + + def _batch_shape(self): + # Users should override this method if this operator has a batch shape. + return tf.TensorShape([]) + + def _domain_shape_tensor(self): + # Users should override this method if they need to provide a dynamic domain + # shape. 
+ raise NotImplementedError("_domain_shape_tensor is not implemented.") + + def _range_shape_tensor(self): + # Users should override this method if they need to provide a dynamic range + # shape. + raise NotImplementedError("_range_shape_tensor is not implemented.") + + def _batch_shape_tensor(self): # pylint: disable=arguments-differ + # Users should override this method if they need to provide a dynamic batch + # shape. + return tf.constant([], dtype=tf.dtypes.int32) + + def _shape(self): + # Default implementation of `_shape` for imaging operators. Typically + # subclasses should not need to override this method. + return self._batch_shape().concatenate(tf.TensorShape( + [self.range_shape.num_elements(), + self.domain_shape.num_elements()])) + + def _shape_tensor(self): + # Default implementation of `_shape_tensor` for imaging operators. Typically + # subclasses should not need to override this method. + return tf.concat([self.batch_shape_tensor(), + [tf.math.reduce_prod(self.range_shape_tensor()), + tf.math.reduce_prod(self.domain_shape_tensor())]], 0) + + def flatten_domain_shape(self, x): + """Flattens `x` to match the domain dimension of this operator. + + Args: + x: A `Tensor`. Must have shape `[...] + self.domain_shape`. + + Returns: + The flattened `Tensor`. Has shape `[..., self.domain_dimension]`. 
+ """ + # pylint: disable=invalid-unary-operand-type + domain_rank_static = self.domain_shape.rank + if domain_rank_static is not None: + domain_rank_dynamic = domain_rank_static + else: + domain_rank_dynamic = tf.shape(self.domain_shape_tensor())[0] + + if domain_rank_static is not None: + self.domain_shape.assert_is_compatible_with( + x.shape[-domain_rank_static:]) + + if domain_rank_static is not None: + batch_shape = x.shape[:-domain_rank_static] + else: + batch_shape = tf.TensorShape(None) + batch_shape_tensor = tf.shape(x)[:-domain_rank_dynamic] + + output_shape = batch_shape + self.domain_dimension + output_shape_tensor = tf.concat( + [batch_shape_tensor, [self.domain_dimension_tensor()]], 0) + + x = tf.reshape(x, output_shape_tensor) + return tf.ensure_shape(x, output_shape) + + def flatten_range_shape(self, x): + """Flattens `x` to match the range dimension of this operator. + + Args: + x: A `Tensor`. Must have shape `[...] + self.range_shape`. + + Returns: + The flattened `Tensor`. Has shape `[..., self.range_dimension]`. + """ + # pylint: disable=invalid-unary-operand-type + range_rank_static = self.range_shape.rank + if range_rank_static is not None: + range_rank_dynamic = range_rank_static + else: + range_rank_dynamic = tf.shape(self.range_shape_tensor())[0] + + if range_rank_static is not None: + self.range_shape.assert_is_compatible_with( + x.shape[-range_rank_static:]) + + if range_rank_static is not None: + batch_shape = x.shape[:-range_rank_static] + else: + batch_shape = tf.TensorShape(None) + batch_shape_tensor = tf.shape(x)[:-range_rank_dynamic] + + output_shape = batch_shape + self.range_dimension + output_shape_tensor = tf.concat( + [batch_shape_tensor, [self.range_dimension_tensor()]], 0) + + x = tf.reshape(x, output_shape_tensor) + return tf.ensure_shape(x, output_shape) + + def expand_domain_dimension(self, x): + """Expands `x` to match the domain shape of this operator. + + Args: + x: A `Tensor`. 
Must have shape `[..., self.domain_dimension]`. + + Returns: + The expanded `Tensor`. Has shape `[...] + self.domain_shape`. + """ + self.domain_dimension.assert_is_compatible_with(x.shape[-1]) + + batch_shape = x.shape[:-1] + batch_shape_tensor = tf.shape(x)[:-1] + + output_shape = batch_shape + self.domain_shape + output_shape_tensor = tf.concat([ + batch_shape_tensor, self.domain_shape_tensor()], 0) + + x = tf.reshape(x, output_shape_tensor) + return tf.ensure_shape(x, output_shape) + + def expand_range_dimension(self, x): + """Expands `x` to match the range shape of this operator. + + Args: + x: A `Tensor`. Must have shape `[..., self.range_dimension]`. + + Returns: + The expanded `Tensor`. Has shape `[...] + self.range_shape`. + """ + self.range_dimension.assert_is_compatible_with(x.shape[-1]) + + batch_shape = x.shape[:-1] + batch_shape_tensor = tf.shape(x)[:-1] + + output_shape = batch_shape + self.range_shape + output_shape_tensor = tf.concat([ + batch_shape_tensor, self.range_shape_tensor()], 0) + + x = tf.reshape(x, output_shape_tensor) + return tf.ensure_shape(x, output_shape) + + +@api_util.export("linalg.LinearOperator") +class LinearOperator(LinearOperatorMixin, tf.linalg.LinearOperator): # pylint: disable=abstract-method + r"""Base class defining a [batch of] linear operator[s]. + + Provides access to common matrix operations without the need to materialize + the matrix. + + This operator is similar to `tf.linalg.LinearOperator`_, but has additional + methods to simplify operations on images, while maintaining compatibility + with the TensorFlow linear algebra framework. + + Inputs and outputs to this linear operator or its subclasses may have + meaningful non-vectorized N-D shapes. Thus this class defines the additional + properties `domain_shape` and `range_shape` and the methods + `domain_shape_tensor` and `range_shape_tensor`. 
These enrich the information + provided by the built-in properties `shape`, `domain_dimension`, + `range_dimension` and methods `domain_dimension_tensor` and + `range_dimension_tensor`, which only have information about the vectorized 1D + shapes. + + Subclasses of this operator must define the methods `_domain_shape` and + `_range_shape`, which return the static domain and range shapes of the + operator. Optionally, subclasses may also define the methods + `_domain_shape_tensor` and `_range_shape_tensor`, which return the dynamic + domain and range shapes of the operator. These two methods will only be called + if `_domain_shape` and `_range_shape` do not return fully defined static + shapes. + + Subclasses must define the abstract method `_transform`, which + applies the operator (or its adjoint) to a [batch of] images. This internal + method is called by `transform`. In general, subclasses of this operator + should not define the methods `_matvec` or `_matmul`. These have default + implementations which call `_transform`. + + Operators derived from this class may be used in any of the following ways: + + 1. Using method `transform`, which expects a full-shaped input and returns + a full-shaped output, i.e. a tensor with shape `[...] + shape`, where + `shape` is either the `domain_shape` or the `range_shape`. This method is + unique to operators derived from this class. + 2. Using method `matvec`, which expects a vectorized input and returns a + vectorized output, i.e. a tensor with shape `[..., n]` where `n` is + either the `domain_dimension` or the `range_dimension`. This method is + part of the TensorFlow linear algebra framework. + 3. Using method `matmul`, which expects matrix inputs and returns matrix + outputs. Note that a matrix is just a column vector in this context, i.e. + a tensor with shape `[..., n, 1]`, where `n` is either the + `domain_dimension` or the `range_dimension`. Matrices which are not column + vectors (i.e. 
whose last dimension is not 1) are not supported. This + method is part of the TensorFlow linear algebra framework. + + Operators derived from this class may also be used with the functions + `tf.linalg.matvec`_ and `tf.linalg.matmul`_, which will call the + corresponding methods. + + This class also provides the convenience functions `flatten_domain_shape` and + `flatten_range_shape` to flatten full-shaped inputs/outputs to their + vectorized form. Conversely, `expand_domain_dimension` and + `expand_range_dimension` may be used to expand vectorized inputs/outputs to + their full-shaped form. + + **Preprocessing and post-processing** + + It can sometimes be useful to modify a linear operator in order to maintain + certain mathematical properties, such as Hermitian symmetry or positive + definiteness (e.g., [1]). As a result of these modifications the linear + operator may no longer accurately represent the physical system under + consideration. This can be compensated through the use of a pre-processing + step and/or post-processing step. To this end linear operators expose a + `preprocess` method and a `postprocess` method. The user may define their + behavior by overriding the `_preprocess` and/or `_postprocess` methods. If + not overriden, the default behavior is to apply the identity. In the context + of optimization methods, these steps typically only need to be applied at the + beginning or at the end of the optimization. + + **Subclassing** + + Subclasses must always define `_transform`, which implements this operator's + functionality (and its adjoint). In general, subclasses should not define the + methods `_matvec` or `_matmul`. These have default implementations which call + `_transform`. + + Subclasses must always define `_domain_shape` + and `_range_shape`, which return the static domain/range shapes of the + operator. 
If the subclassed operator needs to provide dynamic domain/range + shapes and the static shapes are not always fully-defined, it must also define + `_domain_shape_tensor` and `_range_shape_tensor`, which return the dynamic + domain/range shapes of the operator. In general, subclasses should not define + the methods `_shape` or `_shape_tensor`. These have default implementations. + + If the subclassed operator has a non-scalar batch shape, it must also define + `_batch_shape` which returns the static batch shape. If the static batch shape + is not always fully-defined, the subclass must also define + `_batch_shape_tensor`, which returns the dynamic batch shape. + + Args: + dtype: The `tf.dtypes.DType` of the matrix that this operator represents. + is_non_singular: Expect that this operator is non-singular. + is_self_adjoint: Expect that this operator is equal to its Hermitian + transpose. If `dtype` is real, this is equivalent to being symmetric. + is_positive_definite: Expect that this operator is positive definite, + meaning the quadratic form $x^H A x$ has positive real part for all + nonzero $x$. Note that we do not require the operator to be + self-adjoint to be positive-definite. + is_square: Expect that this operator acts like square [batch] matrices. + name: A name for this `LinearOperator`. + + References: + 1. https://onlinelibrary.wiley.com/doi/full/10.1002/mrm.1241 + + .. _tf.linalg.LinearOperator: https://www.tensorflow.org/api_docs/python/tf/linalg/LinearOperator + .. _tf.linalg.matvec: https://www.tensorflow.org/api_docs/python/tf/linalg/matvec + .. _tf.linalg.matmul: https://www.tensorflow.org/api_docs/python/tf/linalg/matmul + """ + + +@api_util.export("linalg.LinearOperatorAdjoint") +class LinearOperatorAdjoint(LinearOperatorMixin, # pylint: disable=abstract-method + tf.linalg.LinearOperatorAdjoint): + """Linear operator representing the adjoint of another operator. 
+ + `LinearOperatorAdjoint` is initialized with an operator $A$ and + represents its adjoint $A^H$. + + .. note: + Similar to `tf.linalg.LinearOperatorAdjoint`_, but with imaging extensions. + + Args: + operator: A `LinearOperator`. + is_non_singular: Expect that this operator is non-singular. + is_self_adjoint: Expect that this operator is equal to its Hermitian + transpose. + is_positive_definite: Expect that this operator is positive definite, + meaning the quadratic form $x^H A x$ has positive real part for all + nonzero $x$. Note that we do not require the operator to be + self-adjoint to be positive-definite. + is_square: Expect that this operator acts like square [batch] matrices. + name: A name for this `LinearOperator`. Default is `operator.name + + "_adjoint"`. + + .. _tf.linalg.LinearOperatorAdjoint: https://www.tensorflow.org/api_docs/python/tf/linalg/LinearOperatorAdjoint + """ + def _transform(self, x, adjoint=False): + # pylint: disable=protected-access + return self.operator._transform(x, adjoint=(not adjoint)) + + def _preprocess(self, x, adjoint=False): + # pylint: disable=protected-access + return self.operator._preprocess(x, adjoint=(not adjoint)) + + def _postprocess(self, x, adjoint=False): + # pylint: disable=protected-access + return self.operator._postprocess(x, adjoint=(not adjoint)) + + def _domain_shape(self): + return self.operator.range_shape + + def _range_shape(self): + return self.operator.domain_shape + + def _batch_shape(self): + return self.operator.batch_shape + + def _domain_shape_tensor(self): + return self.operator.range_shape_tensor() + + def _range_shape_tensor(self): + return self.operator.domain_shape_tensor() + + def _batch_shape_tensor(self): + return self.operator.batch_shape_tensor() + + +class _LinearOperatorSpec(type_spec.BatchableTypeSpec): # pylint: disable=abstract-method + """A tf.TypeSpec for `LinearOperator` objects. 
+ + This is very similar to `tf.linalg.LinearOperatorSpec`, but it adds a + `shape` attribute which is required by Keras. + + Note that this attribute is redundant, as it can always be computed from + other attributes. However, the details of this computation vary between + operators, so it's easier to just store it. + """ + __slots__ = ("_shape", + "_dtype", + "_param_specs", + "_non_tensor_params", + "_prefer_static_fields") + + def __init__(self, + shape, + dtype, + param_specs, + non_tensor_params, + prefer_static_fields): + """Initializes a new `_LinearOperatorSpec`. + + Args: + shape: A `tf.TensorShape`. + dtype: A `tf.dtypes.DType`. + param_specs: Python `dict` of `tf.TypeSpec` instances that describe + kwargs to the `LinearOperator`'s constructor that are `Tensor`-like or + `CompositeTensor` subclasses. + non_tensor_params: Python `dict` containing non-`Tensor` and non- + `CompositeTensor` kwargs to the `LinearOperator`'s constructor. + prefer_static_fields: Python `tuple` of strings corresponding to the names + of `Tensor`-like args to the `LinearOperator`'s constructor that may be + stored as static values, if known. These are typically shapes, indices, + or axis values. + """ + self._shape = shape + self._dtype = dtype + self._param_specs = param_specs + self._non_tensor_params = non_tensor_params + self._prefer_static_fields = prefer_static_fields + + @classmethod + def from_operator(cls, operator): + """Builds a `_LinearOperatorSpec` from a `LinearOperator` instance. + + Args: + operator: An instance of `LinearOperator`. + + Returns: + linear_operator_spec: An instance of `_LinearOperatorSpec` to be used as + the `TypeSpec` of `operator`. 
+ """ + validation_fields = ("is_non_singular", "is_self_adjoint", + "is_positive_definite", "is_square") + kwargs = tf_linear_operator._extract_attrs( # pylint: disable=protected-access + operator, + keys=set(operator._composite_tensor_fields + validation_fields)) # pylint: disable=protected-access + + non_tensor_params = {} + param_specs = {} + for k, v in list(kwargs.items()): + type_spec_or_v = tf_linear_operator._extract_type_spec_recursively(v) # pylint: disable=protected-access + is_tensor = [isinstance(x, type_spec.TypeSpec) + for x in tf.nest.flatten(type_spec_or_v)] + if all(is_tensor): + param_specs[k] = type_spec_or_v + elif not any(is_tensor): + non_tensor_params[k] = v + else: + raise NotImplementedError(f"Field {k} contains a mix of `Tensor` and " + f" non-`Tensor` values.") + + return cls( + shape=operator.shape, + dtype=operator.dtype, + param_specs=param_specs, + non_tensor_params=non_tensor_params, + prefer_static_fields=operator._composite_tensor_prefer_static_fields) # pylint: disable=protected-access + + def _to_components(self, obj): + return tf_linear_operator._extract_attrs(obj, keys=list(self._param_specs)) + + def _from_components(self, components): + kwargs = dict(self._non_tensor_params, **components) + return self.value_type(**kwargs) + + @property + def _component_specs(self): + return self._param_specs + + def _serialize(self): + return (self._shape, + self._dtype, + self._param_specs, + self._non_tensor_params, + self._prefer_static_fields) + + def _to_legacy_output_shapes(self): + return self._shape + + def _to_legacy_output_types(self): + return self._dtype + + def _copy(self, **overrides): + kwargs = { + "shape": self._shape, + "dtype": self._dtype, + "param_specs": self._param_specs, + "non_tensor_params": self._non_tensor_params, + "prefer_static_fields": self._prefer_static_fields + } + kwargs.update(overrides) + return type(self)(**kwargs) + + def _batch(self, batch_size): + """Returns a TypeSpec representing a batch of 
objects with this TypeSpec.""" + return self._copy( + param_specs=tf.nest.map_structure( + lambda spec: spec._batch(batch_size), # pylint: disable=protected-access + self._param_specs)) + + def _unbatch(self): + """Returns a TypeSpec representing a single element of this TypeSpec.""" + return self._copy( + param_specs=tf.nest.map_structure( + lambda spec: spec._unbatch(), # pylint: disable=protected-access + self._param_specs)) + + @property + def shape(self): + """Returns a `tf.TensorShape` representing the static shape.""" + # This property is required to use linear operators with Keras. + return self._shape + + @property + def dtype(self): + """Returns a `tf.dtypes.DType` representing the dtype.""" + return self._dtype + + def with_shape(self, shape): + """Returns a new `tf.TypeSpec` with the given shape.""" + # This method is required to use linear operators with Keras. + return self._copy(shape=shape) + + +def make_composite_tensor(cls, module_name="tfmri.linalg"): + """Class decorator to convert `LinearOperator`s to `CompositeTensor`s. + + Overrides the default `make_composite_tensor` to use the custom + `LinearOperatorSpec`. + """ + spec_name = "{}Spec".format(cls.__name__) + spec_type = type(spec_name, (_LinearOperatorSpec,), {"value_type": cls}) + type_spec.register("{}.{}".format(module_name, spec_name))(spec_type) + cls._type_spec = property(spec_type.from_operator) # pylint: disable=protected-access + return cls diff --git a/tensorflow_mri/python/linalg/linear_operator_addition.py b/tensorflow_mri/python/linalg/linear_operator_addition.py new file mode 100644 index 00000000..81db6b75 --- /dev/null +++ b/tensorflow_mri/python/linalg/linear_operator_addition.py @@ -0,0 +1,71 @@ +# Copyright 2021 The TensorFlow MRI Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Addition of linear operators.""" + +from tensorflow_mri.python.ops import array_ops +from tensorflow_mri.python.linalg import linear_operator +from tensorflow_mri.python.util import api_util +from tensorflow_mri.python.util import linalg_ext + + +@api_util.export("linalg.LinearOperatorAddition") +class LinearOperatorAddition(linear_operator.LinearOperatorMixin, # pylint: disable=abstract-method +                             linalg_ext.LinearOperatorAddition): + """Adds one or more linear operators. + + `LinearOperatorAddition` is initialized with a list of operators + $A_1, A_2, ..., A_J$ and represents their addition + $A_1 + A_2 + ... + A_J$. + + Args: + operators: A `list` of `LinearOperator` objects, each with the same `dtype` + and shape. + is_non_singular: Expect that this operator is non-singular. + is_self_adjoint: Expect that this operator is equal to its Hermitian + transpose. + is_positive_definite: Expect that this operator is positive definite, + meaning the quadratic form $x^H A x$ has positive real part for all + nonzero $x$. Note that we do not require the operator to be + self-adjoint to be positive-definite. + is_square: Expect that this operator acts like square [batch] matrices. + name: A name for this `LinearOperator`. Default is the individual + operators' names joined with `_p_`. 
+ """ + def _transform(self, x, adjoint=False): + # pylint: disable=protected-access + result = self.operators[0]._transform(x, adjoint=adjoint) + for operator in self.operators[1:]: + result += operator._transform(x, adjoint=adjoint) + return result + + def _domain_shape(self): + return self.operators[0].domain_shape + + def _range_shape(self): + return self.operators[0].range_shape + + def _batch_shape(self): + return array_ops.broadcast_static_shapes( + *[operator.batch_shape for operator in self.operators]) + + def _domain_shape_tensor(self): + return self.operators[0].domain_shape_tensor() + + def _range_shape_tensor(self): + return self.operators[0].range_shape_tensor() + + def _batch_shape_tensor(self): + return array_ops.broadcast_dynamic_shapes( + *[operator.batch_shape_tensor() for operator in self.operators]) diff --git a/tensorflow_mri/python/linalg/linear_operator_addition_test.py b/tensorflow_mri/python/linalg/linear_operator_addition_test.py new file mode 100644 index 00000000..24dda3c1 --- /dev/null +++ b/tensorflow_mri/python/linalg/linear_operator_addition_test.py @@ -0,0 +1,15 @@ +# Copyright 2021 The TensorFlow MRI Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Tests for module `linear_operator_addition`.""" diff --git a/tensorflow_mri/python/linalg/linear_operator_adjoint.py b/tensorflow_mri/python/linalg/linear_operator_adjoint.py new file mode 100644 index 00000000..9ebd6828 --- /dev/null +++ b/tensorflow_mri/python/linalg/linear_operator_adjoint.py @@ -0,0 +1,22 @@ +# Copyright 2021 The TensorFlow MRI Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Adjoint of a linear operator.""" + +from tensorflow_mri.python.linalg import linear_operator + + +# This is actually defined in `linear_operator` module to avoid circular +# dependencies. +LinearOperatorAdjoint = linear_operator.LinearOperatorAdjoint diff --git a/tensorflow_mri/python/linalg/linear_operator_adjoint_test.py b/tensorflow_mri/python/linalg/linear_operator_adjoint_test.py new file mode 100644 index 00000000..894aac5e --- /dev/null +++ b/tensorflow_mri/python/linalg/linear_operator_adjoint_test.py @@ -0,0 +1,15 @@ +# Copyright 2021 The TensorFlow MRI Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Tests for module `linear_operator_adjoint`.""" diff --git a/tensorflow_mri/python/linalg/linear_operator_algebra.py b/tensorflow_mri/python/linalg/linear_operator_algebra.py new file mode 100644 index 00000000..ff0f2965 --- /dev/null +++ b/tensorflow_mri/python/linalg/linear_operator_algebra.py @@ -0,0 +1,21 @@ +# Copyright 2022 The TensorFlow MRI Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Linear operator algebra.""" + +from tensorflow.python.ops.linalg import linear_operator_algebra + + +RegisterAdjoint = linear_operator_algebra.RegisterAdjoint +RegisterInverse = linear_operator_algebra.RegisterInverse diff --git a/tensorflow_mri/python/linalg/linear_operator_composition.py b/tensorflow_mri/python/linalg/linear_operator_composition.py new file mode 100644 index 00000000..0659f904 --- /dev/null +++ b/tensorflow_mri/python/linalg/linear_operator_composition.py @@ -0,0 +1,83 @@ +# Copyright 2021 The TensorFlow MRI Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Composition of linear operators.""" + +import tensorflow as tf + +from tensorflow_mri.python.ops import array_ops +from tensorflow_mri.python.linalg import linear_operator +from tensorflow_mri.python.util import api_util + + +@api_util.export("linalg.LinearOperatorComposition") +class LinearOperatorComposition(linear_operator.LinearOperatorMixin, # pylint: disable=abstract-method + tf.linalg.LinearOperatorComposition): + """Composes one or more linear operators. + + `LinearOperatorComposition` is initialized with a list of operators + $A_1, A_2, ..., A_J$ and represents their composition + $A_1 A_2 ... A_J$. + + .. 
note: + Similar to `tf.linalg.LinearOperatorComposition`_, but with imaging + extensions. + + Args: + operators: A `list` of `LinearOperator` objects, each with the same `dtype` + and composable shape. + is_non_singular: Expect that this operator is non-singular. + is_self_adjoint: Expect that this operator is equal to its Hermitian + transpose. + is_positive_definite: Expect that this operator is positive definite, + meaning the quadratic form $x^H A x$ has positive real part for all + nonzero $x$. Note that we do not require the operator to be + self-adjoint to be positive-definite. + is_square: Expect that this operator acts like square [batch] matrices. + name: A name for this `LinearOperator`. Default is the individual + operators names joined with `_o_`. + + .. _tf.linalg.LinearOperatorComposition: https://www.tensorflow.org/api_docs/python/tf/linalg/LinearOperatorComposition + """ + def _transform(self, x, adjoint=False): + # pylint: disable=protected-access + if adjoint: + transform_order_list = self.operators + else: + transform_order_list = list(reversed(self.operators)) + + result = transform_order_list[0]._transform(x, adjoint=adjoint) + for operator in transform_order_list[1:]: + result = operator._transform(result, adjoint=adjoint) + return result + + def _domain_shape(self): + return self.operators[-1].domain_shape + + def _range_shape(self): + return self.operators[0].range_shape + + def _batch_shape(self): + return array_ops.broadcast_static_shapes( + *[operator.batch_shape for operator in self.operators]) + + def _domain_shape_tensor(self): + return self.operators[-1].domain_shape_tensor() + + def _range_shape_tensor(self): + return self.operators[0].range_shape_tensor() + + def _batch_shape_tensor(self): + return array_ops.broadcast_dynamic_shapes( + *[operator.batch_shape_tensor() for operator in self.operators]) diff --git a/tensorflow_mri/python/linalg/linear_operator_composition_test.py 
b/tensorflow_mri/python/linalg/linear_operator_composition_test.py new file mode 100644 index 00000000..55d48a34 --- /dev/null +++ b/tensorflow_mri/python/linalg/linear_operator_composition_test.py @@ -0,0 +1,16 @@ +# Copyright 2021 The TensorFlow MRI Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Tests for module `linear_operator_composition`.""" +# pylint: disable=missing-class-docstring,missing-function-docstring diff --git a/tensorflow_mri/python/linalg/linear_operator_diag.py b/tensorflow_mri/python/linalg/linear_operator_diag.py new file mode 100644 index 00000000..e89ee47a --- /dev/null +++ b/tensorflow_mri/python/linalg/linear_operator_diag.py @@ -0,0 +1,101 @@ +# Copyright 2021 The TensorFlow MRI Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Diagonal linear operator.""" + +import tensorflow as tf + +from tensorflow_mri.python.linalg import linear_operator +from tensorflow_mri.python.util import api_util +from tensorflow_mri.python.util import check_util + + +@api_util.export("linalg.LinearOperatorDiag") +class LinearOperatorDiag(linear_operator.LinearOperatorMixin, # pylint: disable=abstract-method + tf.linalg.LinearOperatorDiag): + """Linear operator representing a square diagonal matrix. + + This operator acts like a [batch] diagonal matrix `A` with shape + `[B1, ..., Bb, N, N]` for some `b >= 0`. The first `b` indices index a + batch member. For every batch index `(i1, ..., ib)`, `A[i1, ..., ib, : :]` is + an `N x N` matrix. This matrix `A` is not materialized, but for + purposes of broadcasting this shape will be relevant. + + .. note: + Similar to `tf.linalg.LinearOperatorDiag`_, but with imaging extensions. + + Args: + diag: A `tf.Tensor` of shape `[B1, ..., Bb, *S]`. + rank: An `int`. The rank of `S`. Must be <= `diag.shape.rank`. + is_non_singular: Expect that this operator is non-singular. + is_self_adjoint: Expect that this operator is equal to its Hermitian + transpose. If `diag` is real, this is auto-set to `True`. + is_positive_definite: Expect that this operator is positive definite, + meaning the quadratic form $x^H A x$ has positive real part for all + nonzero $x$. Note that we do not require the operator to be + self-adjoint to be positive-definite. + is_square: Expect that this operator acts like square [batch] matrices. + name: A name for this `LinearOperator`. + + .. 
_tf.linalg.LinearOperatorDiag: https://www.tensorflow.org/api_docs/python/tf/linalg/LinearOperatorDiag + """ + # pylint: disable=invalid-unary-operand-type + def __init__(self, + diag, + rank, + is_non_singular=None, + is_self_adjoint=None, + is_positive_definite=None, + is_square=True, + name='LinearOperatorDiag'): + # pylint: disable=invalid-unary-operand-type + diag = tf.convert_to_tensor(diag, name='diag') + self._rank = check_util.validate_rank(rank, name='rank', accept_none=False) + if self._rank > diag.shape.rank: + raise ValueError( + f"Argument `rank` must be <= `diag.shape.rank`, but got: {rank}") + + self._shape_tensor_value = tf.shape(diag) + self._shape_value = diag.shape + batch_shape = self._shape_tensor_value[:-self._rank] + + super().__init__( + diag=tf.reshape(diag, tf.concat([batch_shape, [-1]], 0)), + is_non_singular=is_non_singular, + is_self_adjoint=is_self_adjoint, + is_positive_definite=is_positive_definite, + is_square=is_square, + name=name) + + def _transform(self, x, adjoint=False): + diag = tf.math.conj(self.diag) if adjoint else self.diag + return tf.reshape(diag, self.domain_shape_tensor()) * x + + def _domain_shape(self): + return self._shape_value[-self._rank:] + + def _range_shape(self): + return self._shape_value[-self._rank:] + + def _batch_shape(self): + return self._shape_value[:-self._rank] + + def _domain_shape_tensor(self): + return self._shape_tensor_value[-self._rank:] + + def _range_shape_tensor(self): + return self._shape_tensor_value[-self._rank:] + + def _batch_shape_tensor(self): + return self._shape_tensor_value[:-self._rank] diff --git a/tensorflow_mri/python/linalg/linear_operator_diag_test.py b/tensorflow_mri/python/linalg/linear_operator_diag_test.py new file mode 100644 index 00000000..b46fc955 --- /dev/null +++ b/tensorflow_mri/python/linalg/linear_operator_diag_test.py @@ -0,0 +1,103 @@ +# Copyright 2021 The TensorFlow MRI Authors. All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Tests for module `linear_operator_diag`.""" +# pylint: disable=missing-class-docstring,missing-function-docstring + +import tensorflow as tf + +from tensorflow_mri.python.linalg import linear_operator +from tensorflow_mri.python.util import test_util + + +class LinearOperatorDiagTest(test_util.TestCase): + """Tests for `linear_operator.LinearOperatorDiag`.""" + def test_transform(self): + """Test `transform` method.""" + diag = tf.constant([[1., 2.], [3., 4.]]) + diag_linop = linear_operator.LinearOperatorDiag(diag, rank=2) + x = tf.constant([[2., 2.], [2., 2.]]) + self.assertAllClose([[2., 4.], [6., 8.]], diag_linop.transform(x)) + + def test_transform_adjoint(self): + """Test `transform` method with adjoint.""" + diag = tf.constant([[1., 2.], [3., 4.]]) + diag_linop = linear_operator.LinearOperatorDiag(diag, rank=2) + x = tf.constant([[2., 2.], [2., 2.]]) + self.assertAllClose([[2., 4.], [6., 8.]], + diag_linop.transform(x, adjoint=True)) + + def test_transform_complex(self): + """Test `transform` method with complex values.""" + diag = tf.constant([[1. + 1.j, 2. + 2.j], [3. + 3.j, 4. + 4.j]], + dtype=tf.complex64) + diag_linop = linear_operator.LinearOperatorDiag(diag, rank=2) + x = tf.constant([[2., 2.], [2., 2.]], dtype=tf.complex64) + self.assertAllClose([[2. + 2.j, 4. + 4.j], [6. + 6.j, 8. 
+ 8.j]], + diag_linop.transform(x)) + + def test_transform_adjoint_complex(self): + """Test `transform` method with adjoint and complex values.""" + diag = tf.constant([[1. + 1.j, 2. + 2.j], [3. + 3.j, 4. + 4.j]], + dtype=tf.complex64) + diag_linop = linear_operator.LinearOperatorDiag(diag, rank=2) + x = tf.constant([[2., 2.], [2., 2.]], dtype=tf.complex64) + self.assertAllClose([[2. - 2.j, 4. - 4.j], [6. - 6.j, 8. - 8.j]], + diag_linop.transform(x, adjoint=True)) + + def test_shapes(self): + """Test shapes.""" + diag = tf.constant([[1., 2.], [3., 4.]]) + diag_linop = linear_operator.LinearOperatorDiag(diag, rank=2) + self.assertIsInstance(diag_linop.domain_shape, tf.TensorShape) + self.assertIsInstance(diag_linop.range_shape, tf.TensorShape) + self.assertAllEqual([2, 2], diag_linop.domain_shape) + self.assertAllEqual([2, 2], diag_linop.range_shape) + + def test_tensor_shapes(self): + """Test tensor shapes.""" + diag = tf.constant([[1., 2.], [3., 4.]]) + diag_linop = linear_operator.LinearOperatorDiag(diag, rank=2) + self.assertIsInstance(diag_linop.domain_shape_tensor(), tf.Tensor) + self.assertIsInstance(diag_linop.range_shape_tensor(), tf.Tensor) + self.assertAllEqual([2, 2], diag_linop.domain_shape_tensor()) + self.assertAllEqual([2, 2], diag_linop.range_shape_tensor()) + + def test_batch_shapes(self): + """Test batch shapes.""" + diag = tf.constant([[1., 2., 3.], [4., 5., 6.]]) + diag_linop = linear_operator.LinearOperatorDiag(diag, rank=1) + self.assertIsInstance(diag_linop.domain_shape, tf.TensorShape) + self.assertIsInstance(diag_linop.range_shape, tf.TensorShape) + self.assertIsInstance(diag_linop.batch_shape, tf.TensorShape) + self.assertAllEqual([3], diag_linop.domain_shape) + self.assertAllEqual([3], diag_linop.range_shape) + self.assertAllEqual([2], diag_linop.batch_shape) + + def test_tensor_batch_shapes(self): + """Test tensor batch shapes.""" + diag = tf.constant([[1., 2., 3.], [4., 5., 6.]]) + diag_linop = linear_operator.LinearOperatorDiag(diag, 
rank=1) + self.assertIsInstance(diag_linop.domain_shape_tensor(), tf.Tensor) + self.assertIsInstance(diag_linop.range_shape_tensor(), tf.Tensor) + self.assertIsInstance(diag_linop.batch_shape_tensor(), tf.Tensor) + self.assertAllEqual([3], diag_linop.domain_shape) + self.assertAllEqual([3], diag_linop.range_shape) + self.assertAllEqual([2], diag_linop.batch_shape) + + def test_name(self): + """Test names.""" + diag = tf.constant([[1., 2.], [3., 4.]]) + diag_linop = linear_operator.LinearOperatorDiag(diag, rank=2) + self.assertEqual("LinearOperatorDiag", diag_linop.name) diff --git a/tensorflow_mri/python/linalg/linear_operator_finite_difference.py b/tensorflow_mri/python/linalg/linear_operator_finite_difference.py new file mode 100644 index 00000000..66833b67 --- /dev/null +++ b/tensorflow_mri/python/linalg/linear_operator_finite_difference.py @@ -0,0 +1,125 @@ +# Copyright 2021 The TensorFlow MRI Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
# ==============================================================================
"""Finite difference linear operator."""


import tensorflow as tf

from tensorflow_mri.python.util import api_util
from tensorflow_mri.python.util import check_util
from tensorflow_mri.python.linalg import linear_operator
from tensorflow_mri.python.util import tensor_util


@api_util.export("linalg.LinearOperatorFiniteDifference")
class LinearOperatorFiniteDifference(linear_operator.LinearOperator):  # pylint: disable=abstract-method
  """Linear operator representing a finite difference matrix.

  Computes first-order forward differences along `axis`: the forward mapping
  takes `x` to `x[1:] - x[:-1]` along the difference axis, so the range has
  one less element than the domain along that axis.

  Args:
    domain_shape: A 1D `tf.Tensor` or a `list` of `int`. The domain shape of
      this linear operator.
    axis: An `int`. The axis along which the finite difference is taken.
      Defaults to -1.
    dtype: A `tf.dtypes.DType`. The data type for this operator. Defaults to
      `float32`.
    name: A `str`. A name for this operator.
  """
  def __init__(self,
               domain_shape,
               axis=-1,
               dtype=tf.dtypes.float32,
               name="LinearOperatorFiniteDifference"):

    parameters = dict(
        domain_shape=domain_shape,
        axis=axis,
        dtype=dtype,
        name=name
    )

    # Compute the static and dynamic shapes and save them for later use.
    self._domain_shape_static, self._domain_shape_dynamic = (
        tensor_util.static_and_dynamic_shapes_from_shape(domain_shape))

    # Validate axis and canonicalize to negative. This ensures the correct
    # axis is selected in the presence of batch dimensions.
    self.axis = check_util.validate_static_axes(
        axis, self._domain_shape_static.rank,
        min_length=1,
        max_length=1,
        canonicalize="negative",
        scalar_to_list=False)

    # Compute range shape statically. The range has one less element along
    # the difference axis than the domain.
    range_shape_static = self._domain_shape_static.as_list()
    if range_shape_static[self.axis] is not None:
      range_shape_static[self.axis] -= 1
    range_shape_static = tf.TensorShape(range_shape_static)
    self._range_shape_static = range_shape_static

    # Now compute the dynamic range shape: axes before the difference axis are
    # unchanged, the difference axis loses one element, and any trailing axes
    # are unchanged. The trailing slice must come from the *domain* shape;
    # slicing the partially built range shape here would duplicate the updated
    # difference axis (e.g. domain [4, 4] with axis=-2 previously produced
    # [3, 3] instead of the correct [3, 4]).
    if self.axis == -1:
      range_shape_dynamic = tf.concat([
          self._domain_shape_dynamic[:-1],
          [self._domain_shape_dynamic[-1] - 1]], 0)
    else:
      range_shape_dynamic = tf.concat([
          self._domain_shape_dynamic[:self.axis],
          [self._domain_shape_dynamic[self.axis] - 1],
          self._domain_shape_dynamic[self.axis + 1:]], 0)
    self._range_shape_dynamic = range_shape_dynamic

    super().__init__(dtype,
                     is_non_singular=None,
                     is_self_adjoint=None,
                     is_positive_definite=None,
                     is_square=None,
                     name=name,
                     parameters=parameters)

  def _transform(self, x, adjoint=False):
    """Applies the finite difference (or its adjoint) to `x`."""
    if adjoint:
      # Adjoint of the forward difference: y -> y[j-1] - y[j] along the
      # difference axis, realized by padding on each side and subtracting.
      # This maps the (shorter) range back to the domain.
      paddings1 = [[0, 0]] * x.shape.rank
      paddings2 = [[0, 0]] * x.shape.rank
      paddings1[self.axis] = [1, 0]
      paddings2[self.axis] = [0, 1]
      x1 = tf.pad(x, paddings1)  # pylint: disable=no-value-for-parameter
      x2 = tf.pad(x, paddings2)  # pylint: disable=no-value-for-parameter
      x = x1 - x2
    else:
      # Forward difference: x[1:] - x[:-1] along the difference axis.
      slice1 = [slice(None)] * x.shape.rank
      slice2 = [slice(None)] * x.shape.rank
      slice1[self.axis] = slice(1, None)
      slice2[self.axis] = slice(None, -1)
      x1 = x[tuple(slice1)]
      x2 = x[tuple(slice2)]
      x = x1 - x2

    return x

  def _domain_shape(self):
    return self._domain_shape_static

  def _range_shape(self):
    return self._range_shape_static

  def _domain_shape_tensor(self):
    return self._domain_shape_dynamic

  def _range_shape_tensor(self):
    return self._range_shape_dynamic
# Copyright 2021 The TensorFlow MRI Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for module `linear_operator_finite_difference`."""
# pylint: disable=missing-class-docstring,missing-function-docstring

import numpy as np
import tensorflow as tf

from tensorflow_mri.python.linalg import linear_operator_finite_difference
from tensorflow_mri.python.util import test_util


class LinearOperatorFiniteDifferenceTest(test_util.TestCase):
  """Tests for the finite difference linear operator."""
  @classmethod
  def setUpClass(cls):
    super().setUpClass()
    # A 1D operator over 4 elements, and a 2D operator differencing axis -2.
    cls.linop1 = (
        linear_operator_finite_difference.LinearOperatorFiniteDifference([4]))
    cls.linop2 = (
        linear_operator_finite_difference.LinearOperatorFiniteDifference(
            [4, 4], axis=-2))
    # Dense matrix equivalent of `linop1`, used as reference below.
    cls.matrix1 = tf.convert_to_tensor([[-1, 1, 0, 0],
                                        [0, -1, 1, 0],
                                        [0, 0, -1, 1]], dtype=tf.float32)

  def test_transform(self):
    """Transform should match `np.diff` along the configured axis."""
    x = tf.random.normal([4, 4])
    self.assertAllClose(self.linop2.transform(x), np.diff(x, axis=-2))

  def test_matvec(self):
    """Matvec should agree with `np.diff` and the dense reference matrix."""
    x = tf.constant([1, 2, 4, 8], dtype=tf.float32)
    y = tf.linalg.matvec(self.linop1, x)
    self.assertAllClose(y, [1, 2, 4])
    self.assertAllClose(y, np.diff(x))
    self.assertAllClose(y, tf.linalg.matvec(self.matrix1, x))

    x2 = tf.range(16, dtype=tf.float32)
    y2 = tf.linalg.matvec(self.linop2, x2)
    self.assertAllClose(y2, [4] * 12)

  def test_matvec_adjoint(self):
    """Adjoint matvec should agree with the transposed dense matrix."""
    x = tf.constant([1, 2, 4], dtype=tf.float32)
    y = tf.linalg.matvec(self.linop1, x, adjoint_a=True)
    self.assertAllClose(y,
                        tf.linalg.matvec(tf.transpose(self.matrix1), x))

  def test_shapes(self):
    """Static and dynamic shapes should match the expected values."""
    self._test_all_shapes(self.linop1, [4], [3])
    self._test_all_shapes(self.linop2, [4, 4], [3, 4])

  def _test_all_shapes(self, linop, domain_shape, range_shape):
    """Checks both the static and the dynamic domain/range shapes."""
    self.assertIsInstance(linop.domain_shape, tf.TensorShape)
    self.assertAllEqual(linop.domain_shape, domain_shape)
    self.assertAllEqual(linop.domain_shape_tensor(), domain_shape)

    self.assertIsInstance(linop.range_shape, tf.TensorShape)
    self.assertAllEqual(linop.range_shape, range_shape)
    self.assertAllEqual(linop.range_shape_tensor(), range_shape)


if __name__ == '__main__':
  tf.test.main()
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Gram matrix of a linear operator."""

import tensorflow as tf

from tensorflow_mri.python.linalg import linear_operator
from tensorflow_mri.python.linalg import linear_operator_addition
from tensorflow_mri.python.linalg import linear_operator_composition
from tensorflow_mri.python.linalg import linear_operator_identity
from tensorflow_mri.python.util import api_util


@api_util.export("linalg.LinearOperatorGramMatrix")
class LinearOperatorGramMatrix(linear_operator.LinearOperator):  # pylint: disable=abstract-method
  r"""Linear operator representing the Gram matrix of an operator.

  If $A$ is a `LinearOperator`, this operator is equivalent to
  $A^H A$.

  The Gram matrix of $A$ appears in the normal equation
  $A^H A x = A^H b$ associated with the least squares problem
  ${\mathop{\mathrm{argmin}}_x} {\left \| Ax-b \right \|_2^2}$.

  This operator is self-adjoint and positive definite. Therefore, linear systems
  defined by this linear operator can be solved using the conjugate gradient
  method.

  This operator supports the optional addition of a regularization parameter
  $\lambda$ and a transform matrix $T$. If these are provided,
  this operator becomes $A^H A + \lambda T^H T$. This appears
  in the regularized normal equation
  $\left ( A^H A + \lambda T^H T \right ) x = A^H b + \lambda T^H T x_0$,
  associated with the regularized least squares problem
  ${\mathop{\mathrm{argmin}}_x} {\left \| Ax-b \right \|_2^2 + \lambda \left \| T(x-x_0) \right \|_2^2}$.

  Args:
    operator: A `tfmri.linalg.LinearOperator`. The operator $A$ whose Gram
      matrix is represented by this linear operator.
    reg_parameter: A `Tensor` of shape `[B1, ..., Bb]` and real dtype.
      The regularization parameter $\lambda$. Defaults to 0.
    reg_operator: A `tfmri.linalg.LinearOperator`. The regularization transform
      $T$. Defaults to the identity.
    gram_operator: A `tfmri.linalg.LinearOperator`. The Gram matrix
      $A^H A$. This may be optionally provided to use a specialized
      Gram matrix implementation. Defaults to `None`.
    is_non_singular: Expect that this operator is non-singular.
    is_self_adjoint: Expect that this operator is equal to its Hermitian
      transpose.
    is_positive_definite: Expect that this operator is positive definite,
      meaning the quadratic form $x^H A x$ has positive real part for all
      nonzero $x$. Note that we do not require the operator to be
      self-adjoint to be positive-definite.
    is_square: Expect that this operator acts like square [batch] matrices.
    name: A name for this `LinearOperator`.
  """
  def __init__(self,
               operator,
               reg_parameter=None,
               reg_operator=None,
               gram_operator=None,
               is_non_singular=None,
               is_self_adjoint=True,
               is_positive_definite=True,
               is_square=True,
               name=None):
    # Reject contradictory hints up front: a Gram matrix A^H A is always
    # self-adjoint, positive definite and square, so no work is done before
    # validation.
    if not is_self_adjoint:
      raise ValueError("A Gram matrix is always self-adjoint.")
    if not is_positive_definite:
      raise ValueError("A Gram matrix is always positive-definite.")
    if not is_square:
      raise ValueError("A Gram matrix is always square.")

    # Record all constructor arguments, including `gram_operator` (previously
    # omitted), so the operator can be reconstructed from its parameters.
    parameters = dict(
        operator=operator,
        reg_parameter=reg_parameter,
        reg_operator=reg_operator,
        gram_operator=gram_operator,
        is_non_singular=is_non_singular,
        is_self_adjoint=is_self_adjoint,
        is_positive_definite=is_positive_definite,
        is_square=is_square,
        name=name)
    self._operator = operator
    self._reg_parameter = reg_parameter
    self._reg_operator = reg_operator
    self._gram_operator = gram_operator
    if gram_operator is not None:
      # Use the user-provided specialized Gram matrix implementation.
      self._composed = gram_operator
    else:
      # Default implementation: compose A^H with A.
      self._composed = linear_operator_composition.LinearOperatorComposition(
          operators=[self._operator.H, self._operator])

    if self._reg_parameter is not None:
      # Add the regularization term lambda * T^H T (or lambda * I when no
      # transform T is given).
      reg_operator_gm = linear_operator_identity.LinearOperatorScaledIdentity(
          domain_shape=self._operator.domain_shape,
          multiplier=tf.cast(self._reg_parameter, self._operator.dtype))
      if self._reg_operator is not None:
        reg_operator_gm = linear_operator_composition.LinearOperatorComposition(
            operators=[reg_operator_gm,
                       self._reg_operator.H,
                       self._reg_operator])
      self._composed = linear_operator_addition.LinearOperatorAddition(
          operators=[self._composed, reg_operator_gm])

    # Forward `name` to the base class; it was previously dropped, so any
    # user-provided name was silently ignored.
    super().__init__(operator.dtype,
                     is_non_singular=is_non_singular,
                     is_self_adjoint=is_self_adjoint,
                     is_positive_definite=is_positive_definite,
                     is_square=is_square,
                     name=name,
                     parameters=parameters)

  def _transform(self, x, adjoint=False):
    """Applies A^H A (plus regularization, if configured) to `x`.

    The composed operator is self-adjoint, so `adjoint` does not change
    the result.
    """
    return self._composed.transform(x, adjoint=adjoint)

  def _domain_shape(self):
    return self.operator.domain_shape

  def _range_shape(self):
    # The Gram matrix is square: its range equals the operator's domain.
    return self.operator.domain_shape

  def _batch_shape(self):
    return self.operator.batch_shape

  def _domain_shape_tensor(self):
    return self.operator.domain_shape_tensor()

  def _range_shape_tensor(self):
    return self.operator.domain_shape_tensor()

  def _batch_shape_tensor(self):
    return self.operator.batch_shape_tensor()

  @property
  def operator(self):
    """The operator whose Gram matrix this operator represents."""
    return self._operator
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Tests for module `linear_operator_gram_matrix`.""" diff --git a/tensorflow_mri/python/linalg/linear_operator_identity.py b/tensorflow_mri/python/linalg/linear_operator_identity.py new file mode 100644 index 00000000..187632b0 --- /dev/null +++ b/tensorflow_mri/python/linalg/linear_operator_identity.py @@ -0,0 +1,287 @@ +# Copyright 2021 The TensorFlow MRI Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
# ==============================================================================
"""Scaled identity linear operator."""

import tensorflow as tf

from tensorflow_mri.python.linalg import linear_operator
from tensorflow_mri.python.linalg import linear_operator_algebra
from tensorflow_mri.python.util import api_util
from tensorflow_mri.python.util import tensor_util
from tensorflow_mri.python.util import types_util


@api_util.export("linalg.LinearOperatorIdentity")
@linear_operator.make_composite_tensor
class LinearOperatorIdentity(linear_operator.LinearOperatorMixin,  # pylint: disable=abstract-method
                             tf.linalg.LinearOperatorIdentity):
  """Linear operator representing an identity matrix.

  This operator acts like the identity matrix $A = I$ (or a batch of identity
  matrices).

  ```{note}
  This operator is similar to `tf.linalg.LinearOperatorIdentity`, but
  provides additional functionality. See the
  [linear algebra guide](https://mrphys.github.io/tensorflow-mri/guide/linalg/)
  for more details.
  ```

  ```{seealso}
  The scaled identity operator `tfmri.linalg.LinearOperatorScaledIdentity`.
  ```

  Args:
    domain_shape: A 1D integer `tf.Tensor`. The domain/range shape of the
      operator.
    batch_shape: An optional 1D integer `tf.Tensor`. The shape of the leading
      batch dimensions. If `None`, this operator has no leading batch
      dimensions.
    dtype: A `tf.dtypes.DType`. The data type of the matrix that this operator
      represents. Defaults to `float32`.
    is_non_singular: Expect that this operator is non-singular.
    is_self_adjoint: Expect that this operator is equal to its hermitian
      transpose.
    is_positive_definite: Expect that this operator is positive definite,
      meaning the quadratic form $x^H A x$ has positive real part for all
      nonzero $x$. Note that we do not require the operator to be
      self-adjoint to be positive-definite. See:
      https://en.wikipedia.org/wiki/Positive-definite_matrix#Extension_for_non-symmetric_matrices
    is_square: Expect that this operator acts like square [batch] matrices.
    assert_proper_shapes: A boolean. If `False`, only perform static
      checks that initialization and method arguments have proper shape.
      If `True`, and static checks are inconclusive, add asserts to the graph.
    name: A name for this `LinearOperator`.
  """
  def __init__(self,
               domain_shape,
               batch_shape=None,
               dtype=None,
               is_non_singular=True,
               is_self_adjoint=True,
               is_positive_definite=True,
               is_square=True,
               assert_proper_shapes=False,
               name="LinearOperatorIdentity"):
    # Shape inputs must not have reference semantics.
    types_util.assert_not_ref_type(domain_shape, "domain_shape")
    types_util.assert_not_ref_type(batch_shape, "batch_shape")

    # Parse domain shape into its static (TensorShape) and dynamic (Tensor)
    # representations; both are cached for the shape accessors below.
    self._domain_shape_static, self._domain_shape_dynamic = (
        tensor_util.static_and_dynamic_shapes_from_shape(
            domain_shape,
            assert_proper_shape=assert_proper_shapes,
            arg_name='domain_shape'))

    # Parse batch shape.
    if batch_shape is not None:
      # Extra underscore at the end to distinguish from base class property of
      # the same name.
      self._batch_shape_static_, self._batch_shape_dynamic = (
          tensor_util.static_and_dynamic_shapes_from_shape(
              batch_shape,
              assert_proper_shape=assert_proper_shapes,
              arg_name='batch_shape'))
    else:
      # No leading batch dimensions: empty (scalar) batch shape.
      self._batch_shape_static_ = tf.TensorShape([])
      self._batch_shape_dynamic = tf.constant([], dtype=tf.int32)

    # The base (matrix) operator is square with dimension equal to the total
    # number of points in the domain. NOTE(review): this reduces the raw
    # `domain_shape` input rather than the parsed dynamic shape — assumes the
    # input is directly reducible by `tf.math.reduce_prod`; confirm against
    # `static_and_dynamic_shapes_from_shape`'s accepted input types.
    super().__init__(num_rows=tf.math.reduce_prod(domain_shape),
                     batch_shape=batch_shape,
                     dtype=dtype,
                     is_non_singular=is_non_singular,
                     is_self_adjoint=is_self_adjoint,
                     is_positive_definite=is_positive_definite,
                     is_square=is_square,
                     assert_proper_shapes=assert_proper_shapes,
                     name=name)

  def _transform(self, x, adjoint=False):
    """Applies the identity: returns `x` broadcast against the batch shape.

    `adjoint` has no effect, since $I^H = I$.
    """
    # Prefer the statically known rank; fall back to the dynamic rank when the
    # static domain rank is unknown.
    if self.domain_shape.rank is not None:
      rank = self.domain_shape.rank
    else:
      rank = tf.size(self.domain_shape_tensor())
    # Broadcast the batch dimensions of `x` (everything left of the domain
    # axes) against this operator's batch shape; the domain part is unchanged.
    batch_shape = tf.broadcast_dynamic_shape(
        tf.shape(x)[:-rank], self.batch_shape_tensor())
    output_shape = tf.concat([batch_shape, self.domain_shape_tensor()], axis=0)  # pylint: disable=unexpected-keyword-arg,no-value-for-parameter
    return tf.broadcast_to(x, output_shape)

  def _domain_shape(self):
    # Static domain shape.
    return self._domain_shape_static

  def _range_shape(self):
    # Identity is square: range equals domain.
    return self._domain_shape_static

  def _batch_shape(self):
    return self._batch_shape_static_

  def _domain_shape_tensor(self):
    # Dynamic domain shape.
    return self._domain_shape_dynamic

  def _range_shape_tensor(self):
    return self._domain_shape_dynamic

  def _batch_shape_tensor(self):
    return self._batch_shape_dynamic

  @property
  def _composite_tensor_fields(self):
    # Constructor fields needed to rebuild this operator as a composite tensor.
    return ("domain_shape", "batch_shape", "dtype", "assert_proper_shapes")

  @property
  def _composite_tensor_prefer_static_fields(self):
    # Shape fields are kept static, when possible, during CT conversion.
    return ("domain_shape", "batch_shape")


@api_util.export("linalg.LinearOperatorScaledIdentity")
@linear_operator.make_composite_tensor
class LinearOperatorScaledIdentity(linear_operator.LinearOperatorMixin,  # pylint: disable=abstract-method
                                   tf.linalg.LinearOperatorScaledIdentity):
  """Linear operator representing a scaled identity matrix.

  This operator acts like a scaled identity matrix $A = cI$ (or a batch of
  scaled identity matrices).

  ```{note}
  This operator is similar to `tf.linalg.LinearOperatorScaledIdentity`, but
  provides additional functionality. See the
  [linear algebra guide](https://mrphys.github.io/tensorflow-mri/guide/linalg/)
  for more details.
  ```

  ```{seealso}
  The identity operator `tfmri.linalg.LinearOperatorIdentity`.
  ```

  Args:
    domain_shape: A 1D integer `Tensor`. The domain/range shape of the operator.
    multiplier: A `tf.Tensor` of arbitrary shape. Its shape will become the
      batch shape of the operator. Its dtype will determine the dtype of the
      operator.
    is_non_singular: Expect that this operator is non-singular.
    is_self_adjoint: Expect that this operator is equal to its hermitian
      transpose.
    is_positive_definite: Expect that this operator is positive definite,
      meaning the quadratic form $x^H A x$ has positive real part for all
      nonzero $x$. Note that we do not require the operator to be
      self-adjoint to be positive-definite. See:
      https://en.wikipedia.org/wiki/Positive-definite_matrix#Extension_for_non-symmetric_matrices
    is_square: Expect that this operator acts like square [batch] matrices.
    assert_proper_shapes: A boolean. If `False`, only perform static
      checks that initialization and method arguments have proper shape.
      If `True`, and static checks are inconclusive, add asserts to the graph.
    name: A name for this `LinearOperator`.
  """
  def __init__(self,
               domain_shape,
               multiplier,
               is_non_singular=None,
               is_self_adjoint=None,
               is_positive_definite=None,
               is_square=True,
               assert_proper_shapes=False,
               name="LinearOperatorScaledIdentity"):
    # Shape inputs must not have reference semantics.
    types_util.assert_not_ref_type(domain_shape, "domain_shape")

    # Parse domain shape into static/dynamic representations (see
    # `LinearOperatorIdentity.__init__`).
    self._domain_shape_static, self._domain_shape_dynamic = (
        tensor_util.static_and_dynamic_shapes_from_shape(
            domain_shape,
            assert_proper_shape=assert_proper_shapes,
            arg_name='domain_shape'))

    super().__init__(
        num_rows=tf.math.reduce_prod(domain_shape),
        multiplier=multiplier,
        is_non_singular=is_non_singular,
        is_self_adjoint=is_self_adjoint,
        is_positive_definite=is_positive_definite,
        is_square=is_square,
        assert_proper_shapes=assert_proper_shapes,
        name=name)

  def _transform(self, x, adjoint=False):
    """Multiplies `x` by the multiplier (conjugated if `adjoint`)."""
    # Reshape the multiplier from the batch shape [B1, ..., Bb] to
    # [B1, ..., Bb, 1, ..., 1] (one trailing 1 per domain axis) so it
    # broadcasts against the domain axes of `x`.
    domain_rank = tf.size(self.domain_shape_tensor())
    multiplier_shape = tf.concat([
        tf.shape(self.multiplier),
        tf.ones((domain_rank,), dtype=tf.int32)], 0)
    multiplier_matrix = tf.reshape(self.multiplier, multiplier_shape)
    if adjoint:
      # (cI)^H = conj(c) I.
      multiplier_matrix = tf.math.conj(multiplier_matrix)
    return x * multiplier_matrix

  def _domain_shape(self):
    # Static domain shape.
    return self._domain_shape_static

  def _range_shape(self):
    # Scaled identity is square: range equals domain.
    return self._domain_shape_static

  def _batch_shape(self):
    # The multiplier's (static) shape is the operator's batch shape.
    return self.multiplier.shape

  def _domain_shape_tensor(self):
    return self._domain_shape_dynamic

  def _range_shape_tensor(self):
    return self._domain_shape_dynamic

  def _batch_shape_tensor(self):
    return tf.shape(self.multiplier)

  @property
  def _composite_tensor_fields(self):
    # Constructor fields needed to rebuild this operator as a composite tensor.
    return ("domain_shape", "multiplier", "assert_proper_shapes")

  @property
  def _composite_tensor_prefer_static_fields(self):
    # Shape fields are kept static, when possible, during CT conversion.
    return ("domain_shape",)


@linear_operator_algebra.RegisterAdjoint(LinearOperatorIdentity)
def adjoint_identity(identity_operator):
  """The identity operator is its own adjoint."""
  return identity_operator


@linear_operator_algebra.RegisterAdjoint(LinearOperatorScaledIdentity)
def adjoint_scaled_identity(identity_operator):
  """Adjoint of cI is conj(c)I (conjugation only needed for complex c)."""
  multiplier = identity_operator.multiplier
  if multiplier.dtype.is_complex:
    multiplier = tf.math.conj(multiplier)

  return LinearOperatorScaledIdentity(
      domain_shape=identity_operator.domain_shape_tensor(),
      multiplier=multiplier,
      is_non_singular=identity_operator.is_non_singular,
      is_self_adjoint=identity_operator.is_self_adjoint,
      is_positive_definite=identity_operator.is_positive_definite,
      is_square=True)


@linear_operator_algebra.RegisterInverse(LinearOperatorIdentity)
def inverse_identity(identity_operator):
  """The identity operator is its own inverse."""
  return identity_operator


@linear_operator_algebra.RegisterInverse(LinearOperatorScaledIdentity)
def inverse_scaled_identity(identity_operator):
  """Inverse of cI is (1/c)I."""
  # NOTE(review): `is_self_adjoint=True` holds only for real multipliers;
  # confirm this hint is intended when `multiplier` is complex (the adjoint
  # registration above passes the flag through instead of hardcoding it).
  return LinearOperatorScaledIdentity(
      domain_shape=identity_operator.domain_shape_tensor(),
      multiplier=1. / identity_operator.multiplier,
      is_non_singular=identity_operator.is_non_singular,
      is_self_adjoint=True,
      is_positive_definite=identity_operator.is_positive_definite,
      is_square=True)
+ +Adapted from: + tensorflow/python/kernel_tests/linalg/linear_operator_identity_test.py +""" +# pylint: disable=missing-function-docstring + +import numpy as np +import tensorflow as tf +from tensorflow.python.framework import test_util +from tensorflow.python.ops.linalg import linear_operator_test_util + +from tensorflow_mri.python.linalg import linear_operator_identity + + +rng = np.random.RandomState(2016) + + +@test_util.run_all_in_graph_and_eager_modes +class LinearOperatorIdentityTest( + linear_operator_test_util.SquareLinearOperatorDerivedClassTest): + """Most tests done in the base class LinearOperatorDerivedClassTest.""" + + def tearDown(self): + tf.config.experimental.enable_tensor_float_32_execution(self.tf32_keep_) + + def setUp(self): + self.tf32_keep_ = tf.config.experimental.tensor_float_32_execution_enabled() + tf.config.experimental.enable_tensor_float_32_execution(False) + + @staticmethod + def dtypes_to_test(): + # TODO(langmore) Test tf.float16 once tf.linalg.solve works in + # 16bit. + return [tf.float32, tf.float64, tf.complex64, tf.complex128] + + @staticmethod + def optional_tests(): + """List of optional test names to run.""" + return [ + "operator_matmul_with_same_type", + "operator_solve_with_same_type", + ] + + def operator_and_matrix( + self, build_info, dtype, use_placeholder, + ensure_self_adjoint_and_pd=False): + # Identity matrix is already Hermitian Positive Definite. 
+ del ensure_self_adjoint_and_pd + + shape = list(build_info.shape) + assert shape[-1] == shape[-2] + + batch_shape = shape[:-2] + num_rows = shape[-1] + + operator = linear_operator_identity.LinearOperatorIdentity( + num_rows, batch_shape=batch_shape, dtype=dtype) + mat = tf.linalg.eye(num_rows, batch_shape=batch_shape, dtype=dtype) + + return operator, mat + + def test_to_dense(self): + with self.cached_session(): + operator = linear_operator_identity.LinearOperatorIdentity( + domain_shape=[2]) + self.assertAllClose(np.eye(2), self.evaluate(operator.to_dense())) + + operator = linear_operator_identity.LinearOperatorIdentity( + domain_shape=[2, 3]) + self.assertAllClose(np.eye(6), self.evaluate(operator.to_dense())) + + def test_shapes(self): + with self.cached_session(): + operator = linear_operator_identity.LinearOperatorIdentity( + domain_shape=[2, 3], batch_shape=[4, 5]) + self.assertAllEqual([2, 3], operator.domain_shape) + self.assertAllEqual([2, 3], operator.range_shape) + self.assertAllEqual([4, 5], operator.batch_shape) + self.assertAllEqual([4, 5, 6, 6], operator.shape) + self.assertAllEqual(6, operator.domain_dimension) + self.assertAllEqual(6, operator.range_dimension) + self.assertAllEqual([2, 3], self.evaluate(operator.domain_shape_tensor())) + self.assertAllEqual([2, 3], self.evaluate(operator.range_shape_tensor())) + self.assertAllEqual([4, 5], self.evaluate(operator.batch_shape_tensor())) + self.assertAllEqual([4, 5, 6, 6], self.evaluate(operator.shape_tensor())) + self.assertAllEqual(6, self.evaluate(operator.domain_dimension_tensor())) + self.assertAllEqual(6, self.evaluate(operator.range_dimension_tensor())) + + def test_shapes_dynamic(self): + # These cannot be done in the automated (base test class) tests since they + # test shapes that tf.batch_matmul cannot handle. + # In particular, tf.batch_matmul does not broadcast. 
+ with self.cached_session(): + # Given this x and LinearOperatorIdentity shape of (2, 1, 6, 6), the + # broadcast shape of operator and 'x' is (2, 2, 3, 4) + domain_shape = tf.compat.v1.placeholder_with_default((2, 3), shape=None) + batch_shape = tf.compat.v1.placeholder_with_default((2, 1), shape=None) + + operator = linear_operator_identity.LinearOperatorIdentity( + domain_shape, batch_shape=batch_shape, dtype=tf.float64) + + self.assertAllEqual([2, 3], self.evaluate(operator.domain_shape_tensor())) + self.assertAllEqual([2, 1], self.evaluate(operator.batch_shape_tensor())) + self.assertAllEqual([2, 1, 6, 6], self.evaluate(operator.shape_tensor())) + self.assertAllEqual(6, self.evaluate(operator.domain_dimension_tensor())) + self.assertAllEqual(6, self.evaluate(operator.range_dimension_tensor())) + + def test_assert_positive_definite(self): + with self.cached_session(): + operator = linear_operator_identity.LinearOperatorIdentity( + domain_shape=[2]) + self.evaluate(operator.assert_positive_definite()) # Should not fail + + def test_assert_non_singular(self): + with self.cached_session(): + operator = linear_operator_identity.LinearOperatorIdentity( + domain_shape=[2]) + self.evaluate(operator.assert_non_singular()) # Should not fail + + def test_assert_self_adjoint(self): + with self.cached_session(): + operator = linear_operator_identity.LinearOperatorIdentity( + domain_shape=[2]) + self.evaluate(operator.assert_self_adjoint()) # Should not fail + + # TODO(jmontalt). + # def test_float16_matmul(self): + # # float16 cannot be tested by base test class because tf.linalg.solve does + # # not work with float16. 
+ # with self.cached_session(): + # operator = linear_operator_identity.LinearOperatorIdentity( + # domain_shape=[2], dtype=tf.float16) + # x = rng.randn(2, 3).astype(np.float16) + # y = operator.matmul(x) + # self.assertAllClose(x, self.evaluate(y)) + + def test_non_1d_domain_shape_raises_static(self): + with self.assertRaisesRegex( + ValueError, "domain_shape must be a 1-D Tensor"): + linear_operator_identity.LinearOperatorIdentity(domain_shape=2) + + def test_non_integer_domain_shape_raises_static(self): + with self.assertRaisesRegex( + TypeError, "domain_shape must be integer"): + linear_operator_identity.LinearOperatorIdentity(domain_shape=[2.]) + + def test_negative_domain_shape_raises_static(self): + with self.assertRaisesRegex( + ValueError, "domain_shape must be non-negative"): + linear_operator_identity.LinearOperatorIdentity(domain_shape=[-2]) + + def test_non_1d_batch_shape_raises_static(self): + with self.assertRaisesRegex( + ValueError, "batch_shape must be a 1-D Tensor"): + linear_operator_identity.LinearOperatorIdentity( + domain_shape=[2], batch_shape=2) + + def test_non_integer_batch_shape_raises_static(self): + with self.assertRaisesRegex(TypeError, "must be integer"): + linear_operator_identity.LinearOperatorIdentity( + domain_shape=[2], batch_shape=[2.]) + + def test_negative_batch_shape_raises_static(self): + with self.assertRaisesRegex(ValueError, "must be non-negative"): + linear_operator_identity.LinearOperatorIdentity( + domain_shape=[2], batch_shape=[-2]) + + def test_non_1d_domain_shape_raises_dynamic(self): + with self.cached_session(): + domain_shape = tf.compat.v1.placeholder_with_default(2, shape=None) + with self.assertRaisesError("must be a 1-D Tensor"): + operator = linear_operator_identity.LinearOperatorIdentity( + domain_shape, assert_proper_shapes=True) + self.evaluate(operator.to_dense()) + + def test_negative_domain_shape_raises_dynamic(self): + with self.cached_session(): + domain_shape = 
tf.compat.v1.placeholder_with_default([-2], shape=None) + with self.assertRaisesError("must be non-negative"): + operator = linear_operator_identity.LinearOperatorIdentity( + domain_shape, assert_proper_shapes=True) + self.evaluate(operator.to_dense()) + + def test_non_1d_batch_shape_raises_dynamic(self): + with self.cached_session(): + batch_shape = tf.compat.v1.placeholder_with_default(2, shape=None) + with self.assertRaisesError("must be a 1-D"): + operator = linear_operator_identity.LinearOperatorIdentity( + domain_shape=[2], batch_shape=batch_shape, + assert_proper_shapes=True) + self.evaluate(operator.to_dense()) + + def test_negative_batch_shape_raises_dynamic(self): + with self.cached_session(): + batch_shape = tf.compat.v1.placeholder_with_default([-2], shape=None) + with self.assertRaisesError("must be non-negative"): + operator = linear_operator_identity.LinearOperatorIdentity( + domain_shape=[2], batch_shape=batch_shape, + assert_proper_shapes=True) + self.evaluate(operator.to_dense()) + + def test_wrong_matrix_dimensions_raises_static(self): + operator = linear_operator_identity.LinearOperatorIdentity(domain_shape=[2]) + x = rng.randn(3, 3).astype(np.float32) + with self.assertRaisesRegex(ValueError, "Dimensions.*not compatible"): + operator.matmul(x) + + # TODO(jmontalt). + # def test_wrong_matrix_dimensions_raises_dynamic(self): + # domain_shape = tf.compat.v1.placeholder_with_default([2], shape=None) + # x = tf.compat.v1.placeholder_with_default( + # rng.rand(3, 3).astype(np.float32), shape=None) + + # with self.cached_session(): + # with self.assertRaisesError("Dimensions.*not.compatible"): + # operator = linear_operator_identity.LinearOperatorIdentity( + # domain_shape, assert_proper_shapes=True) + # self.evaluate(operator.matmul(x)) + + def test_default_batch_shape_broadcasts_with_everything_static(self): + # These cannot be done in the automated (base test class) tests since they + # test shapes that tf.batch_matmul cannot handle. 
+ # In particular, tf.batch_matmul does not broadcast. + with self.cached_session(): + x = tf.random.normal(shape=(1, 2, 3, 4)) + operator = linear_operator_identity.LinearOperatorIdentity( + domain_shape=[3], dtype=x.dtype) + + operator_matmul = operator.matmul(x) + expected = x + + self.assertAllEqual(operator_matmul.shape, expected.shape) + self.assertAllClose(*self.evaluate([operator_matmul, expected])) + + def test_default_batch_shape_broadcasts_with_everything_dynamic(self): + # These cannot be done in the automated (base test class) tests since they + # test shapes that tf.batch_matmul cannot handle. + # In particular, tf.batch_matmul does not broadcast. + with self.cached_session(): + x = tf.compat.v1.placeholder_with_default( + rng.randn(1, 2, 3, 4), shape=None) + operator = linear_operator_identity.LinearOperatorIdentity( + domain_shape=[3], dtype=x.dtype) + + operator_matmul = operator.matmul(x) + expected = x + + self.assertAllClose(*self.evaluate([operator_matmul, expected])) + + def test_broadcast_matmul_static_shapes(self): + # These cannot be done in the automated (base test class) tests since they + # test shapes that tf.batch_matmul cannot handle. + # In particular, tf.batch_matmul does not broadcast. + with self.cached_session(): + # Given this x and LinearOperatorIdentity shape of (2, 1, 6, 6), the + # broadcast shape of operator and 'x' is (2, 2, 6, 4) + x = tf.random.normal(shape=(1, 2, 6, 4)) + operator = linear_operator_identity.LinearOperatorIdentity( + domain_shape=(2, 3), batch_shape=(2, 1), dtype=x.dtype) + + # Batch matrix of zeros with the broadcast shape of x and operator. + zeros = tf.zeros(shape=(2, 2, 6, 4), dtype=x.dtype) + + # Expected result of matmul and solve. 
+ expected = x + zeros + + operator_matmul = operator.matmul(x) + self.assertAllEqual(operator_matmul.shape, expected.shape) + self.assertAllClose(*self.evaluate([operator_matmul, expected])) + + def test_broadcast_matmul_dynamic_shapes(self): + # These cannot be done in the automated (base test class) tests since they + # test shapes that tf.batch_matmul cannot handle. + # In particular, tf.batch_matmul does not broadcast. + with self.cached_session(): + # Given this x and LinearOperatorIdentity shape of (2, 1, 6, 6), the + # broadcast shape of operator and 'x' is (2, 2, 3, 4) + x = tf.compat.v1.placeholder_with_default( + rng.rand(1, 2, 6, 4), shape=None) + domain_shape = tf.compat.v1.placeholder_with_default((2, 3), shape=None) + batch_shape = tf.compat.v1.placeholder_with_default((2, 1), shape=None) + + operator = linear_operator_identity.LinearOperatorIdentity( + domain_shape, batch_shape=batch_shape, dtype=tf.float64) + + # Batch matrix of zeros with the broadcast shape of x and operator. + zeros = tf.zeros(shape=(2, 2, 6, 4), dtype=x.dtype) + + # Expected result of matmul and solve. + expected = x + zeros + + operator_matmul = operator.matmul(x) + self.assertAllClose(*self.evaluate([operator_matmul, expected])) + + def test_is_x_flags(self): + # The is_x flags are by default all True. + operator = linear_operator_identity.LinearOperatorIdentity(domain_shape=[2]) + self.assertTrue(operator.is_positive_definite) + self.assertTrue(operator.is_non_singular) + self.assertTrue(operator.is_self_adjoint) + + # Any of them False raises because the identity is always self-adjoint etc.. 
+ with self.assertRaisesRegex(ValueError, "is always non-singular"): + operator = linear_operator_identity.LinearOperatorIdentity( + domain_shape=[2], + is_non_singular=None, + ) + + def test_identity_adjoint_type(self): + operator = linear_operator_identity.LinearOperatorIdentity( + domain_shape=[2], is_non_singular=True) + self.assertIsInstance( + operator.adjoint(), linear_operator_identity.LinearOperatorIdentity) + + # TODO(jmontalt). + # def test_identity_cholesky_type(self): + # operator = linear_operator_identity.LinearOperatorIdentity( + # domain_shape=[2], + # is_positive_definite=True, + # is_self_adjoint=True, + # ) + # self.assertIsInstance( + # operator.cholesky(), linear_operator_identity.LinearOperatorIdentity) + + def test_identity_inverse_type(self): + operator = linear_operator_identity.LinearOperatorIdentity( + domain_shape=[2], is_non_singular=True) + self.assertIsInstance( + operator.inverse(), linear_operator_identity.LinearOperatorIdentity) + + def test_ref_type_shape_args_raises(self): + with self.assertRaisesRegex(TypeError, "domain_shape.*reference"): + linear_operator_identity.LinearOperatorIdentity( + domain_shape=tf.Variable([2])) + + with self.assertRaisesRegex(TypeError, "batch_shape.*reference"): + linear_operator_identity.LinearOperatorIdentity( + domain_shape=[2], batch_shape=tf.Variable([3])) + + +@test_util.run_all_in_graph_and_eager_modes +class LinearOperatorScaledIdentityTest( + linear_operator_test_util.SquareLinearOperatorDerivedClassTest): + """Most tests done in the base class LinearOperatorDerivedClassTest.""" + + def tearDown(self): + tf.config.experimental.enable_tensor_float_32_execution(self.tf32_keep_) + + def setUp(self): + self.tf32_keep_ = tf.config.experimental.tensor_float_32_execution_enabled() + tf.config.experimental.enable_tensor_float_32_execution(False) + + @staticmethod + def dtypes_to_test(): + # TODO(langmore) Test tf.float16 once tf.linalg.solve works in + # 16bit. 
+ return [tf.float32, tf.float64, tf.complex64, tf.complex128] + + @staticmethod + def optional_tests(): + """List of optional test names to run.""" + return [ + "operator_matmul_with_same_type", + "operator_solve_with_same_type", + ] + + def operator_and_matrix( + self, build_info, dtype, use_placeholder, + ensure_self_adjoint_and_pd=False): + + shape = list(build_info.shape) + assert shape[-1] == shape[-2] + + batch_shape = shape[:-2] + num_rows = shape[-1] + + # Uniform values that are at least length 1 from the origin. Allows the + # operator to be well conditioned. + # Shape batch_shape + multiplier = linear_operator_test_util.random_sign_uniform( + shape=batch_shape, minval=1., maxval=2., dtype=dtype) + + if ensure_self_adjoint_and_pd: + # Abs on complex64 will result in a float32, so we cast back up. + multiplier = tf.cast(tf.abs(multiplier), dtype=dtype) + + # Nothing to feed since LinearOperatorScaledIdentity takes no Tensor args. + lin_op_multiplier = multiplier + + if use_placeholder: + lin_op_multiplier = tf.compat.v1.placeholder_with_default( + multiplier, shape=None) + + operator = linear_operator_identity.LinearOperatorScaledIdentity( + num_rows, + lin_op_multiplier, + is_self_adjoint=True if ensure_self_adjoint_and_pd else None, + is_positive_definite=True if ensure_self_adjoint_and_pd else None) + + multiplier_matrix = tf.expand_dims( + tf.expand_dims(multiplier, -1), -1) + matrix = multiplier_matrix * tf.linalg.eye( + num_rows, batch_shape=batch_shape, dtype=dtype) + + return operator, matrix + + def test_to_dense(self): + with self.cached_session(): + operator = linear_operator_identity.LinearOperatorScaledIdentity( + domain_shape=[2], multiplier=1.0) + self.assertAllClose(np.eye(2), self.evaluate(operator.to_dense())) + + operator = linear_operator_identity.LinearOperatorScaledIdentity( + domain_shape=[2, 3], multiplier=2.0) + self.assertAllClose(2.0 * np.eye(6), self.evaluate(operator.to_dense())) + + def test_shapes(self): + with 
self.cached_session(): + multiplier = tf.ones([4, 5]) + operator = linear_operator_identity.LinearOperatorScaledIdentity( + domain_shape=[2, 3], multiplier=multiplier) + self.assertAllEqual([2, 3], operator.domain_shape) + self.assertAllEqual([2, 3], operator.range_shape) + self.assertAllEqual([4, 5], operator.batch_shape) + self.assertAllEqual([4, 5, 6, 6], operator.shape) + self.assertAllEqual(6, operator.domain_dimension) + self.assertAllEqual(6, operator.range_dimension) + self.assertAllEqual([2, 3], self.evaluate(operator.domain_shape_tensor())) + self.assertAllEqual([2, 3], self.evaluate(operator.range_shape_tensor())) + self.assertAllEqual([4, 5], self.evaluate(operator.batch_shape_tensor())) + self.assertAllEqual([4, 5, 6, 6], self.evaluate(operator.shape_tensor())) + self.assertAllEqual(6, self.evaluate(operator.domain_dimension_tensor())) + self.assertAllEqual(6, self.evaluate(operator.range_dimension_tensor())) + + def test_shapes_dynamic(self): + # These cannot be done in the automated (base test class) tests since they + # test shapes that tf.batch_matmul cannot handle. + # In particular, tf.batch_matmul does not broadcast. 
+ with self.cached_session(): + # Given this x and LinearOperatorIdentity shape of (2, 1, 6, 6), the + # broadcast shape of operator and 'x' is (2, 2, 3, 4) + domain_shape = tf.compat.v1.placeholder_with_default((2, 3), shape=None) + batch_shape = tf.compat.v1.placeholder_with_default((2, 1), shape=None) + + operator = linear_operator_identity.LinearOperatorIdentity( + domain_shape, batch_shape=batch_shape, dtype=tf.float64) + + self.assertAllEqual([2, 3], self.evaluate(operator.domain_shape_tensor())) + self.assertAllEqual([2, 1], self.evaluate(operator.batch_shape_tensor())) + self.assertAllEqual([2, 1, 6, 6], self.evaluate(operator.shape_tensor())) + self.assertAllEqual(6, self.evaluate(operator.domain_dimension_tensor())) + self.assertAllEqual(6, self.evaluate(operator.range_dimension_tensor())) + + def test_assert_positive_definite_does_not_raise_when_positive(self): + with self.cached_session(): + operator = linear_operator_identity.LinearOperatorScaledIdentity( + domain_shape=[2], multiplier=1.) + self.evaluate(operator.assert_positive_definite()) # Should not fail + + def test_assert_positive_definite_raises_when_negative(self): + with self.cached_session(): + operator = linear_operator_identity.LinearOperatorScaledIdentity( + domain_shape=[2], multiplier=-1.) 
+ with self.assertRaisesOpError("not positive definite"): + self.evaluate(operator.assert_positive_definite()) + + def test_assert_non_singular_does_not_raise_when_non_singular(self): + with self.cached_session(): + operator = linear_operator_identity.LinearOperatorScaledIdentity( + domain_shape=[2], multiplier=[1., 2., 3.]) + self.evaluate(operator.assert_non_singular()) # Should not fail + + def test_assert_non_singular_raises_when_singular(self): + with self.cached_session(): + operator = linear_operator_identity.LinearOperatorScaledIdentity( + domain_shape=[2], multiplier=[1., 2., 0.]) + with self.assertRaisesOpError("was singular"): + self.evaluate(operator.assert_non_singular()) + + def test_assert_self_adjoint_does_not_raise_when_self_adjoint(self): + with self.cached_session(): + operator = linear_operator_identity.LinearOperatorScaledIdentity( + domain_shape=[2], multiplier=[1. + 0J]) + self.evaluate(operator.assert_self_adjoint()) # Should not fail + + def test_assert_self_adjoint_raises_when_not_self_adjoint(self): + with self.cached_session(): + operator = linear_operator_identity.LinearOperatorScaledIdentity( + domain_shape=[2], multiplier=[1. + 1J]) + with self.assertRaisesOpError("not self-adjoint"): + self.evaluate(operator.assert_self_adjoint()) + +# def test_float16_matmul(self): +# # float16 cannot be tested by base test class because tf.linalg.solve does +# # not work with float16. +# with self.cached_session(): +# multiplier = rng.rand(3).astype(np.float16) +# operator = linear_operator_identity.LinearOperatorScaledIdentity( +# domain_shape=[2], multiplier=multiplier) +# x = rng.randn(2, 3).astype(np.float16) +# y = operator.matmul(x) +# self.assertAllClose(multiplier[..., None, None] * x, self.evaluate(y)) + + def test_non_1d_domain_shape_raises_static(self): + # Many "test_...num_rows" tests are performed in LinearOperatorIdentity. 
+ with self.assertRaisesRegex(ValueError, "must be a 1-D Tensor"): + linear_operator_identity.LinearOperatorScaledIdentity( + domain_shape=2, multiplier=123.) + + def test_wrong_matrix_dimensions_raises_static(self): + operator = linear_operator_identity.LinearOperatorScaledIdentity( + domain_shape=[2], multiplier=2.2) + x = rng.randn(3, 3).astype(np.float32) + with self.assertRaisesRegex(ValueError, "Dimensions.*not compatible"): + operator.matmul(x) + + # TODO(jmontalt): add assertions to `transform` / `matmul`. + # def test_wrong_matrix_dimensions_raises_dynamic(self): + # num_rows = tf.compat.v1.placeholder_with_default(2, shape=None) + # x = tf.compat.v1.placeholder_with_default( + # rng.rand(3, 3).astype(np.float32), shape=None) + + # with self.cached_session(): + # with self.assertRaisesError("Dimensions.*not.compatible"): + # operator = linear_operator_identity.LinearOperatorScaledIdentity( + # num_rows, + # multiplier=[1., 2], + # assert_proper_shapes=True) + # self.evaluate(operator.matmul(x)) + + def test_broadcast_matmul_and_solve(self): + # These cannot be done in the automated (base test class) tests since they + # test shapes that tf.batch_matmul cannot handle. + # In particular, tf.batch_matmul does not broadcast. + with self.cached_session(): + # Given this x and LinearOperatorScaledIdentity shape of (2, 1, 6, 6), the + # broadcast shape of operator and 'x' is (2, 2, 6, 4) + x = tf.random.normal(shape=(1, 2, 6, 4)) + + # operator is 2.2 * identity (with a batch shape). + operator = linear_operator_identity.LinearOperatorScaledIdentity( + domain_shape=[2, 3], multiplier=2.2 * tf.ones((2, 1))) + + # Batch matrix of zeros with the broadcast shape of x and operator. 
+ zeros = tf.zeros(shape=(2, 2, 6, 4), dtype=x.dtype) + + # Test matmul + expected = x * 2.2 + zeros + operator_matmul = operator.matmul(x) + self.assertAllEqual(operator_matmul.shape, expected.shape) + self.assertAllClose(*self.evaluate([operator_matmul, expected])) + + # Test solve + expected = x / 2.2 + zeros + operator_solve = operator.solve(x) + self.assertAllEqual(operator_solve.shape, expected.shape) + self.assertAllClose(*self.evaluate([operator_solve, expected])) + + def test_broadcast_matmul_and_solve_scalar_scale_multiplier(self): + # These cannot be done in the automated (base test class) tests since they + # test shapes that tf.batch_matmul cannot handle. + # In particular, tf.batch_matmul does not broadcast. + with self.cached_session(): + # Given this x and LinearOperatorScaledIdentity shape of (6, 6), the + # broadcast shape of operator and 'x' is (1, 2, 6, 4), which is the same + # shape as x. + x = tf.random.normal(shape=(1, 2, 6, 4)) + + # operator is 2.2 * identity (with a batch shape). + operator = linear_operator_identity.LinearOperatorScaledIdentity( + domain_shape=[2, 3], multiplier=2.2) + + # Test matmul + expected = x * 2.2 + operator_matmul = operator.matmul(x) + self.assertAllEqual(operator_matmul.shape, expected.shape) + self.assertAllClose(*self.evaluate([operator_matmul, expected])) + + # Test solve + expected = x / 2.2 + operator_solve = operator.solve(x) + self.assertAllEqual(operator_solve.shape, expected.shape) + self.assertAllClose(*self.evaluate([operator_solve, expected])) + + def test_is_x_flags(self): + operator = linear_operator_identity.LinearOperatorScaledIdentity( + domain_shape=[2], multiplier=1., + is_positive_definite=False, is_non_singular=True) + self.assertFalse(operator.is_positive_definite) + self.assertTrue(operator.is_non_singular) + self.assertTrue(operator.is_self_adjoint) # Auto-set due to real multiplier + + # TODO(jmontalt). 
+ # def test_identity_matmul(self): + # operator1 = linear_operator_identity.LinearOperatorIdentity(domain_shape=[2]) + # operator2 = linear_operator_identity.LinearOperatorScaledIdentity( + # domain_shape=[2], multiplier=3.) + # self.assertIsInstance( + # operator1.matmul(operator1), + # linear_operator_identity.LinearOperatorIdentity) + + # self.assertIsInstance( + # operator1.matmul(operator1), + # linear_operator_identity.LinearOperatorIdentity) + + # self.assertIsInstance( + # operator2.matmul(operator2), + # linear_operator_identity.LinearOperatorScaledIdentity) + + # operator_matmul = operator1.matmul(operator2) + # self.assertIsInstance( + # operator_matmul, + # linear_operator_identity.LinearOperatorScaledIdentity) + # self.assertAllClose(3., self.evaluate(operator_matmul.multiplier)) + + # operator_matmul = operator2.matmul(operator1) + # self.assertIsInstance( + # operator_matmul, + # linear_operator_identity.LinearOperatorScaledIdentity) + # self.assertAllClose(3., self.evaluate(operator_matmul.multiplier)) + + # def test_identity_solve(self): + # operator1 = linear_operator_identity.LinearOperatorIdentity( + # domain_shape=[2]) + # operator2 = linear_operator_identity.LinearOperatorScaledIdentity( + # domain_shape=[2], multiplier=3.) + # self.assertIsInstance( + # operator1.solve(operator1), + # linear_operator_identity.LinearOperatorIdentity) + + # self.assertIsInstance( + # operator2.solve(operator2), + # linear_operator_identity.LinearOperatorScaledIdentity) + + # operator_solve = operator1.solve(operator2) + # self.assertIsInstance( + # operator_solve, + # linear_operator_identity.LinearOperatorScaledIdentity) + # self.assertAllClose(3., self.evaluate(operator_solve.multiplier)) + + # operator_solve = operator2.solve(operator1) + # self.assertIsInstance( + # operator_solve, + # linear_operator_identity.LinearOperatorScaledIdentity) + # self.assertAllClose(1. 
/ 3., self.evaluate(operator_solve.multiplier)) + + # def test_scaled_identity_cholesky_type(self): + # operator = linear_operator_identity.LinearOperatorScaledIdentity( + # domain_shape=[2], + # multiplier=3., + # is_positive_definite=True, + # is_self_adjoint=True, + # ) + # self.assertIsInstance( + # operator.cholesky(), + # linear_operator_identity.LinearOperatorScaledIdentity) + + def test_scaled_identity_inverse_type(self): + operator = linear_operator_identity.LinearOperatorScaledIdentity( + domain_shape=[2], + multiplier=3., + is_non_singular=True, + ) + self.assertIsInstance( + operator.inverse(), + linear_operator_identity.LinearOperatorScaledIdentity) + + def test_ref_type_shape_args_raises(self): + with self.assertRaisesRegex(TypeError, "domain_shape.*reference"): + linear_operator_identity.LinearOperatorScaledIdentity( + domain_shape=tf.Variable(2), multiplier=1.23) + + def test_tape_safe(self): + multiplier = tf.Variable(1.23) + operator = linear_operator_identity.LinearOperatorScaledIdentity( + domain_shape=[2], multiplier=multiplier) + self.check_tape_safe(operator) + + def test_convert_variables_to_tensors(self): + multiplier = tf.Variable(1.23) + operator = linear_operator_identity.LinearOperatorScaledIdentity( + domain_shape=[2], multiplier=multiplier) + with self.cached_session() as sess: + sess.run([multiplier.initializer]) + self.check_convert_variables_to_tensors(operator) + + +if __name__ == "__main__": + linear_operator_test_util.add_tests(LinearOperatorIdentityTest) + linear_operator_test_util.add_tests(LinearOperatorScaledIdentityTest) + tf.test.main() diff --git a/tensorflow_mri/python/linalg/linear_operator_mri.py b/tensorflow_mri/python/linalg/linear_operator_mri.py new file mode 100644 index 00000000..5f0cfe91 --- /dev/null +++ b/tensorflow_mri/python/linalg/linear_operator_mri.py @@ -0,0 +1,812 @@ +# Copyright 2021 The TensorFlow MRI Authors. All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""MRI linear operator.""" + +import warnings + +import tensorflow as tf + +from tensorflow_mri.python.linalg import linear_operator_nufft +from tensorflow_mri.python.ops import fft_ops +from tensorflow_mri.python.ops import math_ops +from tensorflow_mri.python.util import api_util +from tensorflow_mri.python.util import check_util +from tensorflow_mri.python.linalg import linear_operator +from tensorflow_mri.python.util import tensor_util + + +_WARNED_IGNORED_BATCH_DIMENSIONS = {} + + +@api_util.export("linalg.LinearOperatorMRI") +@linear_operator.make_composite_tensor +class LinearOperatorMRI(linear_operator.LinearOperator): # pylint: disable=abstract-method + r"""Linear operator acting like an MRI measurement system. + + The MRI operator, $A$, maps a [batch of] images, $x$ to a + [batch of] measurement data (*k*-space), $b$. + + $$ + A x = b + $$ + + This object may represent an undersampled MRI operator and supports + Cartesian and non-Cartesian *k*-space sampling. The user may provide a + sampling `mask` to represent an undersampled Cartesian operator, or a + `trajectory` to represent a non-Cartesian operator. + + This object may represent a multicoil MRI operator by providing coil + `sensitivities`. Note that `mask`, `trajectory` and `density` should never + have a coil dimension, including in the case of multicoil imaging. 
The coil + dimension will be handled automatically. + + The domain shape of this operator is `extra_shape + image_shape`. The range + of this operator is `extra_shape + [num_coils] + image_shape`, for + Cartesian imaging, or `extra_shape + [num_coils] + [num_samples]`, for + non-Cartesian imaging. `[num_coils]` is optional and only present for + multicoil operators. This operator supports batches of images and will + vectorize operations when possible. + + Args: + image_shape: A 1D integer `tf.Tensor`. The shape of the images + that this operator acts on. Must have length 2 or 3. + extra_shape: An optional 1D integer `tf.Tensor`. Additional + dimensions that should be included within the operator domain. Note that + `extra_shape` is not needed to reconstruct independent batches of images. + However, it is useful when this operator is used as part of a + reconstruction that performs computation along non-spatial dimensions, + e.g. for temporal regularization. Defaults to `None`. + mask: An optional `tf.Tensor` of type `tf.bool`. The sampling mask. Must + have shape `[..., *S]`, where `S` is the `image_shape` and `...` is + the batch shape, which can have any number of dimensions. If `mask` is + passed, this operator represents an undersampled MRI operator. + trajectory: An optional `tf.Tensor` of type `float32` or `float64`. Must + have shape `[..., M, N]`, where `N` is the rank (number of spatial + dimensions), `M` is the number of samples in the encoded space and `...` + is the batch shape, which can have any number of dimensions. If + `trajectory` is passed, this operator represents a non-Cartesian MRI + operator. + density: An optional `tf.Tensor` of type `float32` or `float64`. The + sampling densities. Must have shape `[..., M]`, where `M` is the number of + samples and `...` is the batch shape, which can have any number of + dimensions. This input is only relevant for non-Cartesian MRI operators. 
+ If passed, the non-Cartesian operator will include sampling density + compensation. If `None`, the operator will not perform sampling density + compensation. + sensitivities: An optional `tf.Tensor` of type `complex64` or `complex128`. + The coil sensitivity maps. Must have shape `[..., C, *S]`, where `S` + is the `image_shape`, `C` is the number of coils and `...` is the batch + shape, which can have any number of dimensions. + phase: An optional `tf.Tensor` of type `float32` or `float64`. A phase + estimate for the image. If provided, this operator will be + phase-constrained. + fft_norm: FFT normalization mode. Must be `None` (no normalization) + or `'ortho'`. Defaults to `'ortho'`. + sens_norm: A `boolean`. Whether to normalize coil sensitivities. Defaults to + `True`. + intensity_correction: A `boolean`. Whether to correct for overall receiver + coil sensitivity. Defaults to `True`. Has no effect if `sens_norm` is also + `True`. + dynamic_domain: A `str`. The domain of the dynamic dimension, if present. + Must be one of `'time'` or `'frequency'`. May only be provided together + with a non-scalar `extra_shape`. The dynamic dimension is the last + dimension of `extra_shape`. The `'time'` mode (default) should be + used for regular dynamic reconstruction. The `'frequency'` mode should be + used for reconstruction in x-f space. + is_non_singular: A boolean, or `None`. Whether this operator is expected + to be non-singular. Defaults to `None`. + is_self_adjoint: A boolean, or `None`. Whether this operator is expected + to be equal to its Hermitian transpose. If `dtype` is real, this is + equivalent to being symmetric. Defaults to `None`. + is_positive_definite: A boolean, or `None`. Whether this operators is + expected to be positive definite, meaning the quadratic form $x^H A x$ + has positive real part for all nonzero $x$. Note that we do not require + the operator to be self-adjoint to be positive-definite. 
See: + https://en.wikipedia.org/wiki/Positive-definite_matrix#Extension_for_non-symmetric_matrices. + Defaults to `None`. + is_square: A boolean, or `None`. Expect that this operator acts like a + square matrix (or a batch of square matrices). Defaults to `None`. + dtype: A `tf.dtypes.DType`. The dtype of this operator. Must be `complex64` + or `complex128`. Defaults to `complex64`. + name: An optional `str`. The name of this operator. + """ + def __init__(self, + image_shape, + extra_shape=None, + mask=None, + trajectory=None, + density=None, + sensitivities=None, + phase=None, + fft_norm='ortho', + sens_norm=True, + intensity_correction=True, + dynamic_domain=None, + is_non_singular=None, + is_self_adjoint=None, + is_positive_definite=None, + is_square=None, + dtype=tf.complex64, + name=None): + # pylint: disable=invalid-unary-operand-type + parameters = dict( + image_shape=image_shape, + extra_shape=extra_shape, + mask=mask, + trajectory=trajectory, + density=density, + sensitivities=sensitivities, + phase=phase, + fft_norm=fft_norm, + sens_norm=sens_norm, + intensity_correction=intensity_correction, + dynamic_domain=dynamic_domain, + is_non_singular=is_non_singular, + is_self_adjoint=is_self_adjoint, + is_positive_definite=is_positive_definite, + is_square=is_square, + dtype=dtype, + name=name) + super().__init__(dtype=dtype, + is_non_singular=is_non_singular, + is_self_adjoint=is_self_adjoint, + is_positive_definite=is_positive_definite, + is_square=is_square, + name=name, + parameters=parameters) + + # Set dtype. + dtype = tf.as_dtype(dtype) + if dtype not in (tf.complex64, tf.complex128): + raise ValueError( + f"`dtype` must be `complex64` or `complex128`, but got: {str(dtype)}") + + # Batch dimensions in `image_shape` and `extra_shape` are not supported. + # However, it is convenient to allow them to have batch dimensions anyway. + # This helps when this operator is used in Keras models, where all inputs + # may be automatically batched. 
If there are any batch dimensions, we simply + # ignore them by taking the first element. The first time this happens + # we also emit a warning. + image_shape = self._ignore_batch_dims_in_shape(image_shape, "image_shape") + extra_shape = self._ignore_batch_dims_in_shape(extra_shape, "extra_shape") + + # Set image shape, rank and extra shape. + self._image_shape_static, self._image_shape_dynamic = ( + tensor_util.static_and_dynamic_shapes_from_shape(image_shape)) + self._rank = self._image_shape_static.rank + if self._rank not in (2, 3): + raise ValueError(f"Rank must be 2 or 3, but got: {self._rank}") + self._image_axes = list(range(-self._rank, 0)) # pylint: disable=invalid-unary-operand-type + if extra_shape is None: + extra_shape = [] + self._extra_shape_static, self._extra_shape_dynamic = ( + tensor_util.static_and_dynamic_shapes_from_shape(extra_shape)) + + # Set initial batch shape, then update according to inputs. + # We include the "extra" dimensions in the batch shape for now, so that + # they are also included in the broadcasting operations below. However, + # note that the "extra" dimensions are not in fact part of the batch shape + # and they will be removed later. + self._batch_shape_static = self._extra_shape_static + self._batch_shape_dynamic = self._extra_shape_dynamic + + # Set sampling mask after checking dtype and static shape. 
+ if mask is not None: + mask = tf.convert_to_tensor(mask) + if mask.dtype != tf.bool: + raise TypeError( + f"`mask` must have dtype `bool`, but got: {str(mask.dtype)}") + if not mask.shape[-self._rank:].is_compatible_with( + self._image_shape_static): + raise ValueError( + f"Expected the last dimensions of `mask` to be compatible with " + f"{self._image_shape_static}], but got: {mask.shape[-self._rank:]}") + self._batch_shape_static = tf.broadcast_static_shape( + self._batch_shape_static, mask.shape[:-self._rank]) + self._batch_shape_dynamic = tf.broadcast_dynamic_shape( + self._batch_shape_dynamic, tf.shape(mask)[:-self._rank]) + self._mask = mask + + # Set sampling trajectory after checking dtype and static shape. + if trajectory is not None: + if mask is not None: + raise ValueError("`mask` and `trajectory` cannot be both passed.") + trajectory = tf.convert_to_tensor(trajectory) + if trajectory.dtype != dtype.real_dtype: + raise TypeError( + f"Expected `trajectory` to have dtype `{str(dtype.real_dtype)}`, " + f"but got: {str(trajectory.dtype)}") + if trajectory.shape[-1] != self._rank: + raise ValueError( + f"Expected the last dimension of `trajectory` to be " + f"{self._rank}, but got {trajectory.shape[-1]}") + self._batch_shape_static = tf.broadcast_static_shape( + self._batch_shape_static, trajectory.shape[:-2]) + self._batch_shape_dynamic = tf.broadcast_dynamic_shape( + self._batch_shape_dynamic, tf.shape(trajectory)[:-2]) + self._trajectory = trajectory + + # Set sampling density after checking dtype and static shape. 
+ if density is not None: + if self._trajectory is None: + raise ValueError("`density` must be passed with `trajectory`.") + density = tf.convert_to_tensor(density) + if density.dtype != dtype.real_dtype: + raise TypeError( + f"Expected `density` to have dtype `{str(dtype.real_dtype)}`, " + f"but got: {str(density.dtype)}") + if density.shape[-1] != self._trajectory.shape[-2]: + raise ValueError( + f"Expected the last dimension of `density` to be " + f"{self._trajectory.shape[-2]}, but got {density.shape[-1]}") + self._batch_shape_static = tf.broadcast_static_shape( + self._batch_shape_static, density.shape[:-1]) + self._batch_shape_dynamic = tf.broadcast_dynamic_shape( + self._batch_shape_dynamic, tf.shape(density)[:-1]) + self._density = density + + # Set sensitivity maps after checking dtype and static shape. + if sensitivities is not None: + sensitivities = tf.convert_to_tensor(sensitivities) + if sensitivities.dtype != dtype: + raise TypeError( + f"Expected `sensitivities` to have dtype `{str(dtype)}`, but got: " + f"{str(sensitivities.dtype)}") + if not sensitivities.shape[-self._rank:].is_compatible_with( + self._image_shape_static): + raise ValueError( + f"Expected the last dimensions of `sensitivities` to be " + f"compatible with {self._image_shape_static}, but got: " + f"{sensitivities.shape[-self._rank:]}") + self._batch_shape_static = tf.broadcast_static_shape( + self._batch_shape_static, + sensitivities.shape[:-(self._rank + 1)]) + self._batch_shape_dynamic = tf.broadcast_dynamic_shape( + self._batch_shape_dynamic, + tf.shape(sensitivities)[:-(self._rank + 1)]) + self._sensitivities = sensitivities + + if phase is not None: + phase = tf.convert_to_tensor(phase) + if phase.dtype != dtype.real_dtype: + raise TypeError( + f"Expected `phase` to have dtype `{str(dtype.real_dtype)}`, " + f"but got: {str(phase.dtype)}") + if not phase.shape[-self._rank:].is_compatible_with( + self._image_shape_static): + raise ValueError( + f"Expected the last dimensions of 
`phase` to be " + f"compatible with {self._image_shape_static}, but got: " + f"{phase.shape[-self._rank:]}") + self._batch_shape_static = tf.broadcast_static_shape( + self._batch_shape_static, phase.shape[:-self._rank]) + self._batch_shape_dynamic = tf.broadcast_dynamic_shape( + self._batch_shape_dynamic, tf.shape(phase)[:-self._rank]) + self._phase = phase + + # Set batch shapes. + extra_dims = self._extra_shape_static.rank + if extra_dims is None: + raise ValueError("rank of `extra_shape` must be known statically.") + if extra_dims > 0: + self._batch_shape_static = self._batch_shape_static[:-extra_dims] + self._batch_shape_dynamic = self._batch_shape_dynamic[:-extra_dims] + + # Save some tensors for later use during computation. The `_i_` prefix + # indicates that these tensors are for internal use. We cannot modify the + # original tensors because they are components of the composite tensor that + # represents this operator, and the overall composite tensor cannot be + # mutated in certain circumstances such as in Keras models. + self._i_mask = self._mask + self._i_trajectory = self._trajectory + self._i_density = self._density + self._i_phase = self._phase + self._i_sensitivities = self._sensitivities + + # If multicoil, add coil dimension to mask, trajectory and density. + if self._i_sensitivities is not None: + if self._i_mask is not None: + self._i_mask = tf.expand_dims(self._i_mask, axis=-(self._rank + 1)) + if self._i_trajectory is not None: + self._i_trajectory = tf.expand_dims(self._i_trajectory, axis=-3) + if self._i_density is not None: + self._i_density = tf.expand_dims(self._i_density, axis=-2) + if self._i_phase is not None: + self._i_phase = tf.expand_dims(self._i_phase, axis=-(self._rank + 1)) + + # Select masking algorithm. Options are `multiplex` and `multiply`. + # `multiply` seems faster in most cases, but this needs better profiling. 
+ self._masking_algorithm = 'multiply' + + if self._i_mask is not None: + if self._masking_algorithm == 'multiplex': + # Preallocate zeros tensor for multiplexing. + self._i_zeros = tf.zeros(shape=tf.shape(self._i_mask), dtype=self.dtype) + elif self._masking_algorithm == 'multiply': + # Cast the mask to operator's dtype for multiplication. + self._i_mask = tf.cast(self._i_mask, dtype) + else: + raise ValueError( + f"Unknown masking algorithm: {self._masking_algorithm}") + + # Compute the density compensation weights used internally. + if self._i_density is not None: + self._i_density = tf.cast(tf.math.sqrt( + tf.math.reciprocal_no_nan(self._i_density)), dtype) + # Compute the phase modulator used internally. + if self._i_phase is not None: + self._i_phase = tf.math.exp(tf.dtypes.complex( + tf.constant(0.0, dtype=dtype.real_dtype), self._i_phase)) + + # Set normalization. + self._fft_norm = check_util.validate_enum( + fft_norm, {None, 'ortho'}, 'fft_norm') + if self._fft_norm == 'ortho': # Compute normalization factors. + self._fft_norm_factor = tf.math.reciprocal( + tf.math.sqrt(tf.cast( + tf.math.reduce_prod(self._image_shape_dynamic), dtype))) + + # Normalize coil sensitivities. + self._sens_norm = sens_norm + if self._i_sensitivities is not None and self._sens_norm: + self._i_sensitivities = math_ops.normalize_no_nan( + self._i_sensitivities, axis=-(self._rank + 1)) + + # Intensity correction. + self._intensity_correction = intensity_correction + if self._i_sensitivities is not None and self._intensity_correction: + # This is redundant if `sens_norm` is `True`. + self._intensity_weights_sqrt = tf.math.reciprocal_no_nan( + tf.math.sqrt(tf.norm(self._i_sensitivities, axis=-(self._rank + 1)))) + + # Set dynamic domain. 
+ if dynamic_domain is not None and self._extra_shape.rank == 0: + raise ValueError( + "Argument `dynamic_domain` requires a non-scalar `extra_shape`.") + if dynamic_domain is not None: + self._dynamic_domain = check_util.validate_enum( + dynamic_domain, {'time', 'frequency'}, name='dynamic_domain') + else: + self._dynamic_domain = None + + # This variable is used by `LinearOperatorGramMRI` to disable the NUFFT. + self._skip_nufft = False + + def _transform(self, x, adjoint=False): + """Transform [batch] input `x`. + + Args: + x: A `tf.Tensor` of type `self.dtype` and shape + `[..., *self.domain_shape]` containing images, if `adjoint` is `False`, + or a `tf.Tensor` of type `self.dtype` and shape + `[..., *self.range_shape]` containing *k*-space data, if `adjoint` is + `True`. + adjoint: A `boolean` indicating whether to apply the adjoint of the + operator. + + Returns: + A `tf.Tensor` of type `self.dtype` and shape `[..., *self.range_shape]` + containing *k*-space data, if `adjoint` is `False`, or a `tf.Tensor` of + type `self.dtype` and shape `[..., *self.domain_shape]` containing + images, if `adjoint` is `True`. + + Raises: + ValueError: If the masking algorithm is invalid. + """ + if adjoint: + # Apply density compensation. + if self._i_density is not None and not self._skip_nufft: + x *= self._i_density + + # Apply adjoint Fourier operator. + if self.is_non_cartesian: # Non-Cartesian imaging, use NUFFT. + if not self._skip_nufft: + x = fft_ops.nufft(x, self._i_trajectory, + grid_shape=self._image_shape_dynamic, + transform_type='type_1', + fft_direction='backward') + if self._fft_norm is not None: + x *= self._fft_norm_factor + + else: # Cartesian imaging, use FFT. + if self._i_mask is not None: + # Apply undersampling. 
+ if self._masking_algorithm == 'multiplex': + x = tf.where(self._i_mask, x, self._i_zeros) + elif self._masking_algorithm == 'multiply': + x *= self._i_mask + else: + raise ValueError( + f"Unknown masking algorithm: {self._masking_algorithm}") + x = fft_ops.ifftn(x, axes=self._image_axes, + norm=self._fft_norm or 'forward', shift=True) + + # Apply coil combination. + if self.is_multicoil: + x *= tf.math.conj(self._i_sensitivities) + x = tf.math.reduce_sum(x, axis=-(self._rank + 1)) + + # Maybe remove phase from image. + if self.is_phase_constrained: + x *= tf.math.conj(self._i_phase) + x = tf.cast(tf.math.real(x), self.dtype) + + # Apply intensity correction. + if self.is_multicoil and self._intensity_correction: + x *= self._intensity_weights_sqrt + + # Apply FFT along dynamic axis, if necessary. + if self.is_dynamic and self.dynamic_domain == 'frequency': + x = fft_ops.fftn(x, axes=[self.dynamic_axis], + norm='ortho', shift=True) + + else: # Forward operator. + + # Apply IFFT along dynamic axis, if necessary. + if self.is_dynamic and self.dynamic_domain == 'frequency': + x = fft_ops.ifftn(x, axes=[self.dynamic_axis], + norm='ortho', shift=True) + + # Apply intensity correction. + if self.is_multicoil and self._intensity_correction: + x *= self._intensity_weights_sqrt + + # Add phase to real-valued image if reconstruction is phase-constrained. + if self.is_phase_constrained: + x = tf.cast(tf.math.real(x), self.dtype) + x *= self._i_phase + + # Apply sensitivity modulation. + if self.is_multicoil: + x = tf.expand_dims(x, axis=-(self._rank + 1)) + x *= self._i_sensitivities + + # Apply Fourier operator. + if self.is_non_cartesian: # Non-Cartesian imaging, use NUFFT. + if not self._skip_nufft: + x = fft_ops.nufft(x, self._i_trajectory, + transform_type='type_2', + fft_direction='forward') + if self._fft_norm is not None: + x *= self._fft_norm_factor + + else: # Cartesian imaging, use FFT. 
+ x = fft_ops.fftn(x, axes=self._image_axes, + norm=self._fft_norm or 'backward', shift=True) + if self._i_mask is not None: + # Apply undersampling. + if self._masking_algorithm == 'multiplex': + x = tf.where(self._i_mask, x, self._i_zeros) + elif self._masking_algorithm == 'multiply': + x *= self._i_mask + else: + raise ValueError( + f"Unknown masking algorithm: {self._masking_algorithm}") + + # Apply density compensation. + if self._i_density is not None and not self._skip_nufft: + x *= self._i_density + + return x + + def _preprocess(self, x, adjoint=False): + if adjoint: + if self._i_density is not None: + x *= self._i_density + else: + raise NotImplementedError( + "`_preprocess` not implemented for forward transform.") + return x + + def _postprocess(self, x, adjoint=False): + if adjoint: + # Apply temporal Fourier operator, if necessary. + if self.is_dynamic and self.dynamic_domain == 'frequency': + x = fft_ops.ifftn(x, axes=[self.dynamic_axis], + norm='ortho', shift=True) + + # Apply intensity correction, if necessary. 
+ if self.is_multicoil and self._intensity_correction: + x *= self._intensity_weights_sqrt + else: + raise NotImplementedError( + "`_postprocess` not implemented for forward transform.") + return x + + def _domain_shape(self): + """Returns the static shape of the domain space of this operator.""" + return self._extra_shape_static.concatenate(self._image_shape_static) + + def _domain_shape_tensor(self): + """Returns the dynamic shape of the domain space of this operator.""" + return tf.concat([self._extra_shape_dynamic, self._image_shape_dynamic], 0) + + def _range_shape(self): + """Returns the shape of the range space of this operator.""" + if self.is_cartesian: + range_shape = self._image_shape_static.as_list() + else: + range_shape = [self._trajectory.shape[-2]] + if self.is_multicoil: + range_shape = [self.num_coils] + range_shape + return self._extra_shape_static.concatenate(range_shape) + + def _range_shape_tensor(self): + if self.is_cartesian: + range_shape = self._image_shape_dynamic + else: + range_shape = [tf.shape(self._trajectory)[-2]] + if self.is_multicoil: + range_shape = tf.concat([[self.num_coils_tensor()], range_shape], 0) + return tf.concat([self._extra_shape_dynamic, range_shape], 0) + + def _batch_shape(self): + """Returns the static batch shape of this operator.""" + return self._batch_shape_static + + def _batch_shape_tensor(self): + """Returns the dynamic batch shape of this operator.""" + return self._batch_shape_dynamic + + @property + def image_shape(self): + """The image shape.""" + return self._image_shape_static + + def image_shape_tensor(self): + """The image shape as a tensor.""" + return self._image_shape_dynamic + + @property + def rank(self): + """The number of spatial dimensions. + + Returns: + An `int`, typically 2 or 3. + """ + return self._rank + + @property + def mask(self): + """The sampling mask. 
+ + Returns: + A boolean `tf.Tensor` of shape `batch_shape + extra_shape + image_shape`, + or `None` if the operator is fully sampled or non-Cartesian. + """ + return self._mask + + @property + def trajectory(self): + """The k-space trajectory. + + Returns: + A real `tf.Tensor` of shape `batch_shape + extra_shape + [samples, rank]`, + or `None` if the operator is Cartesian. + """ + return self._trajectory + + @property + def density(self): + """The sampling density. + + Returns: + A real `tf.Tensor` of shape `batch_shape + extra_shape + [samples]`, + or `None` if the operator is Cartesian or has unknown sampling density. + """ + return self._density + + @property + def is_cartesian(self): + """Whether this is a Cartesian MRI operator.""" + return self._trajectory is None + + @property + def is_non_cartesian(self): + """Whether this is a non-Cartesian MRI operator.""" + return self._trajectory is not None + + @property + def is_multicoil(self): + """Whether this is a multicoil MRI operator.""" + return self._sensitivities is not None + + @property + def is_phase_constrained(self): + """Whether this is a phase-constrained MRI operator.""" + return self._phase is not None + + @property + def is_dynamic(self): + """Whether this is a dynamic MRI operator.""" + return self._dynamic_domain is not None + + @property + def dynamic_domain(self): + """The dynamic domain of this operator.""" + return self._dynamic_domain + + @property + def dynamic_axis(self): + """The dynamic axis of this operator.""" + return -(self._rank + 1) if self.is_dynamic else None + + @property + def num_coils(self): + """The number of coils, computed statically.""" + if self._sensitivities is None: + return None + return self._sensitivities.shape[-(self._rank + 1)] + + def num_coils_tensor(self): + """The number of coils, computed dynamically.""" + if self._sensitivities is None: + return tf.convert_to_tensor(-1, dtype=tf.int32) + return tf.shape(self._sensitivities)[-(self._rank + 1)] + + def 
_ignore_batch_dims_in_shape(self, shape, argname): + if shape is None: + return None + shape = tf.convert_to_tensor(shape, dtype=tf.int32) + if shape.shape.rank == 2: + warned = _WARNED_IGNORED_BATCH_DIMENSIONS.get(argname, False) + if not warned: + _WARNED_IGNORED_BATCH_DIMENSIONS[argname] = True + warnings.warn( + f"Operator {self.name} got a batched `{argname}` argument. " + f"It is not possible to process images with " + f"different shapes in the same batch. " + f"If the input batch has more than one element, " + f"only the first shape will be used. " + f"It is up to you to verify if this behavior is correct.") + return tf.ensure_shape(shape[0], shape.shape[1:]) + return shape + + @property + def _composite_tensor_fields(self): + return ("image_shape", + "extra_shape", + "mask", + "trajectory", + "density", + "sensitivities", + "phase", + "fft_norm", + "sens_norm", + "intensity_correction", + "dynamic_domain", + "dtype") + + @property + def _composite_tensor_prefer_static_fields(self): + return ("image_shape", "extra_shape") + + +@api_util.export("linalg.LinearOperatorGramMRI") +class LinearOperatorGramMRI(LinearOperatorMRI): # pylint: disable=abstract-method + """Linear operator representing the Gram matrix of an MRI measurement system. + + If $A$ is a `tfmri.linalg.LinearOperatorMRI`, then this ooperator + represents the matrix $G = A^H A$. + + In certain circumstances, this operator may be able to apply the matrix + $G$ more efficiently than the composition $G = A^H A$ using + `tfmri.linalg.LinearOperatorMRI` objects. + + Args: + image_shape: A 1D integer `tf.Tensor`. The shape of the images + that this operator acts on. Must have length 2 or 3. + extra_shape: An optional 1D integer `tf.Tensor`. Additional + dimensions that should be included within the operator domain. Note that + `extra_shape` is not needed to reconstruct independent batches of images. 
+ However, it is useful when this operator is used as part of a + reconstruction that performs computation along non-spatial dimensions, + e.g. for temporal regularization. Defaults to `None`. + mask: An optional `tf.Tensor` of type `tf.bool`. The sampling mask. Must + have shape `[..., *S]`, where `S` is the `image_shape` and `...` is + the batch shape, which can have any number of dimensions. If `mask` is + passed, this operator represents an undersampled MRI operator. + trajectory: An optional `tf.Tensor` of type `float32` or `float64`. Must + have shape `[..., M, N]`, where `N` is the rank (number of spatial + dimensions), `M` is the number of samples in the encoded space and `...` + is the batch shape, which can have any number of dimensions. If + `trajectory` is passed, this operator represents a non-Cartesian MRI + operator. + density: An optional `tf.Tensor` of type `float32` or `float64`. The + sampling densities. Must have shape `[..., M]`, where `M` is the number of + samples and `...` is the batch shape, which can have any number of + dimensions. This input is only relevant for non-Cartesian MRI operators. + If passed, the non-Cartesian operator will include sampling density + compensation. If `None`, the operator will not perform sampling density + compensation. + sensitivities: An optional `tf.Tensor` of type `complex64` or `complex128`. + The coil sensitivity maps. Must have shape `[..., C, *S]`, where `S` + is the `image_shape`, `C` is the number of coils and `...` is the batch + shape, which can have any number of dimensions. + phase: An optional `tf.Tensor` of type `float32` or `float64`. A phase + estimate for the image. If provided, this operator will be + phase-constrained. + fft_norm: FFT normalization mode. Must be `None` (no normalization) + or `'ortho'`. Defaults to `'ortho'`. + sens_norm: A `boolean`. Whether to normalize coil sensitivities. Defaults to + `True`. + dynamic_domain: A `str`. The domain of the dynamic dimension, if present. 
+ Must be one of `'time'` or `'frequency'`. May only be provided together + with a non-scalar `extra_shape`. The dynamic dimension is the last + dimension of `extra_shape`. The `'time'` mode (default) should be + used for regular dynamic reconstruction. The `'frequency'` mode should be + used for reconstruction in x-f space. + toeplitz_nufft: A `boolean`. If `True`, uses the Toeplitz approach [5] + to compute $F^H F x$, where $F$ is the non-uniform Fourier + operator. If `False`, the same operation is performed using the standard + NUFFT operation. The Toeplitz approach might be faster than the direct + approach but is slightly less accurate. This argument is only relevant + for non-Cartesian reconstruction and will be ignored for Cartesian + problems. + dtype: A `tf.dtypes.DType`. The dtype of this operator. Must be `complex64` + or `complex128`. Defaults to `complex64`. + name: An optional `str`. The name of this operator. + """ + def __init__(self, + image_shape, + extra_shape=None, + mask=None, + trajectory=None, + density=None, + sensitivities=None, + phase=None, + fft_norm='ortho', + sens_norm=True, + dynamic_domain=None, + toeplitz_nufft=False, + dtype=tf.complex64, + name="LinearOperatorGramMRI"): + super().__init__( + image_shape, + extra_shape=extra_shape, + mask=mask, + trajectory=trajectory, + density=density, + sensitivities=sensitivities, + phase=phase, + fft_norm=fft_norm, + sens_norm=sens_norm, + dynamic_domain=dynamic_domain, + dtype=dtype, + name=name + ) + + self.toeplitz_nufft = toeplitz_nufft + if self.toeplitz_nufft and self.is_non_cartesian: + # Create a Gram NUFFT operator with Toeplitz embedding. + self._linop_gram_nufft = linear_operator_nufft.LinearOperatorGramNUFFT( + image_shape, trajectory=self._trajectory, density=self._density, + norm=fft_norm, toeplitz=True) + # Disable NUFFT computation on base class. The NUFFT will instead be + # performed by the Gram NUFFT operator. 
+ self._skip_nufft = True + + def _transform(self, x, adjoint=False): + x = super()._transform(x) + if self.toeplitz_nufft: + x = self._linop_gram_nufft.transform(x) + x = super()._transform(x, adjoint=True) + return x + + def _range_shape(self): + return self._domain_shape() + + def _range_shape_tensor(self): + return self._domain_shape_tensor() diff --git a/tensorflow_mri/python/linalg/linear_operator_mri_test.py b/tensorflow_mri/python/linalg/linear_operator_mri_test.py new file mode 100755 index 00000000..7cc12a28 --- /dev/null +++ b/tensorflow_mri/python/linalg/linear_operator_mri_test.py @@ -0,0 +1,214 @@ +# Copyright 2021 The TensorFlow MRI Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Tests for module `linear_operator_mri`.""" +# pylint: disable=missing-class-docstring,missing-function-docstring + +from absl.testing import parameterized +import tensorflow as tf + +from tensorflow_mri.python.linalg import linear_operator_mri +from tensorflow_mri.python.ops import fft_ops +from tensorflow_mri.python.ops import image_ops +from tensorflow_mri.python.ops import traj_ops +from tensorflow_mri.python.util import test_util + + +class LinearOperatorMRITest(test_util.TestCase): + """Tests for MRI linear operator.""" + def test_fft(self): + """Test FFT operator.""" + # Test init. 
+ linop = linear_operator_mri.LinearOperatorMRI([2, 2], fft_norm=None) + + # Test matvec. + signal = tf.constant([1, 2, 4, 4], dtype=tf.complex64) + expected = [-1, 5, 1, 11] + result = tf.linalg.matvec(linop, signal) + self.assertAllClose(expected, result) + + # Test domain shape. + self.assertIsInstance(linop.domain_shape, tf.TensorShape) + self.assertAllEqual([2, 2], linop.domain_shape) + self.assertAllEqual([2, 2], linop.domain_shape_tensor()) + + # Test range shape. + self.assertIsInstance(linop.range_shape, tf.TensorShape) + self.assertAllEqual([2, 2], linop.range_shape) + self.assertAllEqual([2, 2], linop.range_shape_tensor()) + + # Test batch shape. + self.assertIsInstance(linop.batch_shape, tf.TensorShape) + self.assertAllEqual([], linop.batch_shape) + self.assertAllEqual([], linop.batch_shape_tensor()) + + def test_fft_with_mask(self): + """Test FFT operator with mask.""" + # Test init. + linop = linear_operator_mri.LinearOperatorMRI( + [2, 2], mask=[[False, False], [True, True]], fft_norm=None) + + # Test matvec. + signal = tf.constant([1, 2, 4, 4], dtype=tf.complex64) + expected = [0, 0, 1, 11] + result = tf.linalg.matvec(linop, signal) + self.assertAllClose(expected, result) + + # Test domain shape. + self.assertIsInstance(linop.domain_shape, tf.TensorShape) + self.assertAllEqual([2, 2], linop.domain_shape) + self.assertAllEqual([2, 2], linop.domain_shape_tensor()) + + # Test range shape. + self.assertIsInstance(linop.range_shape, tf.TensorShape) + self.assertAllEqual([2, 2], linop.range_shape) + self.assertAllEqual([2, 2], linop.range_shape_tensor()) + + # Test batch shape. + self.assertIsInstance(linop.batch_shape, tf.TensorShape) + self.assertAllEqual([], linop.batch_shape) + self.assertAllEqual([], linop.batch_shape_tensor()) + + def test_fft_with_batch_mask(self): + """Test FFT operator with batch mask.""" + # Test init. 
+ linop = linear_operator_mri.LinearOperatorMRI( + [2, 2], mask=[[[True, True], [False, False]], + [[False, False], [True, True]], + [[False, True], [True, False]]], fft_norm=None) + + # Test matvec. + signal = tf.constant([1, 2, 4, 4], dtype=tf.complex64) + expected = [[-1, 5, 0, 0], [0, 0, 1, 11], [0, 5, 1, 0]] + result = tf.linalg.matvec(linop, signal) + self.assertAllClose(expected, result) + + # Test domain shape. + self.assertIsInstance(linop.domain_shape, tf.TensorShape) + self.assertAllEqual([2, 2], linop.domain_shape) + self.assertAllEqual([2, 2], linop.domain_shape_tensor()) + + # Test range shape. + self.assertIsInstance(linop.range_shape, tf.TensorShape) + self.assertAllEqual([2, 2], linop.range_shape) + self.assertAllEqual([2, 2], linop.range_shape_tensor()) + + # Test batch shape. + self.assertIsInstance(linop.batch_shape, tf.TensorShape) + self.assertAllEqual([3], linop.batch_shape) + self.assertAllEqual([3], linop.batch_shape_tensor()) + + def test_fft_norm(self): + """Test FFT normalization.""" + linop = linear_operator_mri.LinearOperatorMRI([2, 2], fft_norm='ortho') + x = tf.constant([1 + 2j, 2 - 2j, -1 - 6j, 3 + 4j], dtype=tf.complex64) + # With norm='ortho', subsequent application of the operator and its adjoint + # should not scale the input. 
+ y = tf.linalg.matvec(linop.H, tf.linalg.matvec(linop, x)) + self.assertAllClose(x, y) + + def test_nufft_with_sensitivities(self): + resolution = 128 + image_shape = [resolution, resolution] + num_coils = 4 + image, sensitivities = image_ops.phantom( + shape=image_shape, num_coils=num_coils, dtype=tf.complex64, + return_sensitivities=True) + image = image_ops.phantom(shape=image_shape, dtype=tf.complex64) + trajectory = traj_ops.radial_trajectory(resolution, resolution // 2 + 1, + flatten_encoding_dims=True) + density = traj_ops.radial_density(resolution, resolution // 2 + 1, + flatten_encoding_dims=True) + + linop = linear_operator_mri.LinearOperatorMRI( + image_shape, trajectory=trajectory, density=density, + sensitivities=sensitivities) + + # Test shapes. + expected_domain_shape = image_shape + self.assertAllClose(expected_domain_shape, linop.domain_shape) + self.assertAllClose(expected_domain_shape, linop.domain_shape_tensor()) + expected_range_shape = [num_coils, (2 * resolution) * (resolution // 2 + 1)] + self.assertAllClose(expected_range_shape, linop.range_shape) + self.assertAllClose(expected_range_shape, linop.range_shape_tensor()) + + # Test forward. + weights = tf.cast(tf.math.sqrt(tf.math.reciprocal_no_nan(density)), + tf.complex64) + norm = tf.math.sqrt(tf.cast(tf.math.reduce_prod(image_shape), tf.complex64)) + expected = fft_ops.nufft(image * sensitivities, trajectory) * weights / norm + kspace = linop.transform(image) + self.assertAllClose(expected, kspace) + + # Test adjoint. 
+ expected = tf.math.reduce_sum( + fft_ops.nufft( + kspace * weights, trajectory, grid_shape=image_shape, + transform_type='type_1', fft_direction='backward') / norm * + tf.math.conj(sensitivities), axis=-3) + recon = linop.transform(kspace, adjoint=True) + self.assertAllClose(expected, recon) + + + +class LinearOperatorGramMRITest(test_util.TestCase): + @parameterized.product(batch=[False, True], extra=[False, True], + toeplitz_nufft=[False, True]) + def test_general(self, batch, extra, toeplitz_nufft): + resolution = 128 + image_shape = [resolution, resolution] + num_coils = 4 + image, sensitivities = image_ops.phantom( + shape=image_shape, num_coils=num_coils, dtype=tf.complex64, + return_sensitivities=True) + image = image_ops.phantom(shape=image_shape, dtype=tf.complex64) + trajectory = traj_ops.radial_trajectory(resolution, resolution // 2 + 1, + flatten_encoding_dims=True) + density = traj_ops.radial_density(resolution, resolution // 2 + 1, + flatten_encoding_dims=True) + if batch: + image = tf.stack([image, image * 2]) + if extra: + extra_shape = [2] + else: + extra_shape = None + else: + extra_shape = None + + linop = linear_operator_mri.LinearOperatorMRI( + image_shape, extra_shape=extra_shape, + trajectory=trajectory, density=density, + sensitivities=sensitivities) + linop_gram = linear_operator_mri.LinearOperatorGramMRI( + image_shape, extra_shape=extra_shape, + trajectory=trajectory, density=density, + sensitivities=sensitivities, toeplitz_nufft=toeplitz_nufft) + + # Test shapes. + expected_domain_shape = image_shape + if extra_shape is not None: + expected_domain_shape = extra_shape + image_shape + self.assertAllClose(expected_domain_shape, linop_gram.domain_shape) + self.assertAllClose(expected_domain_shape, linop_gram.domain_shape_tensor()) + self.assertAllClose(expected_domain_shape, linop_gram.range_shape) + self.assertAllClose(expected_domain_shape, linop_gram.range_shape_tensor()) + + # Test transform. 
+ expected = linop.transform(linop.transform(image), adjoint=True) + self.assertAllClose(expected, linop_gram.transform(image), + rtol=1e-4, atol=1e-4) + + +if __name__ == '__main__': + tf.test.main() diff --git a/tensorflow_mri/python/linalg/linear_operator_nufft.py b/tensorflow_mri/python/linalg/linear_operator_nufft.py new file mode 100644 index 00000000..0875eab3 --- /dev/null +++ b/tensorflow_mri/python/linalg/linear_operator_nufft.py @@ -0,0 +1,504 @@ +# Copyright 2021 The TensorFlow MRI Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Linear algebra operations. + +This module contains linear operators and solvers. +""" + +import tensorflow as tf + +from tensorflow_mri.python.ops import array_ops +from tensorflow_mri.python.ops import fft_ops +from tensorflow_mri.python.util import api_util +from tensorflow_mri.python.util import check_util +from tensorflow_mri.python.linalg import linear_operator +from tensorflow_mri.python.util import tensor_util + + +@api_util.export("linalg.LinearOperatorNUFFT") +class LinearOperatorNUFFT(linear_operator.LinearOperator): # pylint: disable=abstract-method + """Linear operator acting like a nonuniform DFT matrix. + + Args: + domain_shape: A 1D integer `tf.Tensor`. The domain shape of this + operator. This is usually the shape of the image but may include + additional dimensions. 
+ trajectory: A `tf.Tensor` of type `float32` or `float64`. Contains the + sampling locations or *k*-space trajectory. Must have shape + `[..., M, N]`, where `N` is the rank (number of dimensions), `M` is + the number of samples and `...` is the batch shape, which can have any + number of dimensions. + density: A `tf.Tensor` of type `float32` or `float64`. Contains the + sampling density at each point in `trajectory`. Must have shape + `[..., M]`, where `M` is the number of samples and `...` is the batch + shape, which can have any number of dimensions. Defaults to `None`, in + which case the density is assumed to be 1.0 in all locations. + norm: A `str`. The FFT normalization mode. Must be `None` (no normalization) + or `'ortho'`. + name: An optional `str`. The name of this operator. + + Notes: + In MRI, sampling density compensation is typically performed during the + adjoint transform. However, in order to maintain certain properties of the + linear operator, this operator applies the compensation orthogonally, i.e., + it scales the data by the square root of `density` in both forward and + adjoint transforms. If you are using this operator to compute the adjoint + and wish to apply the full compensation, you can do so via the + `preprocess` method. + + Example: + >>> # Create some data. + >>> image_shape = (128, 128) + >>> image = tfmri.image.phantom(shape=image_shape, dtype=tf.complex64) + >>> trajectory = tfmri.sampling.radial_trajectory( + ... base_resolution=128, views=129, flatten_encoding_dims=True) + >>> density = tfmri.sampling.radial_density( + ... base_resolution=128, views=129, flatten_encoding_dims=True) + >>> # Create a NUFFT operator. + >>> linop = tfmri.linalg.LinearOperatorNUFFT( + ... image_shape, trajectory=trajectory, density=density) + >>> # Create k-space. + >>> kspace = tfmri.signal.nufft(image, trajectory) + >>> # This reconstructs the image applying only partial compensation + >>> # (square root of weights). 
  >>> image = linop.transform(kspace, adjoint=True)
  >>> # This reconstructs the image with full compensation.
  >>> image = linop.transform(
  ...     linop.preprocess(kspace, adjoint=True), adjoint=True)

  """
  def __init__(self,
               domain_shape,
               trajectory,
               density=None,
               norm='ortho',
               name="LinearOperatorNUFFT"):
    """Initializes this linear operator.

    See the class docstring for the meaning of the arguments.
    """
    # NOTE(review): `density` is not recorded in `parameters`, so an operator
    # rebuilt from `parameters` would lose its density compensation — TODO
    # confirm this omission is intentional.
    parameters = dict(
        domain_shape=domain_shape,
        trajectory=trajectory,
        norm=norm,
        name=name
    )

    # Get domain shapes (static `tf.TensorShape` + dynamic `tf.Tensor`).
    self._domain_shape_static, self._domain_shape_dynamic = (
        tensor_util.static_and_dynamic_shapes_from_shape(domain_shape))

    # Validate the remaining inputs.
    self.trajectory = check_util.validate_tensor_dtype(
        tf.convert_to_tensor(trajectory), 'floating', 'trajectory')
    self.norm = check_util.validate_enum(norm, {None, 'ortho'}, 'norm')

    # We infer the operation's rank from the trajectory (its last dimension
    # holds one coordinate per spatial dimension).
    self._rank_static = self.trajectory.shape[-1]
    self._rank_dynamic = tf.shape(self.trajectory)[-1]
    # The domain rank is >= the operation rank.
    domain_rank_static = self._domain_shape_static.rank
    domain_rank_dynamic = tf.shape(self._domain_shape_dynamic)[0]
    # The difference between this operation's rank and the domain rank is the
    # number of extra dims.
    # NOTE(review): if either static rank is unknown (`None`), this
    # subtraction raises `TypeError` instead of propagating `None` — TODO
    # confirm callers always provide statically-ranked inputs.
    extra_dims_static = domain_rank_static - self._rank_static
    extra_dims_dynamic = domain_rank_dynamic - self._rank_dynamic

    # The grid shape are the last `rank` dimensions of domain_shape. We don't
    # need the static grid shape.
    self._grid_shape = self._domain_shape_dynamic[-self._rank_dynamic:]

    # We need to do some work to figure out the batch shapes. This operator
    # could have a batch shape, if the trajectory has a batch shape. However,
    # we allow the user to include one or more batch dimensions in the domain
    # shape, if they so wish. Therefore, not all batch dimensions in the
    # trajectory are necessarily part of the batch shape.

    # The total number of dimensions in `trajectory` is equal to
    # `batch_dims + extra_dims + 2` (the trailing two being `M` points and
    # `N` coordinates).
    # Compute the true batch shape (i.e., the batch dimensions that are
    # NOT included in the domain shape).
    batch_dims_dynamic = tf.rank(self.trajectory) - extra_dims_dynamic - 2
    if (self.trajectory.shape.rank is not None and
        extra_dims_static is not None):
      # We know the total number of dimensions in `trajectory` and we know
      # the number of extra dims, so we can compute the number of batch dims
      # statically.
      batch_dims_static = self.trajectory.shape.rank - extra_dims_static - 2
    else:
      # We are missing at least some information, so the number of batch
      # dimensions is unknown.
      batch_dims_static = None

    self._batch_shape_dynamic = tf.shape(self.trajectory)[:batch_dims_dynamic]
    if batch_dims_static is not None:
      self._batch_shape_static = self.trajectory.shape[:batch_dims_static]
    else:
      self._batch_shape_static = tf.TensorShape(None)

    # Compute the "extra" shape. This shape includes those dimensions which
    # are not part of the NUFFT (e.g., they are effectively batch dimensions),
    # but which are included in the domain shape rather than in the batch
    # shape.
    extra_shape_dynamic = self._domain_shape_dynamic[:-self._rank_dynamic]
    if self._rank_static is not None:
      extra_shape_static = self._domain_shape_static[:-self._rank_static]
    else:
      extra_shape_static = tf.TensorShape(None)

    # Check that the "extra" shape in `domain_shape` and `trajectory` are
    # compatible for broadcasting.
    shape1, shape2 = extra_shape_static, self.trajectory.shape[:-2]
    try:
      tf.broadcast_static_shape(shape1, shape2)
    except ValueError as err:
      raise ValueError(
          f"The \"batch\" shapes in `domain_shape` and `trajectory` are not "
          f"compatible for broadcasting: {shape1} vs {shape2}") from err

    # Compute the range shape: the extra (uncollapsed batch-like) dims
    # followed by the number of k-space samples `M`.
    self._range_shape_dynamic = tf.concat(
        [extra_shape_dynamic, tf.shape(self.trajectory)[-2:-1]], 0)
    self._range_shape_static = extra_shape_static.concatenate(
        self.trajectory.shape[-2:-1])

    # Statically check that density can be broadcasted with trajectory.
    # NOTE(review): `density.shape` is read before `tf.convert_to_tensor`, so
    # a plain Python list would fail here with `AttributeError` — TODO
    # confirm density is always a tensor/ndarray at this point.
    if density is not None:
      try:
        tf.broadcast_static_shape(self.trajectory.shape[:-1], density.shape)
      except ValueError as err:
        raise ValueError(
            f"The \"batch\" shapes in `trajectory` and `density` are not "
            f"compatible for broadcasting: {self.trajectory.shape[:-1]} vs "
            f"{density.shape}") from err
      self.density = tf.convert_to_tensor(density)
      # Compensation weights are the reciprocal density (0 where density
      # is 0); their square root is precomputed in the complex dtype so it
      # can be applied symmetrically in forward and adjoint transforms.
      self.weights = tf.math.reciprocal_no_nan(self.density)
      self._weights_sqrt = tf.cast(
          tf.math.sqrt(self.weights),
          tensor_util.get_complex_dtype(self.trajectory.dtype))
    else:
      self.density = None
      self.weights = None

    super().__init__(tensor_util.get_complex_dtype(self.trajectory.dtype),
                     is_non_singular=None,
                     is_self_adjoint=None,
                     is_positive_definite=None,
                     is_square=None,
                     name=name,
                     parameters=parameters)

    # Compute normalization factors. With 'ortho', both directions are
    # scaled by 1 / sqrt(prod(grid_shape)).
    if self.norm == 'ortho':
      norm_factor = tf.math.reciprocal(
          tf.math.sqrt(tf.cast(tf.math.reduce_prod(self._grid_shape),
                               self.dtype)))
      self._norm_factor_forward = norm_factor
      self._norm_factor_adjoint = norm_factor

  def _transform(self, x, adjoint=False):
    """Applies the forward (type-2) or adjoint (type-1) NUFFT to `x`.

    When a density was provided, multiplies by the square-root compensation
    weights on the k-space side of both directions.
    """
    if adjoint:
      if self.density is not None:
        x *= self._weights_sqrt
      x = fft_ops.nufft(x, self.trajectory,
                        grid_shape=self._grid_shape,
                        transform_type='type_1',
                        fft_direction='backward')
      if self.norm is not None:
        x *= self._norm_factor_adjoint
    else:
      x = fft_ops.nufft(x, self.trajectory,
                        transform_type='type_2',
                        fft_direction='forward')
      if self.norm is not None:
        x *= self._norm_factor_forward
      if self.density is not None:
        x *= self._weights_sqrt
    return x

  def _preprocess(self, x, adjoint=False):
    """Applies the remaining sqrt-weights so adjoint input is fully
    density-compensated; only the adjoint direction is implemented."""
    if adjoint:
      if self.density is not None:
        x *= self._weights_sqrt
    else:
      raise NotImplementedError(
          "_preprocess not implemented for forward transform.")
    return x

  def _postprocess(self, x, adjoint=False):
    """Post-processing hook; a no-op for the adjoint direction."""
    if adjoint:
      pass  # nothing to do
    else:
      raise NotImplementedError(
          "_postprocess not implemented for forward transform.")
    return x

  def _domain_shape(self):
    # Static domain shape (the image shape, possibly with extra dims).
    return self._domain_shape_static

  def _domain_shape_tensor(self):
    # Dynamic counterpart of `_domain_shape`.
    return self._domain_shape_dynamic

  def _range_shape(self):
    # Static range shape: extra dims + number of k-space samples.
    return self._range_shape_static

  def _range_shape_tensor(self):
    # Dynamic counterpart of `_range_shape`.
    return self._range_shape_dynamic

  def _batch_shape(self):
    # Static batch shape derived from the trajectory's leading dims.
    return self._batch_shape_static

  def _batch_shape_tensor(self):
    # Dynamic counterpart of `_batch_shape`.
    return self._batch_shape_dynamic

  @property
  def rank(self):
    """Static rank of the NUFFT, inferred from `trajectory`'s last dim
    (may be `None` if unknown statically)."""
    return self._rank_static

  def rank_tensor(self):
    """Dynamic (tensor-valued) rank of the NUFFT."""
    return self._rank_dynamic


@api_util.export("linalg.LinearOperatorGramNUFFT")
class LinearOperatorGramNUFFT(LinearOperatorNUFFT):  # pylint: disable=abstract-method
  """Linear operator acting like the Gram matrix of an NUFFT operator.

  If $F$ is a `tfmri.linalg.LinearOperatorNUFFT`, then this operator
  applies $F^H F$. This operator is self-adjoint.

  Args:
    domain_shape: A 1D integer `tf.Tensor`. The domain shape of this
      operator. This is usually the shape of the image but may include
      additional dimensions.
    trajectory: A `tf.Tensor` of type `float32` or `float64`. Contains the
      sampling locations or *k*-space trajectory. Must have shape
      `[..., M, N]`, where `N` is the rank (number of dimensions), `M` is
      the number of samples and `...` is the batch shape, which can have any
      number of dimensions.
    density: A `tf.Tensor` of type `float32` or `float64`. Contains the
      sampling density at each point in `trajectory`. Must have shape
      `[..., M]`, where `M` is the number of samples and `...` is the batch
      shape, which can have any number of dimensions. Defaults to `None`, in
      which case the density is assumed to be 1.0 in all locations.
    norm: A `str`. The FFT normalization mode. Must be `None` (no
      normalization) or `'ortho'`.
    toeplitz: A `boolean`. If `True`, uses the Toeplitz approach [1]
      to compute $F^H F x$, where $F$ is the NUFFT operator.
      If `False`, the same operation is performed using the standard
      NUFFT operation. The Toeplitz approach might be faster than the direct
      approach but is slightly less accurate. This argument is only relevant
      for non-Cartesian reconstruction and will be ignored for Cartesian
      problems.
    name: An optional `str`. The name of this operator.

  References:
    1. Fessler, J. A., Lee, S., Olafsson, V. T., Shi, H. R., & Noll, D. C.
       (2005). Toeplitz-based iterative image reconstruction for MRI with
       correction for magnetic field inhomogeneity. IEEE Transactions on
       Signal Processing, 53(9), 3393-3402.
  """
  # NOTE(review): the default `name` below looks like a copy-paste from the
  # parent class; consider "LinearOperatorGramNUFFT". Changing it would alter
  # name scoping for existing users, so confirm before fixing.
  def __init__(self,
               domain_shape,
               trajectory,
               density=None,
               norm='ortho',
               toeplitz=False,
               name="LinearOperatorNUFFT"):
    """Initializes this linear operator.

    See the class docstring for the meaning of the arguments.
    """
    super().__init__(
        domain_shape=domain_shape,
        trajectory=trajectory,
        density=density,
        norm=norm,
        name=name
    )

    self.toeplitz = toeplitz
    if self.toeplitz:
      # Compute the FFT shift for adjoint NUFFT computation.
      self._fft_shift = tf.cast(self._grid_shape // 2, self.dtype.real_dtype)
      # Compute the Toeplitz kernel.
      self._toeplitz_kernel = self._compute_toeplitz_kernel()
      # Kernel shape (without batch dimensions).
      self._kernel_shape = tf.shape(self._toeplitz_kernel)[-self.rank_tensor():]

  def _transform(self, x, adjoint=False):  # pylint: disable=unused-argument
    """Applies this linear operator."""
    # This operator is self-adjoint, so `adjoint` arg is unused.
    if self.toeplitz:
      # Using specialized Toeplitz implementation.
      return self._transform_toeplitz(x)
    # Using standard NUFFT implementation: adjoint(forward(x)).
    return super()._transform(super()._transform(x), adjoint=True)

  def _transform_toeplitz(self, x):
    """Applies this linear operator using the Toeplitz approach.

    Zero-pads `x` up to the (oversized) kernel shape, multiplies by the
    precomputed kernel in Fourier space, inverse-transforms, and crops back
    to the input shape.
    """
    input_shape = tf.shape(x)
    fft_axes = tf.range(-self.rank_tensor(), 0)
    x = fft_ops.fftn(x, axes=fft_axes, shape=self._kernel_shape)
    x *= self._toeplitz_kernel
    x = fft_ops.ifftn(x, axes=fft_axes)
    # Crop the zero-padded result back to the original input shape.
    x = tf.slice(x, tf.zeros([tf.rank(x)], dtype=tf.int32), input_shape)
    return x

  def _compute_toeplitz_kernel(self):
    """Computes the kernel for the Toeplitz approach.

    Returns:
      A `tf.Tensor` containing the Fourier-space Toeplitz kernel.

    Raises:
      NotImplementedError: If the rank of this operator is not known
        statically.
    """
    trajectory = self.trajectory
    weights = self.weights
    if self.rank is None:
      raise NotImplementedError(
          f"The rank of {self.name} must be known statically.")

    if weights is None:
      # If no weights were passed, use ones.
      weights = tf.ones(tf.shape(trajectory)[:-1], dtype=self.dtype.real_dtype)
    # Cast weights to complex dtype (taking sqrt to match the symmetric
    # application in the forward/adjoint pair).
    weights = tf.cast(tf.math.sqrt(weights), self.dtype)

    # Compute N-D kernel recursively. Begin with last axis.
    last_axis = self.rank - 1
    kernel = self._compute_kernel_recursive(trajectory, weights, last_axis)

    # Make sure that the kernel is symmetric/Hermitian/self-adjoint.
    kernel = self._enforce_kernel_symmetry(kernel)

    # Additional normalization by sqrt(2 ** rank). This is required because
    # we are using FFTs with twice the length of the original image.
    if self.norm == 'ortho':
      kernel *= tf.cast(tf.math.sqrt(2.0 ** self.rank), kernel.dtype)

    # Put the kernel in Fourier space.
    fft_axes = list(range(-self.rank, 0))
    fft_norm = self.norm or "backward"
    return fft_ops.fftn(kernel, axes=fft_axes, norm=fft_norm)

  def _compute_kernel_recursive(self, trajectory, weights, axis):
    """Recursively computes the kernel for the Toeplitz approach.

    This function works by computing the two halves of the kernel along each
    axis. The "left" half is computed using the input trajectory. The "right"
    half is computed using the trajectory flipped along the current axis, and
    then reversed. Then the two halves are concatenated, with a block of
    zeros inserted in between. If there is more than one axis, the process is
    repeated recursively for each axis.

    This function calls the adjoint NUFFT 2 ** N times, where N is the number
    of dimensions. NOTE: this could be optimized to 2 ** (N - 1) calls.

    Args:
      trajectory: A `tf.Tensor` containing the current *k*-space trajectory.
      weights: A `tf.Tensor` containing the current density compensation
        weights.
      axis: An `int` denoting the current axis.

    Returns:
      A `tf.Tensor` containing the kernel.

    Raises:
      NotImplementedError: If the number of batch dimensions of this
        operator is not known statically.
    """
    # Account for the batch dimensions. We do not need to do the recursion
    # for these.
    batch_dims = self.batch_shape.rank
    if batch_dims is None:
      raise NotImplementedError(
          f"The number of batch dimensions of {self.name} must be known "
          f"statically.")
    # The current image axis expressed as an absolute axis of the kernel
    # tensor (i.e., offset past the batch dimensions).
    image_axis = axis + batch_dims
    if axis == 0:
      # Base case: innermost recursion level. Compute left half, then use
      # the flipped trajectory to compute the right half.
      # TODO(jmontalt): there should be a way to compute the NUFFT only once.
      kernel_left = self._nufft_adjoint(weights, trajectory)
      flippings = tf.tensor_scatter_nd_update(
          tf.ones([self.rank_tensor()]), [[axis]], [-1])
      kernel_right = self._nufft_adjoint(weights, trajectory * flippings)
    else:
      # We still have two or more axes to process. Compute left and right
      # kernels by calling this function recursively. We call ourselves
      # twice, first with current frequencies, then with negated frequencies
      # along current axes.
      kernel_left = self._compute_kernel_recursive(
          trajectory, weights, axis - 1)
      flippings = tf.tensor_scatter_nd_update(
          tf.ones([self.rank_tensor()]), [[axis]], [-1])
      kernel_right = self._compute_kernel_recursive(
          trajectory * flippings, weights, axis - 1)

    # Remove zero frequency and reverse.
    kernel_right = tf.reverse(array_ops.slice_along_axis(
        kernel_right, image_axis, 1, tf.shape(kernel_right)[image_axis] - 1),
        [image_axis])

    # Create block of zeros to be inserted between the left and right halves
    # of the kernel.
    zeros_shape = tf.concat([
        tf.shape(kernel_left)[:image_axis], [1],
        tf.shape(kernel_left)[(image_axis + 1):]], 0)
    zeros = tf.zeros(zeros_shape, dtype=kernel_left.dtype)

    # Concatenate the left and right halves of kernel, with a block of zeros
    # in the middle.
    kernel = tf.concat([kernel_left, zeros, kernel_right], image_axis)
    return kernel

  def _nufft_adjoint(self, x, trajectory=None):
    """Applies the adjoint NUFFT operator.

    We use this instead of `super()._transform(x, adjoint=True)` because we
    need to be able to change the trajectory and to apply an FFT shift.

    Args:
      x: A `tf.Tensor` containing the input data (typically the weights or
        ones).
      trajectory: A `tf.Tensor` containing the *k*-space trajectory, which
        may have been flipped and therefore different from the original. If
        `None`, the original trajectory is used.

    Returns:
      A `tf.Tensor` containing the result of the adjoint NUFFT.
    """
    # Apply FFT shift (a linear phase ramp in k-space).
    # NOTE(review): `trajectory` is dereferenced here before the `None`
    # check below, so passing `trajectory=None` would raise despite the
    # docstring — all current callers pass it explicitly; TODO confirm.
    x *= tf.math.exp(tf.dtypes.complex(
        tf.constant(0, dtype=self.dtype.real_dtype),
        tf.math.reduce_sum(trajectory * self._fft_shift, -1)))
    # Temporarily update trajectory.
    if trajectory is not None:
      temp = self.trajectory
      self.trajectory = trajectory
    x = super()._transform(x, adjoint=True)
    if trajectory is not None:
      self.trajectory = temp
    return x

  def _enforce_kernel_symmetry(self, kernel):
    """Enforces Hermitian symmetry on an input kernel.

    Args:
      kernel: A `tf.Tensor`. An approximately Hermitian kernel.

    Returns:
      A Hermitian-symmetric kernel (the average of the kernel and its
      reversed-and-conjugated counterpart).
    """
    kernel_axes = list(range(-self.rank, 0))
    reversed_kernel = tf.roll(
        tf.reverse(kernel, kernel_axes),
        shift=tf.ones([tf.size(kernel_axes)], dtype=tf.int32),
        axis=kernel_axes)
    return (kernel + tf.math.conj(reversed_kernel)) / 2

  def _range_shape(self):
    # Override the NUFFT operator's range shape. The range shape for this
    # operator is the same as the domain shape.
    return self._domain_shape()

  def _range_shape_tensor(self):
    # Dynamic counterpart of `_range_shape`.
    return self._domain_shape_tensor()
diff --git a/tensorflow_mri/python/linalg/linear_operator_nufft_test.py b/tensorflow_mri/python/linalg/linear_operator_nufft_test.py
new file mode 100755
index 00000000..8f50d9e4
--- /dev/null
+++ b/tensorflow_mri/python/linalg/linear_operator_nufft_test.py
@@ -0,0 +1,249 @@
+# Copyright 2021 The TensorFlow MRI Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for module `linear_operator_nufft`."""
# pylint: disable=missing-class-docstring,missing-function-docstring

from absl.testing import parameterized
import numpy as np
import tensorflow as tf

from tensorflow_mri.python.geometry import rotation_2d
from tensorflow_mri.python.linalg import linear_operator_nufft
from tensorflow_mri.python.ops import fft_ops
from tensorflow_mri.python.ops import image_ops
from tensorflow_mri.python.ops import traj_ops
from tensorflow_mri.python.util import test_util


class LinearOperatorNUFFTTest(test_util.TestCase):
  # Each test is run both with 'ortho' normalization and unnormalized.
  @parameterized.named_parameters(
      ("normalized", "ortho"),
      ("unnormalized", None)
  )
  def test_general(self, norm):
    # Unbatched case: a single trajectory of 100 points over an 8x12 grid.
    shape = [8, 12]
    n_points = 100
    rank = 2
    rng = np.random.default_rng()
    traj = rng.uniform(low=-np.pi, high=np.pi, size=(n_points, rank))
    traj = traj.astype(np.float32)
    linop = linear_operator_nufft.LinearOperatorNUFFT(shape, traj, norm=norm)

    # Shape accessors: static and dynamic variants must agree.
    self.assertIsInstance(linop.domain_shape, tf.TensorShape)
    self.assertIsInstance(linop.domain_shape_tensor(), tf.Tensor)
    self.assertIsInstance(linop.range_shape, tf.TensorShape)
    self.assertIsInstance(linop.range_shape_tensor(), tf.Tensor)
    self.assertIsInstance(linop.batch_shape, tf.TensorShape)
    self.assertIsInstance(linop.batch_shape_tensor(), tf.Tensor)
    self.assertAllClose(shape, linop.domain_shape)
    self.assertAllClose(shape, linop.domain_shape_tensor())
    self.assertAllClose([n_points], linop.range_shape)
    self.assertAllClose([n_points], linop.range_shape_tensor())
    self.assertAllClose([], linop.batch_shape)
    self.assertAllClose([], linop.batch_shape_tensor())

    # Check forward: matches a raw type-2 NUFFT (with ortho scaling applied
    # manually when requested).
    x = (rng.uniform(size=shape).astype(np.float32) +
         rng.uniform(size=shape).astype(np.float32) * 1j)
    expected_forward = fft_ops.nufft(x, traj)
    if norm:
      expected_forward /= np.sqrt(np.prod(shape))
    result_forward = linop.transform(x)
    self.assertAllClose(expected_forward, result_forward, rtol=1e-5, atol=1e-5)

    # Check adjoint: matches a raw type-1 (backward) NUFFT.
    expected_adjoint = fft_ops.nufft(result_forward, traj, grid_shape=shape,
                                     transform_type="type_1",
                                     fft_direction="backward")
    if norm:
      expected_adjoint /= np.sqrt(np.prod(shape))
    result_adjoint = linop.transform(result_forward, adjoint=True)
    self.assertAllClose(expected_adjoint, result_adjoint, rtol=1e-5, atol=1e-5)


  @parameterized.named_parameters(
      ("normalized", "ortho"),
      ("unnormalized", None)
  )
  def test_with_batch_dim(self, norm):
    # Trajectory has a leading batch dim that is NOT in the domain shape, so
    # it becomes the operator's batch shape.
    shape = [8, 12]
    n_points = 100
    batch_size = 4
    traj_shape = [batch_size, n_points]
    rank = 2
    rng = np.random.default_rng()
    traj = rng.uniform(low=-np.pi, high=np.pi, size=(*traj_shape, rank))
    traj = traj.astype(np.float32)
    linop = linear_operator_nufft.LinearOperatorNUFFT(shape, traj, norm=norm)

    self.assertIsInstance(linop.domain_shape, tf.TensorShape)
    self.assertIsInstance(linop.domain_shape_tensor(), tf.Tensor)
    self.assertIsInstance(linop.range_shape, tf.TensorShape)
    self.assertIsInstance(linop.range_shape_tensor(), tf.Tensor)
    self.assertIsInstance(linop.batch_shape, tf.TensorShape)
    self.assertIsInstance(linop.batch_shape_tensor(), tf.Tensor)
    self.assertAllClose(shape, linop.domain_shape)
    self.assertAllClose(shape, linop.domain_shape_tensor())
    self.assertAllClose([n_points], linop.range_shape)
    self.assertAllClose([n_points], linop.range_shape_tensor())
    self.assertAllClose([batch_size], linop.batch_shape)
    self.assertAllClose([batch_size], linop.batch_shape_tensor())

    # Check forward. `x` is unbatched and broadcasts against the batched
    # trajectory.
    x = (rng.uniform(size=shape).astype(np.float32) +
         rng.uniform(size=shape).astype(np.float32) * 1j)
    expected_forward = fft_ops.nufft(x, traj)
    if norm:
      expected_forward /= np.sqrt(np.prod(shape))
    result_forward = linop.transform(x)
    self.assertAllClose(expected_forward, result_forward, rtol=1e-5, atol=1e-5)

    # Check adjoint.
    expected_adjoint = fft_ops.nufft(result_forward, traj, grid_shape=shape,
                                     transform_type="type_1",
                                     fft_direction="backward")
    if norm:
      expected_adjoint /= np.sqrt(np.prod(shape))
    result_adjoint = linop.transform(result_forward, adjoint=True)
    self.assertAllClose(expected_adjoint, result_adjoint, rtol=1e-5, atol=1e-5)


  @parameterized.named_parameters(
      ("normalized", "ortho"),
      ("unnormalized", None)
  )
  def test_with_extra_dim(self, norm):
    # Same batched trajectory, but the batch dim is folded into the domain
    # shape instead; operator batch shape stays scalar.
    shape = [8, 12]
    n_points = 100
    batch_size = 4
    traj_shape = [batch_size, n_points]
    rank = 2
    rng = np.random.default_rng()
    traj = rng.uniform(low=-np.pi, high=np.pi, size=(*traj_shape, rank))
    traj = traj.astype(np.float32)
    linop = linear_operator_nufft.LinearOperatorNUFFT(
        [batch_size, *shape], traj, norm=norm)

    self.assertIsInstance(linop.domain_shape, tf.TensorShape)
    self.assertIsInstance(linop.domain_shape_tensor(), tf.Tensor)
    self.assertIsInstance(linop.range_shape, tf.TensorShape)
    self.assertIsInstance(linop.range_shape_tensor(), tf.Tensor)
    self.assertIsInstance(linop.batch_shape, tf.TensorShape)
    self.assertIsInstance(linop.batch_shape_tensor(), tf.Tensor)
    self.assertAllClose([batch_size, *shape], linop.domain_shape)
    self.assertAllClose([batch_size, *shape], linop.domain_shape_tensor())
    self.assertAllClose([batch_size, n_points], linop.range_shape)
    self.assertAllClose([batch_size, n_points], linop.range_shape_tensor())
    self.assertAllClose([], linop.batch_shape)
    self.assertAllClose([], linop.batch_shape_tensor())

    # Check forward.
    x = (rng.uniform(size=[batch_size, *shape]).astype(np.float32) +
         rng.uniform(size=[batch_size, *shape]).astype(np.float32) * 1j)
    expected_forward = fft_ops.nufft(x, traj)
    if norm:
      expected_forward /= np.sqrt(np.prod(shape))
    result_forward = linop.transform(x)
    self.assertAllClose(expected_forward, result_forward, rtol=1e-5, atol=1e-5)

    # Check adjoint.
    expected_adjoint = fft_ops.nufft(result_forward, traj, grid_shape=shape,
                                     transform_type="type_1",
                                     fft_direction="backward")
    if norm:
      expected_adjoint /= np.sqrt(np.prod(shape))
    result_adjoint = linop.transform(result_forward, adjoint=True)
    self.assertAllClose(expected_adjoint, result_adjoint, rtol=1e-5, atol=1e-5)


  def test_with_density(self):
    # Radial trajectory with known analytic density; checks that the
    # density-aware operator applies sqrt-weights symmetrically.
    image_shape = (128, 128)
    image = image_ops.phantom(shape=image_shape, dtype=tf.complex64)
    trajectory = traj_ops.radial_trajectory(
        128, 128, flatten_encoding_dims=True)
    density = traj_ops.radial_density(
        128, 128, flatten_encoding_dims=True)
    weights = tf.cast(tf.math.sqrt(tf.math.reciprocal_no_nan(density)),
                      tf.complex64)

    linop = linear_operator_nufft.LinearOperatorNUFFT(
        image_shape, trajectory=trajectory)
    linop_d = linear_operator_nufft.LinearOperatorNUFFT(
        image_shape, trajectory=trajectory, density=density)

    # Test forward: forward with density equals plain forward times
    # sqrt-weights.
    kspace = linop.transform(image)
    kspace_d = linop_d.transform(image)
    self.assertAllClose(kspace * weights, kspace_d)

    # Test adjoint and preprocess function. Full compensation corresponds to
    # applying the sqrt-weights twice (once in preprocess, once in adjoint).
    recon = linop.transform(
        linop.preprocess(kspace, adjoint=True) * weights * weights,
        adjoint=True)
    recon_d1 = linop_d.transform(kspace_d, adjoint=True)
    recon_d2 = linop_d.transform(linop_d.preprocess(kspace, adjoint=True),
                                 adjoint=True)
    self.assertAllClose(recon, recon_d1)
    self.assertAllClose(recon, recon_d2)


class LinearOperatorGramNUFFTTest(test_util.TestCase):
  # Exercises the Gram operator over the full cross-product of options,
  # comparing against adjoint(forward(x)) of the plain NUFFT operator.
  @parameterized.product(
      density=[False, True],
      norm=[None, 'ortho'],
      toeplitz=[False, True],
      batch=[False, True]
  )
  def test_general(self, density, norm, toeplitz, batch):
    with tf.device('/cpu:0'):
      image_shape = (128, 128)
      image = image_ops.phantom(shape=image_shape, dtype=tf.complex64)
      trajectory = traj_ops.radial_trajectory(
          128, 129, flatten_encoding_dims=True)
      if density is True:
        density = traj_ops.radial_density(
            128, 129, flatten_encoding_dims=True)
      else:
        density = None

      # If testing batches, create new inputs to generate a batch.
      if batch:
        image = tf.stack([image, image * 0.5])
        trajectory = tf.stack([
            trajectory,
            rotation_2d.Rotation2D.from_euler([np.pi / 2]).rotate(trajectory)])
        if density is not None:
          density = tf.stack([density, density])

      linop = linear_operator_nufft.LinearOperatorNUFFT(
          image_shape, trajectory=trajectory, density=density, norm=norm)
      linop_gram = linear_operator_nufft.LinearOperatorGramNUFFT(
          image_shape, trajectory=trajectory, density=density, norm=norm,
          toeplitz=toeplitz)

      # Reference: F^H F x via two transforms of the plain operator.
      recon = linop.transform(linop.transform(image), adjoint=True)
      recon_gram = linop_gram.transform(image)

      if norm is None:
        # Reduce the magnitude of these values to avoid the need to use a
        # large tolerance.
        recon /= tf.cast(tf.math.reduce_prod(image_shape), tf.complex64)
        recon_gram /= tf.cast(tf.math.reduce_prod(image_shape), tf.complex64)

      self.assertAllClose(recon, recon_gram, rtol=1e-4, atol=1e-4)


if __name__ == '__main__':
  tf.test.main()
diff --git a/tensorflow_mri/python/linalg/linear_operator_scaled_identity_test.py b/tensorflow_mri/python/linalg/linear_operator_scaled_identity_test.py
new file mode 100644
index 00000000..333f904b
--- /dev/null
+++ b/tensorflow_mri/python/linalg/linear_operator_scaled_identity_test.py
@@ -0,0 +1,15 @@
+# Copyright 2021 The TensorFlow MRI Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Tests for module `linear_operator_scaled_identity`."""
diff --git a/tensorflow_mri/python/util/linalg_imaging_test.py b/tensorflow_mri/python/linalg/linear_operator_test.py
similarity index 55%
rename from tensorflow_mri/python/util/linalg_imaging_test.py
rename to tensorflow_mri/python/linalg/linear_operator_test.py
index bab6fbcb..6627206a 100644
--- a/tensorflow_mri/python/util/linalg_imaging_test.py
+++ b/tensorflow_mri/python/linalg/linear_operator_test.py
@@ -1,4 +1,4 @@
-# Copyright 2021 University College London. All Rights Reserved.
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,16 +12,16 @@ # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== -"""Tests for module `util.linalg_imaging`.""" +"""Tests for module `linear_operator`.""" # pylint: disable=missing-class-docstring,missing-function-docstring import tensorflow as tf -from tensorflow_mri.python.util import linalg_imaging +from tensorflow_mri.python.linalg import linear_operator from tensorflow_mri.python.util import test_util -class LinearOperatorAppendColumn(linalg_imaging.LinalgImagingMixin, # pylint: disable=abstract-method +class LinearOperatorAppendColumn(linear_operator.LinearOperatorMixin, # pylint: disable=abstract-method tf.linalg.LinearOperator): """Linear operator which appends a column of zeros to the input. @@ -50,8 +50,8 @@ def _range_shape(self): return self._range_shape_value -class LinalgImagingMixin(test_util.TestCase): - """Tests for `linalg_ops.LinalgImagingMixin`.""" +class LinearOperatorMixin(test_util.TestCase): + """Tests for `LinearOperatorMixin`.""" @classmethod def setUpClass(cls): # Test shapes. 
@@ -115,7 +115,7 @@ def test_matmul_operator(self): def test_adjoint(self): """Test `adjoint` method.""" self.assertIsInstance(self.linop.adjoint(), - linalg_imaging.LinalgImagingMixin) + linear_operator.LinearOperatorMixin) self.assertAllClose(self.linop.adjoint() @ self.y_col, self.x_col) self.assertAllClose(self.linop.adjoint().domain_shape, self.range_shape) self.assertAllClose(self.linop.adjoint().range_shape, self.domain_shape) @@ -126,7 +126,7 @@ def test_adjoint(self): def test_adjoint_property(self): """Test `H` property.""" - self.assertIsInstance(self.linop.H, linalg_imaging.LinalgImagingMixin) + self.assertIsInstance(self.linop.H, linear_operator.LinearOperatorMixin) self.assertAllClose(self.linop.H @ self.y_col, self.x_col) self.assertAllClose(self.linop.H.domain_shape, self.range_shape) self.assertAllClose(self.linop.H.range_shape, self.domain_shape) @@ -145,85 +145,3 @@ def test_unsupported_matmul(self): tf.linalg.matmul(self.linop, invalid_x) with self.assertRaisesRegex(ValueError, message): self.linop @ invalid_x # pylint: disable=pointless-statement - - -class LinearOperatorDiagTest(test_util.TestCase): - """Tests for `linalg_imaging.LinearOperatorDiag`.""" - def test_transform(self): - """Test `transform` method.""" - diag = tf.constant([[1., 2.], [3., 4.]]) - diag_linop = linalg_imaging.LinearOperatorDiag(diag, rank=2) - x = tf.constant([[2., 2.], [2., 2.]]) - self.assertAllClose([[2., 4.], [6., 8.]], diag_linop.transform(x)) - - def test_transform_adjoint(self): - """Test `transform` method with adjoint.""" - diag = tf.constant([[1., 2.], [3., 4.]]) - diag_linop = linalg_imaging.LinearOperatorDiag(diag, rank=2) - x = tf.constant([[2., 2.], [2., 2.]]) - self.assertAllClose([[2., 4.], [6., 8.]], - diag_linop.transform(x, adjoint=True)) - - def test_transform_complex(self): - """Test `transform` method with complex values.""" - diag = tf.constant([[1. + 1.j, 2. + 2.j], [3. + 3.j, 4. 
+ 4.j]], - dtype=tf.complex64) - diag_linop = linalg_imaging.LinearOperatorDiag(diag, rank=2) - x = tf.constant([[2., 2.], [2., 2.]], dtype=tf.complex64) - self.assertAllClose([[2. + 2.j, 4. + 4.j], [6. + 6.j, 8. + 8.j]], - diag_linop.transform(x)) - - def test_transform_adjoint_complex(self): - """Test `transform` method with adjoint and complex values.""" - diag = tf.constant([[1. + 1.j, 2. + 2.j], [3. + 3.j, 4. + 4.j]], - dtype=tf.complex64) - diag_linop = linalg_imaging.LinearOperatorDiag(diag, rank=2) - x = tf.constant([[2., 2.], [2., 2.]], dtype=tf.complex64) - self.assertAllClose([[2. - 2.j, 4. - 4.j], [6. - 6.j, 8. - 8.j]], - diag_linop.transform(x, adjoint=True)) - - def test_shapes(self): - """Test shapes.""" - diag = tf.constant([[1., 2.], [3., 4.]]) - diag_linop = linalg_imaging.LinearOperatorDiag(diag, rank=2) - self.assertIsInstance(diag_linop.domain_shape, tf.TensorShape) - self.assertIsInstance(diag_linop.range_shape, tf.TensorShape) - self.assertAllEqual([2, 2], diag_linop.domain_shape) - self.assertAllEqual([2, 2], diag_linop.range_shape) - - def test_tensor_shapes(self): - """Test tensor shapes.""" - diag = tf.constant([[1., 2.], [3., 4.]]) - diag_linop = linalg_imaging.LinearOperatorDiag(diag, rank=2) - self.assertIsInstance(diag_linop.domain_shape_tensor(), tf.Tensor) - self.assertIsInstance(diag_linop.range_shape_tensor(), tf.Tensor) - self.assertAllEqual([2, 2], diag_linop.domain_shape_tensor()) - self.assertAllEqual([2, 2], diag_linop.range_shape_tensor()) - - def test_batch_shapes(self): - """Test batch shapes.""" - diag = tf.constant([[1., 2., 3.], [4., 5., 6.]]) - diag_linop = linalg_imaging.LinearOperatorDiag(diag, rank=1) - self.assertIsInstance(diag_linop.domain_shape, tf.TensorShape) - self.assertIsInstance(diag_linop.range_shape, tf.TensorShape) - self.assertIsInstance(diag_linop.batch_shape, tf.TensorShape) - self.assertAllEqual([3], diag_linop.domain_shape) - self.assertAllEqual([3], diag_linop.range_shape) - 
self.assertAllEqual([2], diag_linop.batch_shape) - - def test_tensor_batch_shapes(self): - """Test tensor batch shapes.""" - diag = tf.constant([[1., 2., 3.], [4., 5., 6.]]) - diag_linop = linalg_imaging.LinearOperatorDiag(diag, rank=1) - self.assertIsInstance(diag_linop.domain_shape_tensor(), tf.Tensor) - self.assertIsInstance(diag_linop.range_shape_tensor(), tf.Tensor) - self.assertIsInstance(diag_linop.batch_shape_tensor(), tf.Tensor) - self.assertAllEqual([3], diag_linop.domain_shape) - self.assertAllEqual([3], diag_linop.range_shape) - self.assertAllEqual([2], diag_linop.batch_shape) - - def test_name(self): - """Test names.""" - diag = tf.constant([[1., 2.], [3., 4.]]) - diag_linop = linalg_imaging.LinearOperatorDiag(diag, rank=2) - self.assertEqual("LinearOperatorDiag", diag_linop.name) diff --git a/tensorflow_mri/python/linalg/linear_operator_wavelet.py b/tensorflow_mri/python/linalg/linear_operator_wavelet.py new file mode 100644 index 00000000..57d81092 --- /dev/null +++ b/tensorflow_mri/python/linalg/linear_operator_wavelet.py @@ -0,0 +1,153 @@ +# Copyright 2021 The TensorFlow MRI Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Wavelet linear operator.""" + +import functools + +import tensorflow as tf + +from tensorflow_mri.python.ops import array_ops +from tensorflow_mri.python.ops import wavelet_ops +from tensorflow_mri.python.util import api_util +from tensorflow_mri.python.util import check_util +from tensorflow_mri.python.linalg import linear_operator +from tensorflow_mri.python.util import tensor_util + + +@api_util.export("linalg.LinearOperatorWavelet") +class LinearOperatorWavelet(linear_operator.LinearOperator): # pylint: disable=abstract-method + """Linear operator representing a wavelet decomposition matrix. + + Args: + domain_shape: A 1D `tf.Tensor` or a `list` of `int`. The domain shape of + this linear operator. + wavelet: A `str` or a `pywt.Wavelet`_, or a `list` thereof. When passed a + `list`, different wavelets are applied along each axis in `axes`. + mode: A `str`. The padding or signal extension mode. Must be one of the + values supported by `tfmri.signal.wavedec`. Defaults to `'symmetric'`. + level: An `int` >= 0. The decomposition level. If `None` (default), + the maximum useful level of decomposition will be used (see + `tfmri.signal.max_wavelet_level`). + axes: A `list` of `int`. The axes over which the DWT is computed. Axes refer + only to domain dimensions without regard for the batch dimensions. + Defaults to `None` (all domain dimensions). + dtype: A `tf.dtypes.DType`. The data type for this operator. Defaults to + `float32`. + name: A `str`. A name for this operator. + """ + def __init__(self, + domain_shape, + wavelet, + mode='symmetric', + level=None, + axes=None, + dtype=tf.dtypes.float32, + name="LinearOperatorWavelet"): + # Set parameters. + parameters = dict( + domain_shape=domain_shape, + wavelet=wavelet, + mode=mode, + level=level, + axes=axes, + dtype=dtype, + name=name + ) + + # Get the static and dynamic shapes and save them for later use. 
+ self._domain_shape_static, self._domain_shape_dynamic = ( + tensor_util.static_and_dynamic_shapes_from_shape(domain_shape)) + # At the moment, the wavelet implementation relies on shapes being + # statically known. + if not self._domain_shape_static.is_fully_defined(): + raise ValueError(f"static `domain_shape` must be fully defined, " + f"but got {self._domain_shape_static}") + static_rank = self._domain_shape_static.rank + + # Set arguments. + self.wavelet = wavelet + self.mode = mode + self.level = level + self.axes = check_util.validate_static_axes(axes, + rank=static_rank, + min_length=1, + canonicalize="negative", + must_be_unique=True, + scalar_to_list=True, + none_means_all=True) + + # Compute the coefficient slices needed for adjoint (wavelet + # reconstruction). + x = tf.ensure_shape(tf.zeros(self._domain_shape_dynamic, dtype=dtype), + self._domain_shape_static) + x = wavelet_ops.wavedec(x, wavelet=self.wavelet, mode=self.mode, + level=self.level, axes=self.axes) + y, self._coeff_slices = wavelet_ops.coeffs_to_tensor(x, axes=self.axes) + + # Get the range shape. + self._range_shape_static = y.shape + self._range_shape_dynamic = tf.shape(y) + + # Call base class. + super().__init__(dtype, + is_non_singular=None, + is_self_adjoint=None, + is_positive_definite=None, + is_square=None, + name=name, + parameters=parameters) + + def _transform(self, x, adjoint=False): + # While `wavedec` and `waverec` can transform only a subset of axes (and + # thus theoretically support batches), there is a caveat due to the + # `coeff_slices` object required by `waverec`. This object contains + # information relevant to a specific batch shape. While we could recompute + # this object for every input batch shape, it is easier to just process + # each batch independently. + if x.shape.rank is not None and self._domain_shape_static.rank is not None: + # Rank of input and this operator are known statically, so we can infer + # the number of batch dimensions statically too. 
+ batch_dims = x.shape.rank - self._domain_shape_static.rank + else: + # We need to obtain the number of batch dimensions dynamically. + batch_dims = tf.rank(x) - tf.shape(self._domain_shape_dynamic)[0] + # Transform each batch. + x = array_ops.map_fn( + functools.partial(self._transform_batch, adjoint=adjoint), + x, batch_dims=batch_dims) + return x + + def _transform_batch(self, x, adjoint=False): + if adjoint: + x = wavelet_ops.tensor_to_coeffs(x, self._coeff_slices) + x = wavelet_ops.waverec(x, wavelet=self.wavelet, mode=self.mode, + axes=self.axes) + else: + x = wavelet_ops.wavedec(x, wavelet=self.wavelet, mode=self.mode, + level=self.level, axes=self.axes) + x, _ = wavelet_ops.coeffs_to_tensor(x, axes=self.axes) + return x + + def _domain_shape(self): + return self._domain_shape_static + + def _range_shape(self): + return self._range_shape_static + + def _domain_shape_tensor(self): + return self._domain_shape_dynamic + + def _range_shape_tensor(self): + return self._range_shape_dynamic diff --git a/tensorflow_mri/python/linalg/linear_operator_wavelet_test.py b/tensorflow_mri/python/linalg/linear_operator_wavelet_test.py new file mode 100755 index 00000000..a0ecee87 --- /dev/null +++ b/tensorflow_mri/python/linalg/linear_operator_wavelet_test.py @@ -0,0 +1,87 @@ +# Copyright 2021 The TensorFlow MRI Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Tests for module `linear_operator_wavelet`.""" +# pylint: disable=missing-class-docstring,missing-function-docstring + +from absl.testing import parameterized +import numpy as np +import tensorflow as tf + +from tensorflow_mri.python.linalg import linear_operator_wavelet +from tensorflow_mri.python.ops import wavelet_ops +from tensorflow_mri.python.util import test_util + + +class LinearOperatorWaveletTest(test_util.TestCase): + @parameterized.named_parameters( + # name, wavelet, level, axes, domain_shape, range_shape + ("test0", "haar", None, None, [6, 6], [7, 7]), + ("test1", "haar", 1, None, [6, 6], [6, 6]), + ("test2", "haar", None, -1, [6, 6], [6, 7]), + ("test3", "haar", None, [-1], [6, 6], [6, 7]) + ) + def test_general(self, wavelet, level, axes, domain_shape, range_shape): + # Instantiate. + linop = linear_operator_wavelet.LinearOperatorWavelet( + domain_shape, wavelet=wavelet, level=level, axes=axes) + + # Example data. + data = np.arange(np.prod(domain_shape)).reshape(domain_shape) + data = data.astype("float32") + + # Forward and adjoint. + expected_forward, coeff_slices = wavelet_ops.coeffs_to_tensor( + wavelet_ops.wavedec(data, wavelet=wavelet, level=level, axes=axes), + axes=axes) + expected_adjoint = wavelet_ops.waverec( + wavelet_ops.tensor_to_coeffs(expected_forward, coeff_slices), + wavelet=wavelet, axes=axes) + + # Test shapes. + self.assertAllClose(domain_shape, linop.domain_shape) + self.assertAllClose(domain_shape, linop.domain_shape_tensor()) + self.assertAllClose(range_shape, linop.range_shape) + self.assertAllClose(range_shape, linop.range_shape_tensor()) + + # Test transform. 
+ result_forward = linop.transform(data) + result_adjoint = linop.transform(result_forward, adjoint=True) + self.assertAllClose(expected_forward, result_forward) + self.assertAllClose(expected_adjoint, result_adjoint) + + def test_with_batch_inputs(self): + """Test batch shape.""" + axes = [-2, -1] + data = np.arange(4 * 8 * 8).reshape(4, 8, 8).astype("float32") + linop = linear_operator_wavelet.LinearOperatorWavelet( + (8, 8), wavelet="haar", level=1) + + # Forward and adjoint. + expected_forward, coeff_slices = wavelet_ops.coeffs_to_tensor( + wavelet_ops.wavedec(data, wavelet='haar', level=1, axes=axes), + axes=axes) + expected_adjoint = wavelet_ops.waverec( + wavelet_ops.tensor_to_coeffs(expected_forward, coeff_slices), + wavelet='haar', axes=axes) + + result_forward = linop.transform(data) + self.assertAllClose(expected_forward, result_forward) + + result_adjoint = linop.transform(result_forward, adjoint=True) + self.assertAllClose(expected_adjoint, result_adjoint) + + +if __name__ == '__main__': + tf.test.main() diff --git a/tensorflow_mri/python/losses/__init__.py b/tensorflow_mri/python/losses/__init__.py index d8986663..9629f708 100644 --- a/tensorflow_mri/python/losses/__init__.py +++ b/tensorflow_mri/python/losses/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2021 University College London. All Rights Reserved. +# Copyright 2021 The TensorFlow MRI Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tensorflow_mri/python/losses/confusion_losses.py b/tensorflow_mri/python/losses/confusion_losses.py index 0650192b..d71227b4 100644 --- a/tensorflow_mri/python/losses/confusion_losses.py +++ b/tensorflow_mri/python/losses/confusion_losses.py @@ -1,4 +1,4 @@ -# Copyright 2022 University College London. All Rights Reserved. +# Copyright 2022 The TensorFlow MRI Authors. All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -228,8 +228,9 @@ class FocalTverskyLoss(ConfusionLoss): The focal Tversky loss is computed as: - .. math:: + $$ L = \left ( 1 - \frac{\mathrm{TP} + \epsilon}{\mathrm{TP} + \alpha \mathrm{FP} + \beta \mathrm{FN} + \epsilon} \right ) ^ \gamma + $$ This loss allows control over the relative importance of false positives and false negatives through the `alpha` and `beta` parameters, which may be useful @@ -244,9 +245,9 @@ class FocalTverskyLoss(ConfusionLoss): epsilon: A `float`. A smoothing factor. Defaults to 1e-5. Notes: - [1] and [2] use inverted notations for the :math:`\alpha` and :math:`\beta` + [1] and [2] use inverted notations for the $\alpha$ and $\beta$ parameters. Here we use the notation of [1]. Also note that [2] refers to - :math:`\gamma` as :math:`\frac{1}{\gamma}`. + $\gamma$ as $\frac{1}{\gamma}$. References: [1] Salehi, S. S. M., Erdogmus, D., & Gholipour, A. (2017, September). @@ -301,8 +302,9 @@ class TverskyLoss(FocalTverskyLoss): The Tversky loss is computed as: - .. math:: + $$ L = \left ( 1 - \frac{\mathrm{TP} + \epsilon}{\mathrm{TP} + \alpha \mathrm{FP} + \beta \mathrm{FN} + \epsilon} \right ) + $$ Args: alpha: A `float`. Weight given to false positives. Defaults to 0.3. @@ -339,8 +341,9 @@ class F1Loss(TverskyLoss): The F1 loss is computed as: - .. math:: + $$ L = \left ( 1 - \frac{\mathrm{TP} + \epsilon}{\mathrm{TP} + \frac{1}{2} \mathrm{FP} + \frac{1}{2} \mathrm{FN} + \epsilon} \right ) + $$ Args: epsilon: A `float`. A smoothing factor. Defaults to 1e-5. @@ -373,8 +376,9 @@ class IoULoss(TverskyLoss): The IoU loss is computed as: - .. math:: + $$ L = \left ( 1 - \frac{\mathrm{TP} + \epsilon}{\mathrm{TP} + \mathrm{FP} + \mathrm{FN} + \epsilon} \right ) + $$ Args: epsilon: A `float`. A smoothing factor. Defaults to 1e-5. 
diff --git a/tensorflow_mri/python/losses/confusion_losses_test.py b/tensorflow_mri/python/losses/confusion_losses_test.py index 4673df90..4a9fbd51 100755 --- a/tensorflow_mri/python/losses/confusion_losses_test.py +++ b/tensorflow_mri/python/losses/confusion_losses_test.py @@ -1,4 +1,4 @@ -# Copyright 2021 University College London. All Rights Reserved. +# Copyright 2021 The TensorFlow MRI Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tensorflow_mri/python/losses/iqa_losses.py b/tensorflow_mri/python/losses/iqa_losses.py index bde0c74d..0db4a349 100644 --- a/tensorflow_mri/python/losses/iqa_losses.py +++ b/tensorflow_mri/python/losses/iqa_losses.py @@ -1,4 +1,4 @@ -# Copyright 2021 University College London. All Rights Reserved. +# Copyright 2021 The TensorFlow MRI Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -22,7 +22,6 @@ from tensorflow_mri.python.ops import image_ops from tensorflow_mri.python.util import api_util from tensorflow_mri.python.util import check_util -from tensorflow_mri.python.util import deprecation from tensorflow_mri.python.util import keras_util @@ -87,7 +86,7 @@ def get_config(self): class SSIMLoss(LossFunctionWrapperIQA): """Computes the structural similarity (SSIM) loss. - The SSIM loss is equal to :math:`1.0 - \textrm{SSIM}`. + The SSIM loss is equal to $1.0 - \textrm{SSIM}$. Args: max_val: The dynamic range of the images (i.e., the difference between @@ -111,11 +110,6 @@ class SSIMLoss(LossFunctionWrapperIQA): `(rank of inputs) - batch_dims - 1`. Defaults to `None`. `image_dims` can always be inferred if `batch_dims` was specified, so you only need to provide one of the two. - rank: An `int`. The number of spatial dimensions. Must be 2 or 3. Defaults - to `tf.rank(y_true) - 2`. 
In other words, if rank is not explicitly set, - `y_true` and `y_pred` should have shape `[batch, height, width, channels]` - if processing 2D images or `[batch, depth, height, width, channels]` if - processing 3D images. multichannel: A `boolean`. Whether multichannel computation is enabled. If `False`, the inputs `y_true` and `y_pred` are not expected to have a channel dimension, i.e. they should have shape @@ -130,14 +124,10 @@ class SSIMLoss(LossFunctionWrapperIQA): name: String name of the loss instance. References: - .. [1] Zhao, H., Gallo, O., Frosio, I., & Kautz, J. (2016). Loss functions + 1. Zhao, H., Gallo, O., Frosio, I., & Kautz, J. (2016). Loss functions for image restoration with neural networks. IEEE Transactions on computational imaging, 3(1), 47-57. """ - @deprecation.deprecated_args( - deprecation.REMOVAL_DATE['0.19.0'], - 'Use argument `image_dims` instead.', - ('rank', None)) def __init__(self, max_val=None, filter_size=11, @@ -146,7 +136,6 @@ def __init__(self, k2=0.03, batch_dims=None, image_dims=None, - rank=None, multichannel=True, complex_part=None, reduction=tf.keras.losses.Reduction.AUTO, @@ -161,7 +150,6 @@ def __init__(self, k2=k2, batch_dims=batch_dims, image_dims=image_dims, - rank=rank, multichannel=multichannel, complex_part=complex_part) @@ -172,7 +160,7 @@ def __init__(self, class SSIMMultiscaleLoss(LossFunctionWrapperIQA): """Computes the multiscale structural similarity (MS-SSIM) loss. - The MS-SSIM loss is equal to :math:`1.0 - \textrm{MS-SSIM}`. + The MS-SSIM loss is equal to $1.0 - \textrm{MS-SSIM}$. Args: max_val: The dynamic range of the images (i.e., the difference between @@ -201,11 +189,6 @@ class SSIMMultiscaleLoss(LossFunctionWrapperIQA): `(rank of inputs) - batch_dims - 1`. Defaults to `None`. `image_dims` can always be inferred if `batch_dims` was specified, so you only need to provide one of the two. - rank: An `int`. The number of spatial dimensions. Must be 2 or 3. Defaults - to `tf.rank(y_true) - 2`. 
In other words, if rank is not explicitly set, - `y_true` and `y_pred` should have shape `[batch, height, width, channels]` - if processing 2D images or `[batch, depth, height, width, channels]` if - processing 3D images. multichannel: A `boolean`. Whether multichannel computation is enabled. If `False`, the inputs `y_true` and `y_pred` are not expected to have a channel dimension, i.e. they should have shape @@ -220,14 +203,10 @@ class SSIMMultiscaleLoss(LossFunctionWrapperIQA): name: String name of the loss instance. References: - .. [1] Zhao, H., Gallo, O., Frosio, I., & Kautz, J. (2016). Loss functions + 1. Zhao, H., Gallo, O., Frosio, I., & Kautz, J. (2016). Loss functions for image restoration with neural networks. IEEE Transactions on computational imaging, 3(1), 47-57. """ - @deprecation.deprecated_args( - deprecation.REMOVAL_DATE['0.19.0'], - 'Use argument `image_dims` instead.', - ('rank', None)) def __init__(self, max_val=None, power_factors=image_ops._MSSSIM_WEIGHTS, @@ -237,7 +216,6 @@ def __init__(self, k2=0.03, batch_dims=None, image_dims=None, - rank=None, multichannel=True, complex_part=None, reduction=tf.keras.losses.Reduction.AUTO, @@ -253,23 +231,18 @@ def __init__(self, k2=k2, batch_dims=batch_dims, image_dims=image_dims, - rank=rank, multichannel=multichannel, complex_part=complex_part) @api_util.export("losses.ssim_loss") -@deprecation.deprecated_args( - deprecation.REMOVAL_DATE['0.19.0'], - 'Use argument `image_dims` instead.', - ('rank', None)) @tf.keras.utils.register_keras_serializable(package="MRI") def ssim_loss(y_true, y_pred, max_val=None, filter_size=11, filter_sigma=1.5, - k1=0.01, k2=0.03, batch_dims=None, image_dims=None, rank=None): + k1=0.01, k2=0.03, batch_dims=None, image_dims=None): r"""Computes the structural similarity (SSIM) loss. - The SSIM loss is equal to :math:`1.0 - \textrm{SSIM}`. + The SSIM loss is equal to $1.0 - \textrm{SSIM}$. Args: y_true: A `Tensor`. Ground truth images. 
For 2D images, must have rank >= 3 @@ -305,18 +278,13 @@ def ssim_loss(y_true, y_pred, max_val=None, `(rank of inputs) - batch_dims - 1`. Defaults to `None`. `image_dims` can always be inferred if `batch_dims` was specified, so you only need to provide one of the two. - rank: An `int`. The number of spatial dimensions. Must be 2 or 3. Defaults - to `tf.rank(y_true) - 2`. In other words, if rank is not explicitly set, - `y_true` and `y_pred` should have shape `[batch, height, width, channels]` - if processing 2D images or `[batch, depth, height, width, channels]` if - processing 3D images. Returns: A `Tensor` of type `float32` and shape `batch_shape` containing an SSIM value for each image in the batch. References: - .. [1] Zhao, H., Gallo, O., Frosio, I., & Kautz, J. (2016). Loss functions + 1. Zhao, H., Gallo, O., Frosio, I., & Kautz, J. (2016). Loss functions for image restoration with neural networks. IEEE Transactions on computational imaging, 3(1), 47-57. """ @@ -327,24 +295,19 @@ def ssim_loss(y_true, y_pred, max_val=None, k1=k1, k2=k2, batch_dims=batch_dims, - image_dims=image_dims, - rank=rank) + image_dims=image_dims) @api_util.export("losses.ssim_multiscale_loss") -@deprecation.deprecated_args( - deprecation.REMOVAL_DATE['0.19.0'], - 'Use argument `image_dims` instead.', - ('rank', None)) @tf.keras.utils.register_keras_serializable(package="MRI") def ssim_multiscale_loss(y_true, y_pred, max_val=None, power_factors=image_ops._MSSSIM_WEIGHTS, # pylint: disable=protected-access filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03, - batch_dims=None, image_dims=None, rank=None): + batch_dims=None, image_dims=None): r"""Computes the multiscale structural similarity (MS-SSIM) loss. - The MS-SSIM loss is equal to :math:`1.0 - \textrm{MS-SSIM}`. + The MS-SSIM loss is equal to $1.0 - \textrm{MS-SSIM}$. Args: y_true: A `Tensor`. Ground truth images. 
For 2D images, must have rank >= 3 @@ -387,18 +350,13 @@ def ssim_multiscale_loss(y_true, y_pred, max_val=None, `(rank of inputs) - batch_dims - 1`. Defaults to `None`. `image_dims` can always be inferred if `batch_dims` was specified, so you only need to provide one of the two. - rank: An `int`. The number of spatial dimensions. Must be 2 or 3. Defaults - to `tf.rank(y_true) - 2`. In other words, if rank is not explicitly set, - `y_true` and `y_pred` should have shape `[batch, height, width, channels]` - if processing 2D images or `[batch, depth, height, width, channels]` if - processing 3D images. Returns: A `Tensor` of type `float32` and shape `batch_shape` containing an SSIM value for each image in the batch. References: - .. [1] Zhao, H., Gallo, O., Frosio, I., & Kautz, J. (2016). Loss functions + 1. Zhao, H., Gallo, O., Frosio, I., & Kautz, J. (2016). Loss functions for image restoration with neural networks. IEEE Transactions on computational imaging, 3(1), 47-57. """ @@ -410,8 +368,7 @@ def ssim_multiscale_loss(y_true, y_pred, max_val=None, k1=k1, k2=k2, batch_dims=batch_dims, - image_dims=image_dims, - rank=rank) + image_dims=image_dims) # For backward compatibility. diff --git a/tensorflow_mri/python/losses/iqa_losses_test.py b/tensorflow_mri/python/losses/iqa_losses_test.py index ab2e530e..968a81e0 100755 --- a/tensorflow_mri/python/losses/iqa_losses_test.py +++ b/tensorflow_mri/python/losses/iqa_losses_test.py @@ -1,4 +1,4 @@ -# Copyright 2021 University College London. All Rights Reserved. +# Copyright 2021 The TensorFlow MRI Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/tensorflow_mri/python/metrics/__init__.py b/tensorflow_mri/python/metrics/__init__.py index c25c648e..896aaed3 100644 --- a/tensorflow_mri/python/metrics/__init__.py +++ b/tensorflow_mri/python/metrics/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2021 University College London. All Rights Reserved. +# Copyright 2021 The TensorFlow MRI Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tensorflow_mri/python/metrics/confusion_metrics.py b/tensorflow_mri/python/metrics/confusion_metrics.py index d20cf70e..19a05ecb 100644 --- a/tensorflow_mri/python/metrics/confusion_metrics.py +++ b/tensorflow_mri/python/metrics/confusion_metrics.py @@ -1,4 +1,4 @@ -# Copyright 2021 University College London. All Rights Reserved. +# Copyright 2021 The TensorFlow MRI Authors. All Rights Reserved. # Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -299,8 +299,9 @@ class Accuracy(ConfusionMetric): Estimates how often predictions match labels. - .. math:: + $$ \textrm{accuracy} = \frac{\textrm{TP} + \textrm{TN}}{\textrm{TP} + \textrm{TN} + \textrm{FP} + \textrm{FN}} + $$ Args: name: String name of the metric instance. @@ -337,8 +338,9 @@ class TruePositiveRate(ConfusionMetric): The true positive rate (TPR), also called sensitivity or recall, is the proportion of correctly predicted positives among all positive instances. - .. math:: + $$ \textrm{TPR} = \frac{\textrm{TP}}{\textrm{TP} + \textrm{FN}} + $$ Args: name: String name of the metric instance. @@ -374,8 +376,9 @@ class TrueNegativeRate(ConfusionMetric): The true negative rate (TNR), also called specificity or selectivity, is the proportion of correctly predicted negatives among all negative instances. - .. 
math:: + $$ \textrm{TNR} = \frac{\textrm{TN}}{\textrm{TN} + \textrm{FP}} + $$ Args: name: String name of the metric instance. @@ -410,8 +413,9 @@ class PositivePredictiveValue(ConfusionMetric): The positive predictive value (PPV), also called precision, is the proportion of correctly predicted positives among all positive calls. - .. math:: + $$ \textrm{PPV} = \frac{\textrm{TP}}{\textrm{TP} + \textrm{FP}} + $$ Args: name: String name of the metric instance. @@ -446,8 +450,9 @@ class NegativePredictiveValue(ConfusionMetric): The negative predictive value (NPV) is the proportion of correctly predicted negatives among all negative calls. - .. math:: + $$ \textrm{NPV} = \frac{\textrm{TN}}{\textrm{TN} + \textrm{FN}} + $$ Args: name: String name of the metric instance. @@ -482,8 +487,9 @@ class TverskyIndex(ConfusionMetric): The Tversky index is an asymmetric similarity measure [1]_. It is a generalization of the F-beta family of scores and the IoU. - .. math:: + $$ \textrm{TI} = \frac{\textrm{TP}}{\textrm{TP} + \alpha * \textrm{FP} + \beta * \textrm{FN}} + $$ Args: alpha: A `float`. The weight given to false positives. Defaults to 0.5. @@ -492,7 +498,7 @@ class TverskyIndex(ConfusionMetric): dtype: Data type of the metric result. References: - .. [1] Tversky, A. (1977). Features of similarity. Psychological review, + 1. Tversky, A. (1977). Features of similarity. Psychological review, 84(4), 327. """ # pylint: disable=line-too-long def __init__(self, @@ -541,8 +547,9 @@ class FBetaScore(TverskyIndex): The F-beta score is the weighted harmonic mean of precision and recall. - .. math:: + $$ F_{\beta} = (1 + \beta^2) * \frac{\textrm{precision} * \textrm{precision}}{(\beta^2 \cdot \textrm{precision}) + \textrm{recall}} + $$ Args: beta: A `float`. Determines the weight of precision and recall in harmonic @@ -587,8 +594,9 @@ class F1Score(FBetaScore): The F-1 score is the harmonic mean of precision and recall. - .. 
math:: + $$ F_1 = 2 \cdot \frac{\textrm{precision} \cdot \textrm{recall}}{\textrm{precision} + \textrm{recall}} + $$ Args: name: String name of the metric instance. @@ -622,8 +630,9 @@ class IoU(TverskyIndex): Also known as Jaccard index. - .. math:: + $$ \textrm{IoU} = \frac{\textrm{TP}}{\textrm{TP} + \textrm{FP} + \textrm{FN}} + $$ Args: name: String name of the metric instance. diff --git a/tensorflow_mri/python/metrics/confusion_metrics_test.py b/tensorflow_mri/python/metrics/confusion_metrics_test.py index 37fd5972..c5b8dd0b 100644 --- a/tensorflow_mri/python/metrics/confusion_metrics_test.py +++ b/tensorflow_mri/python/metrics/confusion_metrics_test.py @@ -1,4 +1,4 @@ -# Copyright 2021 University College London. All Rights Reserved. +# Copyright 2021 The TensorFlow MRI Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tensorflow_mri/python/metrics/iqa_metrics.py b/tensorflow_mri/python/metrics/iqa_metrics.py index c23c5090..62217ed4 100755 --- a/tensorflow_mri/python/metrics/iqa_metrics.py +++ b/tensorflow_mri/python/metrics/iqa_metrics.py @@ -1,4 +1,4 @@ -# Copyright 2021 University College London. All Rights Reserved. +# Copyright 2021 The TensorFlow MRI Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -22,7 +22,6 @@ from tensorflow_mri.python.ops import image_ops from tensorflow_mri.python.util import api_util from tensorflow_mri.python.util import check_util -from tensorflow_mri.python.util import deprecation class MeanMetricWrapperIQA(tf.keras.metrics.MeanMetricWrapper): @@ -111,11 +110,6 @@ class PSNR(MeanMetricWrapperIQA): `(rank of inputs) - batch_dims - 1`. Defaults to `None`. `image_dims` can always be inferred if `batch_dims` was specified, so you only need to provide one of the two. - rank: An `int`. 
The number of spatial dimensions. Must be 2 or 3. Defaults - to `tf.rank(y_true) - 2`. In other words, if rank is not explicitly set, - `y_true` and `y_pred` should have shape `[batch, height, width, channels]` - if processing 2D images or `[batch, depth, height, width, channels]` if - processing 3D images. multichannel: A `boolean`. Whether multichannel computation is enabled. If `False`, the inputs `y_true` and `y_pred` are not expected to have a channel dimension, i.e. they should have shape @@ -128,15 +122,10 @@ class PSNR(MeanMetricWrapperIQA): name: String name of the metric instance. dtype: Data type of the metric result. """ - @deprecation.deprecated_args( - deprecation.REMOVAL_DATE['0.19.0'], - 'Use argument `image_dims` instead.', - ('rank', None)) def __init__(self, max_val=None, batch_dims=None, image_dims=None, - rank=None, multichannel=True, complex_part=None, name='psnr', @@ -147,7 +136,6 @@ def __init__(self, max_val=max_val, batch_dims=batch_dims, image_dims=image_dims, - rank=rank, multichannel=multichannel, complex_part=complex_part) @@ -205,14 +193,10 @@ class SSIM(MeanMetricWrapperIQA): dtype: Data type of the metric result. References: - .. [1] Wang, Z., Bovik, A. C., Sheikh, H. R., & Simoncelli, E. P. (2004). + 1. Wang, Z., Bovik, A. C., Sheikh, H. R., & Simoncelli, E. P. (2004). Image quality assessment: from error visibility to structural similarity. IEEE transactions on image processing, 13(4), 600-612. 
""" - @deprecation.deprecated_args( - deprecation.REMOVAL_DATE['0.19.0'], - 'Use argument `image_dims` instead.', - ('rank', None)) def __init__(self, max_val=None, filter_size=11, @@ -221,7 +205,6 @@ def __init__(self, k2=0.03, batch_dims=None, image_dims=None, - rank=None, multichannel=True, complex_part=None, name='ssim', @@ -237,7 +220,6 @@ def __init__(self, k2=k2, batch_dims=batch_dims, image_dims=image_dims, - rank=rank, multichannel=multichannel, complex_part=complex_part) @@ -293,15 +275,11 @@ class SSIMMultiscale(MeanMetricWrapperIQA): dtype: Data type of the metric result. References: - .. [1] Wang, Z., Simoncelli, E. P., & Bovik, A. C. (2003, November). + 1. Wang, Z., Simoncelli, E. P., & Bovik, A. C. (2003, November). Multiscale structural similarity for image quality assessment. In The Thrity-Seventh Asilomar Conference on Signals, Systems & Computers, 2003 (Vol. 2, pp. 1398-1402). Ieee. """ - @deprecation.deprecated_args( - deprecation.REMOVAL_DATE['0.19.0'], - 'Use argument `image_dims` instead.', - ('rank', None)) def __init__(self, max_val=None, filter_size=11, @@ -310,7 +288,6 @@ def __init__(self, k2=0.03, batch_dims=None, image_dims=None, - rank=None, multichannel=True, complex_part=None, name='ms_ssim', @@ -326,7 +303,6 @@ def __init__(self, k2=k2, batch_dims=batch_dims, image_dims=image_dims, - rank=rank, multichannel=multichannel, complex_part=complex_part) diff --git a/tensorflow_mri/python/metrics/iqa_metrics_test.py b/tensorflow_mri/python/metrics/iqa_metrics_test.py index 85175dc8..9965d110 100755 --- a/tensorflow_mri/python/metrics/iqa_metrics_test.py +++ b/tensorflow_mri/python/metrics/iqa_metrics_test.py @@ -1,4 +1,4 @@ -# Copyright 2021 University College London. All Rights Reserved. +# Copyright 2021 The TensorFlow MRI Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/tensorflow_mri/python/models/__init__.py b/tensorflow_mri/python/models/__init__.py index c5f8e166..71f191c5 100644 --- a/tensorflow_mri/python/models/__init__.py +++ b/tensorflow_mri/python/models/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2022 University College London. All Rights Reserved. +# Copyright 2022 The TensorFlow MRI Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -16,3 +16,4 @@ from tensorflow_mri.python.models import conv_blocks from tensorflow_mri.python.models import conv_endec +from tensorflow_mri.python.models import graph_like_network diff --git a/tensorflow_mri/python/models/conv_blocks.py b/tensorflow_mri/python/models/conv_blocks.py index 417fae7f..ede6fb96 100644 --- a/tensorflow_mri/python/models/conv_blocks.py +++ b/tensorflow_mri/python/models/conv_blocks.py @@ -1,4 +1,4 @@ -# Copyright 2021 University College London. All Rights Reserved. +# Copyright 2021 The TensorFlow MRI Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -29,44 +29,41 @@ # ============================================================================== """Convolutional neural network blocks.""" -import itertools import string import tensorflow as tf +import tensorflow_addons as tfa +from tensorflow_mri.python import activations +from tensorflow_mri.python import initializers +from tensorflow_mri.python.models import graph_like_network from tensorflow_mri.python.util import api_util from tensorflow_mri.python.util import check_util +from tensorflow_mri.python.util import doc_util from tensorflow_mri.python.util import layer_util -CONV_BLOCK_DOC_TEMPLATE = string.Template( - """${rank}D convolutional block. - - A basic Conv + BN + Activation block. The number of convolutional layers is - determined by `filters`. 
BN and activation are optional. - - Args: - filters: A list of `int` numbers or an `int` number of filters. Given an - `int` input, a single convolution is applied; otherwise a series of - convolutions are applied. - kernel_size: An integer or tuple/list of `rank` integers, specifying the +ARGS = string.Template(""" + filters: A `int` or a `list` of `int`. Given an `int` input, a single + convolution is applied; otherwise a series of convolutions are applied. + kernel_size: An `int` or `list` of ${rank} `int`s, specifying the size of the convolution window. Can be a single integer to specify the same value for all spatial dimensions. - strides: An integer or tuple/list of `rank` integers, specifying the strides + strides: An `int` or a `list` of ${rank} `int`s, specifying the strides of the convolution along each spatial dimension. Can be a single integer to specify the same value for all spatial dimensions. activation: A callable or a Keras activation identifier. The activation to use in all layers. Defaults to `'relu'`. - out_activation: A callable or a Keras activation identifier. The activation + output_activation: A callable or a Keras activation identifier. The activation to use in the last layer. Defaults to `'same'`, in which case we use the same activation as in previous layers as defined by `activation`. use_bias: A `boolean`, whether the block's layers use bias vectors. Defaults to `True`. kernel_initializer: A `tf.keras.initializers.Initializer` or a Keras initializer identifier. Initializer for convolutional kernels. Defaults to - `'VarianceScaling'`. + `'variance_scaling'`. bias_initializer: A `tf.keras.initializers.Initializer` or a Keras - initializer identifier. Initializer for bias terms. Defaults to `'Zeros'`. + initializer identifier. Initializer for bias terms. Defaults to `'zeros'`. kernel_regularizer: A `tf.keras.initializers.Regularizer` or a Keras regularizer identifier. Regularizer for convolutional kernels. Defaults to `None`. 
@@ -75,13 +72,15 @@ use_batch_norm: If `True`, use batch normalization. Defaults to `False`. use_sync_bn: If `True`, use synchronised batch normalization. Defaults to `False`. + use_instance_norm: If `True`, use instance normalization. Defaults to + `False`. bn_momentum: A `float`. Momentum for the moving average in batch normalization. bn_epsilon: A `float`. Small float added to variance to avoid dividing by zero during batch normalization. - use_residual: A `boolean`. If `True`, the input is added to the outputs to + use_residual: A boolean. If `True`, the input is added to the outputs to create a residual learning block. Defaults to `False`. - use_dropout: A `boolean`. If `True`, a dropout layer is inserted after + use_dropout: A boolean. If `True`, a dropout layer is inserted after each activation. Defaults to `False`. dropout_rate: A `float`. The dropout rate. Only relevant if `use_dropout` is `True`. Defaults to 0.3. @@ -89,26 +88,35 @@ `'spatial'`. Standard dropout drops individual elements from the feature maps, whereas spatial dropout drops entire feature maps. Only relevant if `use_dropout` is `True`. Defaults to `'standard'`. - **kwargs: Additional keyword arguments to be passed to base class. - """) +""") + + +class ConvBlock(graph_like_network.GraphLikeNetwork): + """${rank}D convolutional block. + A basic Conv + BN + Activation + Dropout block. The number of convolutional + layers is determined by the length of `filters`. BN and activation are + optional. -class ConvBlock(tf.keras.Model): - """Convolutional block (private base class).""" + Args: + ${args} + **kwargs: Additional keyword arguments to be passed to base class. 
+ """ def __init__(self, rank, filters, kernel_size, strides=1, activation='relu', - out_activation='same', + output_activation='same', use_bias=True, - kernel_initializer='VarianceScaling', - bias_initializer='Zeros', + kernel_initializer='variance_scaling', + bias_initializer='zeros', kernel_regularizer=None, bias_regularizer=None, use_batch_norm=False, use_sync_bn=False, + use_instance_norm=False, bn_momentum=0.99, bn_epsilon=0.001, use_residual=False, @@ -117,129 +125,213 @@ def __init__(self, dropout_type='standard', **kwargs): """Create a basic convolution block.""" + conv_fn = kwargs.pop('_conv_fn', layer_util.get_nd_layer('Conv', rank)) + conv_kwargs = kwargs.pop('_conv_kwargs', {}) super().__init__(**kwargs) - self._rank = rank - self._filters = [filters] if isinstance(filters, int) else filters - self._kernel_size = kernel_size - self._strides = strides - self._activation = activation - self._out_activation = out_activation - self._use_bias = use_bias - self._kernel_initializer = kernel_initializer - self._bias_initializer = bias_initializer - self._kernel_regularizer = kernel_regularizer - self._bias_regularizer = bias_regularizer - self._use_batch_norm = use_batch_norm - self._use_sync_bn = use_sync_bn - self._bn_momentum = bn_momentum - self._bn_epsilon = bn_epsilon - self._use_residual = use_residual - self._use_dropout = use_dropout - self._dropout_rate = dropout_rate - self._dropout_type = check_util.validate_enum( + self.rank = rank + self.filters = [filters] if isinstance(filters, int) else filters + self.kernel_size = kernel_size + self.strides = strides + self.activation = activations.get(activation) + if output_activation == 'same': + self.output_activation = self.activation + else: + self.output_activation = activations.get(output_activation) + self.use_bias = use_bias + self.kernel_initializer = initializers.get(kernel_initializer) + self.bias_initializer = initializers.get(bias_initializer) + self.kernel_regularizer = 
tf.keras.regularizers.get(kernel_regularizer) + self.bias_regularizer = tf.keras.regularizers.get(bias_regularizer) + self.use_batch_norm = use_batch_norm + self.use_sync_bn = use_sync_bn + self.use_instance_norm = use_instance_norm + self.bn_momentum = bn_momentum + self.bn_epsilon = bn_epsilon + self.use_residual = use_residual + self.use_dropout = use_dropout + self.dropout_rate = dropout_rate + self.dropout_type = check_util.validate_enum( dropout_type, {'standard', 'spatial'}, 'dropout_type') - self._num_layers = len(self._filters) - conv = layer_util.get_nd_layer('Conv', self._rank) + if use_batch_norm and use_instance_norm: + raise ValueError('Cannot use both batch and instance normalization.') - if self._use_batch_norm: - if self._use_sync_bn: + if self.use_batch_norm: + if self.use_sync_bn: bn = tf.keras.layers.experimental.SyncBatchNormalization else: bn = tf.keras.layers.BatchNormalization - if self._use_dropout: - if self._dropout_type == 'standard': + if self.use_dropout: + if self.dropout_type == 'standard': dropout = tf.keras.layers.Dropout - elif self._dropout_type == 'spatial': - dropout = layer_util.get_nd_layer('SpatialDropout', self._rank) + elif self.dropout_type == 'spatial': + dropout = layer_util.get_nd_layer('SpatialDropout', self.rank) if tf.keras.backend.image_data_format() == 'channels_last': - self._channel_axis = -1 - else: - self._channel_axis = 1 - - self._convs = [] - self._norms = [] - self._dropouts = [] - for num_filters in self._filters: - self._convs.append( - conv(filters=num_filters, - kernel_size=self._kernel_size, - strides=self._strides, - padding='same', - data_format=None, - activation=None, - use_bias=self._use_bias, - kernel_initializer=self._kernel_initializer, - bias_initializer=self._bias_initializer, - kernel_regularizer=self._kernel_regularizer, - bias_regularizer=self._bias_regularizer)) - if self._use_batch_norm: - self._norms.append( - bn(axis=self._channel_axis, - momentum=self._bn_momentum, - 
epsilon=self._bn_epsilon)) - if self._use_dropout: - self._dropouts.append(dropout(rate=self._dropout_rate)) - - self._activation_fn = tf.keras.activations.get(self._activation) - if self._out_activation == 'same': - self._out_activation_fn = self._activation_fn + self.channel_axis = -1 else: - self._out_activation_fn = tf.keras.activations.get(self._out_activation) + self.channel_axis = 1 - def call(self, inputs, training=None): # pylint: disable=unused-argument, missing-param-doc - """Runs forward pass on the input tensor.""" - x = inputs + conv_kwargs.update(dict( + filters=None, # To be filled during loop below. + kernel_size=self.kernel_size, + strides=self.strides, + padding='same', + data_format=None, + activation=None, + use_bias=self.use_bias, + kernel_initializer=self.kernel_initializer, + bias_initializer=self.bias_initializer, + kernel_regularizer=self.kernel_regularizer, + bias_regularizer=self.bias_regularizer, + dtype=self.dtype)) - for i, (conv, norm, dropout) in enumerate( - itertools.zip_longest(self._convs, self._norms, self._dropouts)): + self._levels = len(self.filters) + self._layers = [] + for level in range(self._levels): # Convolution. - x = conv(x) - # Batch normalization. - if self._use_batch_norm: - x = norm(x, training=training) + conv_kwargs['filters'] = self.filters[level] + self._layers.append(conv_fn(**conv_kwargs)) + # Normalization. + if self.use_batch_norm: + self._layers.append( + bn(axis=self.channel_axis, + momentum=self.bn_momentum, + epsilon=self.bn_epsilon)) + if self.use_instance_norm: + self._layers.append(tfa.layers.InstanceNormalization( + axis=self.channel_axis)) # Activation. - if i == self._num_layers - 1: # Last layer. - x = self._out_activation_fn(x) + if level == self._levels - 1: + # Last level, and `output_activation` is not the same as `activation`. 
+ self._layers.append( + tf.keras.layers.Activation(self.output_activation)) else: - x = self._activation_fn(x) + self._layers.append( + tf.keras.layers.Activation(self.activation)) # Dropout. - if self._use_dropout: - x = dropout(x, training=training) + if self.use_dropout: + self._layers.append(dropout(rate=self.dropout_rate)) + + # Residual. + if self.use_residual: + self._add = tf.keras.layers.Add() - # Residual connection. - if self._use_residual: - x += inputs + def call(self, inputs): # pylint: disable=unused-argument + x = inputs + for layer in self._layers: + x = layer(x) + if self.use_residual: + x = self._add([x, inputs]) return x def get_config(self): """Gets layer configuration.""" config = { - 'filters': self._filters, - 'kernel_size': self._kernel_size, - 'strides': self._strides, - 'activation': self._activation, - 'out_activation': self._out_activation, - 'use_bias': self._use_bias, - 'kernel_initializer': self._kernel_initializer, - 'bias_initializer': self._bias_initializer, - 'kernel_regularizer': self._kernel_regularizer, - 'bias_regularizer': self._bias_regularizer, - 'use_batch_norm': self._use_batch_norm, - 'use_sync_bn': self._use_sync_bn, - 'bn_momentum': self._bn_momentum, - 'bn_epsilon': self._bn_epsilon, - 'use_residual': self._use_residual, - 'use_dropout': self._use_dropout, - 'dropout_rate': self._dropout_rate, - 'dropout_type': self._dropout_type + 'filters': self.filters, + 'kernel_size': self.kernel_size, + 'strides': self.strides, + 'activation': activations.serialize(self.activation), + 'output_activation': activations.serialize( + self.output_activation), + 'use_bias': self.use_bias, + 'kernel_initializer': initializers.serialize(self.kernel_initializer), + 'bias_initializer': initializers.serialize(self.bias_initializer), + 'kernel_regularizer': tf.keras.regularizers.serialize( + self.kernel_regularizer), + 'bias_regularizer': tf.keras.regularizers.serialize( + self.bias_regularizer), + 'use_batch_norm': self.use_batch_norm, + 
'use_sync_bn': self.use_sync_bn, + 'use_instance_norm': self.use_instance_norm, + 'bn_momentum': self.bn_momentum, + 'bn_epsilon': self.bn_epsilon, + 'use_residual': self.use_residual, + 'use_dropout': self.use_dropout, + 'dropout_rate': self.dropout_rate, + 'dropout_type': self.dropout_type } base_config = super().get_config() return {**base_config, **config} +class ConvBlockLSTM(ConvBlock): + """${rank}D convolutional LSTM block. + + Args: + ${args} + stateful: A boolean. If `True`, the last state for each sample at index `i` + in a batch will be used as initial state for the sample of index `i` in + the following batch. Defaults to `False`. + recurrent_regularizer: A `tf.keras.initializers.Regularizer` or a Keras + regularizer identifier. The regularizer applied to the recurrent kernel. + Defaults to `None`. + """ + def __init__(self, + rank, + filters, + kernel_size, + strides=1, + activation='relu', + output_activation='same', + use_bias=True, + kernel_initializer='variance_scaling', + bias_initializer='zeros', + kernel_regularizer=None, + bias_regularizer=None, + use_batch_norm=False, + use_sync_bn=False, + use_instance_norm=False, + bn_momentum=0.99, + bn_epsilon=0.001, + use_residual=False, + use_dropout=False, + dropout_rate=0.3, + dropout_type='standard', + stateful=False, + recurrent_regularizer=None, + **kwargs): + self.stateful = stateful + self.recurrent_regularizer = tf.keras.regularizers.get( + recurrent_regularizer) + super().__init__(rank=rank, + filters=filters, + kernel_size=kernel_size, + strides=strides, + activation=activation, + output_activation=output_activation, + use_bias=use_bias, + kernel_initializer=kernel_initializer, + bias_initializer=bias_initializer, + kernel_regularizer=kernel_regularizer, + bias_regularizer=bias_regularizer, + use_batch_norm=use_batch_norm, + use_sync_bn=use_sync_bn, + use_instance_norm=use_instance_norm, + bn_momentum=bn_momentum, + bn_epsilon=bn_epsilon, + use_residual=use_residual, + use_dropout=use_dropout, 
+ dropout_rate=dropout_rate, + dropout_type=dropout_type, + _conv_fn=layer_util.get_nd_layer('ConvLSTM', rank), + _conv_kwargs=dict( + stateful=self.stateful, + recurrent_regularizer=self.recurrent_regularizer, + return_sequences=True), + **kwargs) + + def get_config(self): + base_config = super().get_config() + config = { + 'stateful': self.stateful, + 'recurrent_regularizer': tf.keras.regularizers.serialize( + self.recurrent_regularizer) + } + return {**base_config, **config} + + @api_util.export("models.ConvBlock1D") @tf.keras.utils.register_keras_serializable(package='MRI') class ConvBlock1D(ConvBlock): @@ -261,6 +353,48 @@ def __init__(self, *args, **kwargs): super().__init__(3, *args, **kwargs) -ConvBlock1D.__doc__ = CONV_BLOCK_DOC_TEMPLATE.substitute(rank=1) -ConvBlock2D.__doc__ = CONV_BLOCK_DOC_TEMPLATE.substitute(rank=2) -ConvBlock3D.__doc__ = CONV_BLOCK_DOC_TEMPLATE.substitute(rank=3) +@api_util.export("models.ConvBlockLSTM1D") +@tf.keras.utils.register_keras_serializable(package='MRI') +class ConvBlockLSTM1D(ConvBlockLSTM): + def __init__(self, *args, **kwargs): + super().__init__(1, *args, **kwargs) + + +@api_util.export("models.ConvBlockLSTM2D") +@tf.keras.utils.register_keras_serializable(package='MRI') +class ConvBlockLSTM2D(ConvBlockLSTM): + def __init__(self, *args, **kwargs): + super().__init__(2, *args, **kwargs) + + +@api_util.export("models.ConvBlockLSTM3D") +@tf.keras.utils.register_keras_serializable(package='MRI') +class ConvBlockLSTM3D(ConvBlockLSTM): + def __init__(self, *args, **kwargs): + super().__init__(3, *args, **kwargs) + + +ConvBlock1D.__doc__ = string.Template(ConvBlock.__doc__).substitute( + rank=1, args=ARGS.substitute(rank=1)) +ConvBlock2D.__doc__ = string.Template(ConvBlock.__doc__).substitute( + rank=2, args=ARGS.substitute(rank=2)) +ConvBlock3D.__doc__ = string.Template(ConvBlock.__doc__).substitute( + rank=3, args=ARGS.substitute(rank=3)) + + +ConvBlock1D.__signature__ = doc_util.get_nd_layer_signature(ConvBlock) 
+ConvBlock2D.__signature__ = doc_util.get_nd_layer_signature(ConvBlock) +ConvBlock3D.__signature__ = doc_util.get_nd_layer_signature(ConvBlock) + + +ConvBlockLSTM1D.__doc__ = string.Template(ConvBlockLSTM.__doc__).substitute( + rank=1, args=ARGS.substitute(rank=1)) +ConvBlockLSTM2D.__doc__ = string.Template(ConvBlockLSTM.__doc__).substitute( + rank=2, args=ARGS.substitute(rank=2)) +ConvBlockLSTM3D.__doc__ = string.Template(ConvBlockLSTM.__doc__).substitute( + rank=3, args=ARGS.substitute(rank=3)) + + +ConvBlockLSTM1D.__signature__ = doc_util.get_nd_layer_signature(ConvBlockLSTM) +ConvBlockLSTM2D.__signature__ = doc_util.get_nd_layer_signature(ConvBlockLSTM) +ConvBlockLSTM3D.__signature__ = doc_util.get_nd_layer_signature(ConvBlockLSTM) diff --git a/tensorflow_mri/python/models/conv_blocks_test.py b/tensorflow_mri/python/models/conv_blocks_test.py index 27942a5e..15c60c07 100644 --- a/tensorflow_mri/python/models/conv_blocks_test.py +++ b/tensorflow_mri/python/models/conv_blocks_test.py @@ -1,4 +1,4 @@ -# Copyright 2021 University College London. All Rights Reserved. +# Copyright 2021 The TensorFlow MRI Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -17,6 +17,8 @@ from absl.testing import parameterized import tensorflow as tf +from tensorflow_mri.python.activations import complex_activations +from tensorflow_mri.python.layers import convolutional from tensorflow_mri.python.models import conv_blocks from tensorflow_mri.python.util import model_util from tensorflow_mri.python.util import test_util @@ -40,6 +42,21 @@ def test_conv_block_creation(self, rank, filters, kernel_size): # pylint: disabl self.assertAllEqual(features.shape, [1] + [128] * rank + [filters]) + def test_complex_valued(self): + """Tests complex-valued conv block.""" + inputs = tf.dtypes.complex( + tf.random.stateless_normal(shape=(2, 32, 32, 4), seed=[12, 34]), + tf.random.stateless_normal(shape=(2, 32, 32, 4), seed=[56, 78])) + + block = conv_blocks.ConvBlock2D( + filters=[6, 6], + kernel_size=3, + activation=complex_activations.complex_relu, + dtype=tf.complex64) + + result = block(inputs) + self.assertAllClose((2, 32, 32, 6), result.shape) + self.assertDTypeEqual(result, tf.complex64) def test_serialize_deserialize(self): """Test de/serialization.""" @@ -48,14 +65,15 @@ def test_serialize_deserialize(self): kernel_size=3, strides=1, activation='tanh', - out_activation='linear', + output_activation='linear', use_bias=False, - kernel_initializer='ones', - bias_initializer='ones', - kernel_regularizer='l2', - bias_regularizer='l1', + kernel_initializer={'class_name': 'Ones', 'config': {}}, + bias_initializer={'class_name': 'Ones', 'config': {}}, + kernel_regularizer=None, + bias_regularizer=None, use_batch_norm=True, use_sync_bn=True, + use_instance_norm=False, bn_momentum=0.98, bn_epsilon=0.002, use_residual=True, @@ -69,6 +87,177 @@ def test_serialize_deserialize(self): block2 = conv_blocks.ConvBlock2D.from_config(block.get_config()) self.assertAllEqual(block2.get_config(), block.get_config()) + def test_arch(self): + """Tests the architecture of the block.""" + tf.keras.backend.clear_session() + inputs = tf.keras.Input(shape=(32, 32, 4)) 
+ model = conv_blocks.ConvBlock2D( + filters=16, kernel_size=3).functional(inputs) + + expected = [ + # name, type, output_shape, params + ('input_1', tf.keras.layers.InputLayer, [(None, 32, 32, 4)], 0), + ('conv2d', convolutional.Conv2D, (None, 32, 32, 16), 592), + ('activation', tf.keras.layers.Activation, (None, 32, 32, 16), 0) + ] + self._check_layers(expected, model.layers) + + def test_multilayer(self): + """Tests the architecture of the block with multiple layers.""" + tf.keras.backend.clear_session() + inputs = tf.keras.Input(shape=(32, 32, 4)) + model = conv_blocks.ConvBlock2D( + filters=[8, 16], kernel_size=3).functional(inputs) + + expected = [ + # name, type, output_shape, params + ('input_1', tf.keras.layers.InputLayer, [(None, 32, 32, 4)], 0), + ('conv2d', convolutional.Conv2D, (None, 32, 32, 8), 296), + ('activation', tf.keras.layers.Activation, (None, 32, 32, 8), 0), + ('conv2d_1', convolutional.Conv2D, (None, 32, 32, 16), 1168), + ('activation_1', tf.keras.layers.Activation, (None, 32, 32, 16), 0) + ] + self._check_layers(expected, model.layers) + + def test_arch_activation(self): + """Tests the architecture of the block with activation.""" + tf.keras.backend.clear_session() + inputs = tf.keras.Input(shape=(32, 32, 4)) + model = conv_blocks.ConvBlock2D( + filters=16, kernel_size=3, activation='sigmoid').functional(inputs) + + expected = [ + # name, type, output_shape, params + ('input_1', tf.keras.layers.InputLayer, [(None, 32, 32, 4)], 0), + ('conv2d', convolutional.Conv2D, (None, 32, 32, 16), 592), + ('activation', tf.keras.layers.Activation, (None, 32, 32, 16), 0) + ] + self._check_layers(expected, model.layers) + + self.assertEqual(tf.keras.activations.sigmoid, model.layers[-1].activation) + + def test_arch_output_activation(self): + """Tests the architecture of the block with output activation.""" + tf.keras.backend.clear_session() + inputs = tf.keras.Input(shape=(32, 32, 4)) + model = conv_blocks.ConvBlock2D( + filters=[8, 16], + 
kernel_size=5, + activation='relu', + output_activation='tanh').functional(inputs) + + expected = [ + # name, type, output_shape, params + ('input_1', tf.keras.layers.InputLayer, [(None, 32, 32, 4)], 0), + ('conv2d', convolutional.Conv2D, (None, 32, 32, 8), 808), + ('activation', tf.keras.layers.Activation, (None, 32, 32, 8), 0), + ('conv2d_1', convolutional.Conv2D, (None, 32, 32, 16), 3216), + ('activation_1', tf.keras.layers.Activation, (None, 32, 32, 16), 0) + ] + self._check_layers(expected, model.layers) + + self.assertEqual(tf.keras.activations.relu, model.layers[2].activation) + self.assertEqual(tf.keras.activations.tanh, model.layers[4].activation) + + def test_arch_batch_norm(self): + """Tests the architecture of the block with batch norm.""" + tf.keras.backend.clear_session() + inputs = tf.keras.Input(shape=(32, 32, 4)) + model = conv_blocks.ConvBlock2D( + filters=16, kernel_size=3, use_batch_norm=True).functional(inputs) + + expected = [ + # name, type, output_shape, params + ('input_1', tf.keras.layers.InputLayer, [(None, 32, 32, 4)], 0), + ('conv2d', convolutional.Conv2D, (None, 32, 32, 16), 592), + ('batch_normalization', + tf.keras.layers.BatchNormalization, (None, 32, 32, 16), 64), + ('activation', tf.keras.layers.Activation, (None, 32, 32, 16), 0) + ] + self._check_layers(expected, model.layers) + + def test_arch_dropout(self): + """Tests the architecture of the block with dropout.""" + tf.keras.backend.clear_session() + inputs = tf.keras.Input(shape=(32, 32, 4)) + model = conv_blocks.ConvBlock2D( + filters=16, kernel_size=3, use_dropout=True).functional(inputs) + + expected = [ + # name, type, output_shape, params + ('input_1', tf.keras.layers.InputLayer, [(None, 32, 32, 4)], 0), + ('conv2d', convolutional.Conv2D, (None, 32, 32, 16), 592), + ('activation', tf.keras.layers.Activation, (None, 32, 32, 16), 0), + ('dropout', tf.keras.layers.Dropout, (None, 32, 32, 16), 0) + ] + self._check_layers(expected, model.layers) + + def test_arch_lstm(self): + 
"""Tests the architecture of the LSTM block.""" + tf.keras.backend.clear_session() + inputs = tf.keras.Input(shape=(None, 32, 32, 4)) + model = conv_blocks.ConvBlockLSTM2D( + filters=16, kernel_size=3).functional(inputs) + + expected = [ + # name, type, output_shape, params + ('input_1', tf.keras.layers.InputLayer, [(None, None, 32, 32, 4)], 0), + ('conv_lstm2d', + tf.keras.layers.ConvLSTM2D, (None, None, 32, 32, 16), 11584), + ('activation', tf.keras.layers.Activation, (None, None, 32, 32, 16), 0), + ] + self._check_layers(expected, model.layers) + + self.assertFalse(model.layers[1].stateful) + + def test_arch_lstm_stateful(self): + """Tests the architecture of the stateful LSTM block.""" + tf.keras.backend.clear_session() + inputs = tf.keras.Input(shape=(6, 32, 32, 4), batch_size=2) + model = conv_blocks.ConvBlockLSTM2D( + filters=16, kernel_size=3, stateful=True).functional(inputs) + + expected = [ + # name, type, output_shape, params + ('input_1', tf.keras.layers.InputLayer, [(2, 6, 32, 32, 4)], 0), + ('conv_lstm2d', tf.keras.layers.ConvLSTM2D, (2, 6, 32, 32, 16), 11584), + ('activation', tf.keras.layers.Activation, (2, 6, 32, 32, 16), 0), + ] + self._check_layers(expected, model.layers) + + self.assertTrue(model.layers[1].stateful) + + def test_reset_states(self): + """Tests the reset_states method.""" + tf.keras.backend.clear_session() + model = conv_blocks.ConvBlockLSTM2D( + filters=16, kernel_size=3, stateful=True) + + input_data = tf.random.stateless_normal((2, 6, 32, 32, 4), [12, 34]) + + # Test subclassed model directly. + _ = model(input_data) + model.reset_states() + + self.assertAllEqual(tf.zeros_like(model.layers[0].states), + model.layers[0].states) + self.assertTrue(model.layers[0].stateful) + + # Test functional model. 
+ model = model.functional(tf.keras.Input(shape=(6, 32, 32, 4), batch_size=2)) + _ = model(input_data) + model.reset_states() + + self.assertAllEqual(tf.zeros_like(model.layers[1].states), + model.layers[1].states) + self.assertTrue(model.layers[1].stateful) + + def _check_layers(self, expected, actual): + actual = [ + (layer.name, type(layer), layer.output_shape, layer.count_params()) + for layer in actual] + self.assertEqual(expected, actual) + if __name__ == '__main__': tf.test.main() diff --git a/tensorflow_mri/python/models/conv_endec.py b/tensorflow_mri/python/models/conv_endec.py index 8e6dea07..95a680e7 100644 --- a/tensorflow_mri/python/models/conv_endec.py +++ b/tensorflow_mri/python/models/conv_endec.py @@ -1,4 +1,4 @@ -# Copyright 2021 University College London. All Rights Reserved. +# Copyright 2021 The TensorFlow MRI Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -18,19 +18,20 @@ import tensorflow as tf +from tensorflow_mri.python import activations +from tensorflow_mri.python import initializers +from tensorflow_mri.python.layers import concatenate from tensorflow_mri.python.util import api_util from tensorflow_mri.python.util import check_util +from tensorflow_mri.python.util import doc_util from tensorflow_mri.python.util import model_util # pylint: disable=cyclic-import from tensorflow_mri.python.util import layer_util -UNET_DOC_TEMPLATE = string.Template( - """${rank}D U-Net model. - - Args: +ARGS = string.Template(""" filters: A `list` of `int`. The number of filters for convolutional layers at each scale. The number of scales is inferred as `len(filters)`. - kernel_size: An integer or tuple/list of ${rank} integers, specifying the + kernel_size: An `int` or a `list` of ${rank} `int`s, specifying the size of the convolution window. Can be a single integer to specify the same value for all spatial dimensions. 
pool_size: The pooling size for the pooling operations. Defaults to 2. @@ -43,9 +44,9 @@ `'relu'`. kernel_initializer: A `tf.keras.initializers.Initializer` or a Keras initializer identifier. Initializer for convolutional kernels. Defaults to - `'VarianceScaling'`. + `'variance_scaling'`. bias_initializer: A `tf.keras.initializers.Initializer` or a Keras - initializer identifier. Initializer for bias terms. Defaults to `'Zeros'`. + initializer identifier. Initializer for bias terms. Defaults to `'zeros'`. kernel_regularizer: A `tf.keras.initializers.Regularizer` or a Keras regularizer identifier. Regularizer for convolutional kernels. Defaults to `None`. @@ -58,10 +59,10 @@ normalization. bn_epsilon: A `float`. Small float added to variance to avoid dividing by zero during batch normalization. - out_channels: An `int`. The number of output channels. - out_kernel_size: An `int` or a list of ${rank} `int`. The size of the + output_filters: An `int`. The number of output channels. + output_kernel_size: An `int` or a `list` of ${rank} `int`s. The size of the convolutional kernel for the output layer. Defaults to `kernel_size`. - out_activation: A callable or a Keras activation identifier. The output + output_activation: A callable or a Keras activation identifier. The output activation. Defaults to `None`. use_global_residual: A `boolean`. If `True`, adds a global residual connection to create a residual learning network. Defaults to `False`. @@ -75,21 +76,33 @@ `use_dropout` is `True`. Defaults to `'standard'`. use_tight_frame: A `boolean`. If `True`, creates a tight frame U-Net as described in [2]. Defaults to `False`. - **kwargs: Additional keyword arguments to be passed to base class. - - References: - .. [1] Ronneberger, O., Fischer, P., & Brox, T. (2015, October). U-net: - Convolutional networks for biomedical image segmentation. In International - Conference on Medical image computing and computer-assisted intervention - (pp. 234-241). Springer, Cham. - .. 
[2] Han, Y., & Ye, J. C. (2018). Framing U-Net via deep convolutional - framelets: Application to sparse-view CT. IEEE transactions on medical - imaging, 37(6), 1418-1429. - """) + use_resize_and_concatenate: A `boolean`. If `True`, the upsampled feature + maps are resized (by cropping) to match the shape of the incoming + skip connection prior to concatenation. This enables more flexible input + shapes. Defaults to `True`. +""") class UNet(tf.keras.Model): - """U-Net model (private base class).""" + """${rank}D U-Net model. + + Args: + ${args} + **kwargs: Additional keyword arguments to be passed to base class. + + References: + 1. Ronneberger, O., Fischer, P., & Brox, T. (2015, October). U-net: + Convolutional networks for biomedical image segmentation. In + International Conference on Medical image computing and computer-assisted + intervention (pp. 234-241). Springer, Cham. + 2. Han, Y., & Ye, J. C. (2018). Framing U-Net via deep convolutional + framelets: Application to sparse-view CT. IEEE transactions on medical + imaging, 37(6), 1418-1429. + 3. Hauptmann, A., Arridge, S., Lucka, F., Muthurangu, V., & Steeden, J. A. + (2019). Real-time cardiovascular MR with spatio-temporal artifact + suppression using deep learning-proof of concept in congenital heart + disease. Magnetic resonance in medicine, 81(2), 1143-1156. 
+ """ def __init__(self, rank, filters, @@ -99,176 +112,215 @@ def __init__(self, use_deconv=False, activation='relu', use_bias=True, - kernel_initializer='VarianceScaling', - bias_initializer='Zeros', + kernel_initializer='variance_scaling', + bias_initializer='zeros', kernel_regularizer=None, bias_regularizer=None, use_batch_norm=False, use_sync_bn=False, + use_instance_norm=False, bn_momentum=0.99, bn_epsilon=0.001, - out_channels=None, - out_kernel_size=None, - out_activation=None, + output_filters=None, + output_kernel_size=None, + output_activation=None, use_global_residual=False, use_dropout=False, dropout_rate=0.3, dropout_type='standard', use_tight_frame=False, + use_resize_and_concatenate=False, **kwargs): - """Creates a UNet model.""" + block_fn = kwargs.pop( + '_block_fn', model_util.get_nd_model('ConvBlock', rank)) + block_kwargs = kwargs.pop('_block_kwargs', {}) + is_time_distributed = kwargs.pop('_is_time_distributed', False) super().__init__(**kwargs) - self._filters = filters - self._kernel_size = kernel_size - self._pool_size = pool_size - self._rank = rank - self._block_depth = block_depth - self._use_deconv = use_deconv - self._activation = activation - self._use_bias = use_bias - self._kernel_initializer = kernel_initializer - self._bias_initializer = bias_initializer - self._kernel_regularizer = kernel_regularizer - self._bias_regularizer = bias_regularizer - self._use_batch_norm = use_batch_norm - self._use_sync_bn = use_sync_bn - self._bn_momentum = bn_momentum - self._bn_epsilon = bn_epsilon - self._out_channels = out_channels - self._out_kernel_size = out_kernel_size - self._out_activation = out_activation - self._use_global_residual = use_global_residual - self._use_dropout = use_dropout - self._dropout_rate = dropout_rate - self._dropout_type = check_util.validate_enum( + self.rank = rank + self.filters = filters + self.kernel_size = kernel_size + self.pool_size = pool_size + self.block_depth = block_depth + self.use_deconv = use_deconv 
+ self.activation = activations.get(activation) + self.use_bias = use_bias + self.kernel_initializer = initializers.get(kernel_initializer) + self.bias_initializer = initializers.get(bias_initializer) + self.kernel_regularizer = tf.keras.regularizers.get(kernel_regularizer) + self.bias_regularizer = tf.keras.regularizers.get(bias_regularizer) + self.use_batch_norm = use_batch_norm + self.use_sync_bn = use_sync_bn + self.use_instance_norm = use_instance_norm + self.bn_momentum = bn_momentum + self.bn_epsilon = bn_epsilon + self.output_filters = output_filters + self.output_kernel_size = output_kernel_size + self.output_activation = activations.get(output_activation) + self.use_global_residual = use_global_residual + self.use_dropout = use_dropout + self.dropout_rate = dropout_rate + self.dropout_type = check_util.validate_enum( dropout_type, {'standard', 'spatial'}, 'dropout_type') - self._use_tight_frame = use_tight_frame - self._dwt_kwargs = {} - self._dwt_kwargs['format_dict'] = False - self._scales = len(filters) + self.use_tight_frame = use_tight_frame + self.use_resize_and_concatenate = use_resize_and_concatenate + + self.scales = len(self.filters) # Check inputs are consistent. if use_tight_frame and pool_size != 2: raise ValueError('pool_size must be 2 if use_tight_frame is True.') - block_layer = model_util.get_nd_model('ConvBlock', self._rank) - block_config = dict( + block_kwargs.update(dict( filters=None, # To be filled for each scale. 
- kernel_size=self._kernel_size, + kernel_size=self.kernel_size, strides=1, - activation=self._activation, - use_bias=self._use_bias, - kernel_initializer=self._kernel_initializer, - bias_initializer=self._bias_initializer, - kernel_regularizer=self._kernel_regularizer, - bias_regularizer=self._bias_regularizer, - use_batch_norm=self._use_batch_norm, - use_sync_bn=self._use_sync_bn, - bn_momentum=self._bn_momentum, - bn_epsilon=self._bn_epsilon, - use_dropout=self._use_dropout, - dropout_rate=self._dropout_rate, - dropout_type=self._dropout_type) + activation=self.activation, + use_bias=self.use_bias, + kernel_initializer=self.kernel_initializer, + bias_initializer=self.bias_initializer, + kernel_regularizer=self.kernel_regularizer, + bias_regularizer=self.bias_regularizer, + use_batch_norm=self.use_batch_norm, + use_sync_bn=self.use_sync_bn, + use_instance_norm=self.use_instance_norm, + bn_momentum=self.bn_momentum, + bn_epsilon=self.bn_epsilon, + use_dropout=self.use_dropout, + dropout_rate=self.dropout_rate, + dropout_type=self.dropout_type, + dtype=self.dtype)) # Configure pooling layer. - if self._use_tight_frame: + if self.use_tight_frame: pool_name = 'DWT' - pool_config = self._dwt_kwargs + pool_config = dict(format_dict=False) else: pool_name = 'MaxPool' pool_config = dict( - pool_size=self._pool_size, - strides=self._pool_size, - padding='same') - pool_layer = layer_util.get_nd_layer(pool_name, self._rank) + pool_size=self.pool_size, + strides=self.pool_size, + padding='same', + dtype=self.dtype) + pool_fn = layer_util.get_nd_layer(pool_name, self.rank) + if is_time_distributed: + pool_fn = wrap_time_distributed(pool_fn) # Configure upsampling layer. - if self._use_deconv: - upsamp_name = 'ConvTranspose' - upsamp_config = dict( - filters=None, # To be filled for each scale. 
- kernel_size=self._kernel_size, - strides=self._pool_size, - padding='same', - activation=None, - use_bias=self._use_bias, - kernel_initializer=self._kernel_initializer, - bias_initializer=self._bias_initializer, - kernel_regularizer=self._kernel_regularizer, - bias_regularizer=self._bias_regularizer) + upsamp_config = dict( + filters=None, # To be filled for each scale. + kernel_size=self.kernel_size, + pool_size=self.pool_size, + padding='same', + activation=self.activation, + use_bias=self.use_bias, + kernel_initializer=self.kernel_initializer, + bias_initializer=self.bias_initializer, + kernel_regularizer=self.kernel_regularizer, + bias_regularizer=self.bias_regularizer, + dtype=self.dtype) + if self.use_deconv: + # Use transposed convolution for upsampling. + def upsamp_fn(**config): + config['strides'] = config.pop('pool_size') + convt_fn = layer_util.get_nd_layer('ConvTranspose', self.rank) + if is_time_distributed: + convt_fn = wrap_time_distributed(convt_fn) + return convt_fn(**config) + else: + # Use upsampling + conv for upsampling. + def upsamp_fn(**config): + pool_size = config.pop('pool_size') + upsamp_fn_ = layer_util.get_nd_layer('UpSampling', rank) + conv_fn = layer_util.get_nd_layer('Conv', rank) + + if is_time_distributed: + upsamp_fn_ = wrap_time_distributed(upsamp_fn_) + conv_fn = wrap_time_distributed(conv_fn) + + upsamp_layer = upsamp_fn_(size=pool_size, dtype=self.dtype) + conv_layer = conv_fn(**config) + return (upsamp_layer, conv_layer) + + # Configure concatenation layer. 
+ if self.use_resize_and_concatenate: + concat_fn = concatenate.ResizeAndConcatenate else: - upsamp_name = 'UpSampling' - upsamp_config = dict( - size=self._pool_size) - upsamp_layer = layer_util.get_nd_layer(upsamp_name, self._rank) + concat_fn = tf.keras.layers.Concatenate if tf.keras.backend.image_data_format() == 'channels_last': - self._channel_axis = -1 + self.channel_axis = -1 else: - self._channel_axis = 1 - - self._enc_blocks = [] - self._dec_blocks = [] - self._pools = [] - self._upsamps = [] - self._concats = [] - if self._use_tight_frame: + self.channel_axis = 1 + + self._enc_blocks = [None] * self.scales + self._dec_blocks = [None] * (self.scales - 1) + self._pools = [None] * (self.scales - 1) + self._upsamps = [None] * (self.scales - 1) + self._concats = [None] * (self.scales - 1) + if self.use_tight_frame: # For tight frame model, we also need to upsample each of the detail # components. - self._detail_upsamps = [] - - # Configure backbone and decoder. - for scale, filt in enumerate(self._filters): - block_config['filters'] = [filt] * self._block_depth - self._enc_blocks.append(block_layer(**block_config)) - - if scale < len(self._filters) - 1: - self._pools.append(pool_layer(**pool_config)) - if use_deconv: - upsamp_config['filters'] = filt - self._upsamps.append(upsamp_layer(**upsamp_config)) - if self._use_tight_frame: + self._detail_upsamps = [None] * (self.scales - 1) + + # Configure encoder. + for scale in range(self.scales): + block_kwargs['filters'] = [filters[scale]] * self.block_depth + self._enc_blocks[scale] = block_fn(**block_kwargs) + + if scale < len(self.filters) - 1: # Not the last scale. + self._pools[scale] = pool_fn(**pool_config) + + # Configure decoder. + for scale in range(self.scales - 2, -1, -1): + block_kwargs['filters'] = [filters[scale]] * self.block_depth + + if scale < len(self.filters) - 1: # Not the last scale. + # Add upsampling layer. 
+ upsamp_config['filters'] = filters[scale] + self._upsamps[scale] = upsamp_fn(**upsamp_config) + # For tight-frame U-Net only. + if self.use_tight_frame: # Add one upsampling layer for each detail component. There are 1 # detail components for 1D, 3 detail components for 2D, and 7 detail # components for 3D. - self._detail_upsamps.append([upsamp_layer(**upsamp_config) - for _ in range(2 ** self._rank - 1)]) - self._concats.append( - tf.keras.layers.Concatenate(axis=self._channel_axis)) - self._dec_blocks.append(block_layer(**block_config)) + self._detail_upsamps[scale] = [upsamp_fn(**upsamp_config) + for _ in range(2 ** self.rank - 1)] + # Add concatenation layer. + self._concats[scale] = concat_fn(axis=self.channel_axis) + # Add decoding block. + self._dec_blocks[scale] = block_fn(**block_kwargs) # Configure output block. - if self._out_channels is not None: - block_config['filters'] = self._out_channels - if self._out_kernel_size is not None: - block_config['kernel_size'] = self._out_kernel_size - # If network is residual, the activation is performed after the residual - # addition. - if self._use_global_residual: - block_config['activation'] = None - else: - block_config['activation'] = self._out_activation - self._out_block = block_layer(**block_config) + if self.output_filters is not None: + block_kwargs['filters'] = self.output_filters + if self.output_kernel_size is not None: + block_kwargs['kernel_size'] = self.output_kernel_size + # If network is residual, the activation is performed after the residual + # addition. + if self.use_global_residual: + block_kwargs['activation'] = None + else: + block_kwargs['activation'] = self.output_activation + self._out_block = block_fn(**block_kwargs) # Configure residual addition, if requested. 
- if self._use_global_residual: + if self.use_global_residual: self._add = tf.keras.layers.Add() - self._out_activation_fn = tf.keras.activations.get(self._out_activation) + self._out_activation = tf.keras.layers.Activation(self.output_activation) - def call(self, inputs, training=None): # pylint: disable=missing-param-doc,unused-argument - """Runs forward pass on the input tensors.""" + def call(self, inputs): x = inputs # For skip connections to decoder. - cache = [None] * (self._scales - 1) - if self._use_tight_frame: - detail_cache = [None] * (self._scales - 1) + cache = [None] * (self.scales - 1) + if self.use_tight_frame: + detail_cache = [None] * (self.scales - 1) # Backbone. - for scale in range(self._scales - 1): + for scale in range(self.scales - 1): cache[scale] = self._enc_blocks[scale](x) x = self._pools[scale](cache[scale]) - if self._use_tight_frame: + if self.use_tight_frame: # Store details for later concatenation, and continue processing # approximation coefficients. detail_cache[scale] = x[1:] # details @@ -278,67 +330,171 @@ def call(self, inputs, training=None): # pylint: disable=missing-param-doc,unuse x = self._enc_blocks[-1](x) # Decoder. - for scale in range(self._scales - 2, -1, -1): - x = self._upsamps[scale](x) - concat_inputs = [x, cache[scale]] - if self._use_tight_frame: + for scale in range(self.scales - 2, -1, -1): + # If not using deconv, `self.upsamps[scale]` is a tuple containing two + # layers (upsampling + conv). + if self.use_deconv: + x = self._upsamps[scale](x) + else: + x = self._upsamps[scale][0](x) + x = self._upsamps[scale][1](x) + concat_inputs = [cache[scale], x] + if self.use_tight_frame: # Upsample detail components too. - d = [up(d) for d, up in zip( - detail_cache[scale], self._detail_upsamps[scale])] + d = [up(d) for d, up in zip(detail_cache[scale], + self._detail_upsamps[scale])] # Add to concatenation. concat_inputs.extend(d) x = self._concats[scale](concat_inputs) x = self._dec_blocks[scale](x) # Head. 
- if self._out_channels is not None: + if self.output_filters is not None: x = self._out_block(x) # Global residual connection. - if self._use_global_residual: + if self.use_global_residual: x = self._add([x, inputs]) - if self._out_activation is not None: - x = self._out_activation_fn(x) + if self.output_activation is not None: + x = self._out_activation(x) return x + def compute_output_shape(self, input_shape): + input_shape = tf.TensorShape(input_shape) + if self.output_filters is not None: + output_filters = self.output_filters + else: + output_filters = self.filters[0] + return input_shape[:-1].concatenate([output_filters]) + def get_config(self): """Returns model configuration for serialization.""" config = { - 'filters': self._filters, - 'kernel_size': self._kernel_size, - 'pool_size': self._pool_size, - 'block_depth': self._block_depth, - 'use_deconv': self._use_deconv, - 'activation': self._activation, - 'use_bias': self._use_bias, - 'kernel_initializer': self._kernel_initializer, - 'bias_initializer': self._bias_initializer, - 'kernel_regularizer': self._kernel_regularizer, - 'bias_regularizer': self._bias_regularizer, - 'use_batch_norm': self._use_batch_norm, - 'use_sync_bn': self._use_sync_bn, - 'bn_momentum': self._bn_momentum, - 'bn_epsilon': self._bn_epsilon, - 'out_channels': self._out_channels, - 'out_kernel_size': self._out_kernel_size, - 'out_activation': self._out_activation, - 'use_global_residual': self._use_global_residual, - 'use_dropout': self._use_dropout, - 'dropout_rate': self._dropout_rate, - 'dropout_type': self._dropout_type, - 'use_tight_frame': self._use_tight_frame + 'filters': self.filters, + 'kernel_size': self.kernel_size, + 'pool_size': self.pool_size, + 'block_depth': self.block_depth, + 'use_deconv': self.use_deconv, + 'activation': activations.serialize(self.activation), + 'use_bias': self.use_bias, + 'kernel_initializer': initializers.serialize(self.kernel_initializer), + 'bias_initializer': 
initializers.serialize(self.bias_initializer), + 'kernel_regularizer': tf.keras.regularizers.serialize( + self.kernel_regularizer), + 'bias_regularizer': tf.keras.regularizers.serialize( + self.bias_regularizer), + 'use_batch_norm': self.use_batch_norm, + 'use_sync_bn': self.use_sync_bn, + 'use_instance_norm': self.use_instance_norm, + 'bn_momentum': self.bn_momentum, + 'bn_epsilon': self.bn_epsilon, + 'output_filters': self.output_filters, + 'output_kernel_size': self.output_kernel_size, + 'output_activation': activations.serialize( + self.output_activation), + 'use_global_residual': self.use_global_residual, + 'use_dropout': self.use_dropout, + 'dropout_rate': self.dropout_rate, + 'dropout_type': self.dropout_type, + 'use_tight_frame': self.use_tight_frame, + 'use_resize_and_concatenate': self.use_resize_and_concatenate } base_config = super().get_config() return {**base_config, **config} - @classmethod - def from_config(cls, config): - if 'base_filters' in config: - # Old config format. Convert to new format. - config['filters'] = [config.pop('base_filters') * (2 ** scale) - for scale in config.pop('scales')] - return super().from_config(config) + +class UNetLSTM(UNet): + """${rank}D LSTM U-Net model. + + Args: + ${args} + stateful: A boolean. If `True`, the last state for each sample at index `i` + in a batch will be used as initial state for the sample of index `i` in + the following batch. Defaults to `False`. + recurrent_regularizer: A `tf.keras.initializers.Regularizer` or a Keras + regularizer identifier. The regularizer applied to the recurrent kernel. + Defaults to `None`. 
+ """ + def __init__(self, + rank, + filters, + kernel_size, + pool_size=2, + block_depth=2, + use_deconv=False, + activation='relu', + use_bias=True, + kernel_initializer='variance_scaling', + bias_initializer='zeros', + kernel_regularizer=None, + bias_regularizer=None, + use_batch_norm=False, + use_sync_bn=False, + use_instance_norm=False, + bn_momentum=0.99, + bn_epsilon=0.001, + output_filters=None, + output_kernel_size=None, + output_activation=None, + use_global_residual=False, + use_dropout=False, + dropout_rate=0.3, + dropout_type='standard', + use_tight_frame=False, + use_resize_and_concatenate=False, + stateful=False, + recurrent_regularizer=None, + **kwargs): + self.stateful = stateful + self.recurrent_regularizer = tf.keras.regularizers.get( + recurrent_regularizer) + super().__init__(rank=rank, + filters=filters, + kernel_size=kernel_size, + pool_size=pool_size, + block_depth=block_depth, + use_deconv=use_deconv, + activation=activation, + use_bias=use_bias, + kernel_initializer=kernel_initializer, + bias_initializer=bias_initializer, + kernel_regularizer=kernel_regularizer, + bias_regularizer=bias_regularizer, + use_batch_norm=use_batch_norm, + use_sync_bn=use_sync_bn, + use_instance_norm=use_instance_norm, + bn_momentum=bn_momentum, + bn_epsilon=bn_epsilon, + output_filters=output_filters, + output_kernel_size=output_kernel_size, + output_activation=output_activation, + use_global_residual=use_global_residual, + use_dropout=use_dropout, + dropout_rate=dropout_rate, + dropout_type=dropout_type, + use_tight_frame=use_tight_frame, + use_resize_and_concatenate=use_resize_and_concatenate, + _block_fn=model_util.get_nd_model('ConvBlockLSTM', rank), + _block_kwargs=dict( + stateful=self.stateful, + recurrent_regularizer=self.recurrent_regularizer), + _is_time_distributed=True, + **kwargs) + + def get_config(self): + base_config = super().get_config() + config = { + 'stateful': self.stateful, + 'recurrent_regularizer': tf.keras.regularizers.serialize( + 
self.recurrent_regularizer) + } + return {**base_config, **config} + + +def wrap_time_distributed(fn): + return lambda *args, **kwargs: ( + tf.keras.layers.TimeDistributed(fn(*args, **kwargs))) @api_util.export("models.UNet1D") @@ -362,6 +518,48 @@ def __init__(self, *args, **kwargs): super().__init__(3, *args, **kwargs) -UNet1D.__doc__ = UNET_DOC_TEMPLATE.substitute(rank=1) -UNet2D.__doc__ = UNET_DOC_TEMPLATE.substitute(rank=2) -UNet3D.__doc__ = UNET_DOC_TEMPLATE.substitute(rank=3) +@api_util.export("models.UNetLSTM1D") +@tf.keras.utils.register_keras_serializable(package='MRI') +class UNetLSTM1D(UNetLSTM): + def __init__(self, *args, **kwargs): + super().__init__(1, *args, **kwargs) + + +@api_util.export("models.UNetLSTM2D") +@tf.keras.utils.register_keras_serializable(package='MRI') +class UNetLSTM2D(UNetLSTM): + def __init__(self, *args, **kwargs): + super().__init__(2, *args, **kwargs) + + +@api_util.export("models.UNetLSTM3D") +@tf.keras.utils.register_keras_serializable(package='MRI') +class UNetLSTM3D(UNetLSTM): + def __init__(self, *args, **kwargs): + super().__init__(3, *args, **kwargs) + + +UNet1D.__doc__ = string.Template(UNet.__doc__).substitute( + rank=1, args=ARGS.substitute(rank=1)) +UNet2D.__doc__ = string.Template(UNet.__doc__).substitute( + rank=2, args=ARGS.substitute(rank=2)) +UNet3D.__doc__ = string.Template(UNet.__doc__).substitute( + rank=3, args=ARGS.substitute(rank=3)) + + +UNet1D.__signature__ = doc_util.get_nd_layer_signature(UNet) +UNet2D.__signature__ = doc_util.get_nd_layer_signature(UNet) +UNet3D.__signature__ = doc_util.get_nd_layer_signature(UNet) + + +UNetLSTM1D.__doc__ = string.Template(UNetLSTM.__doc__).substitute( + rank=1, args=ARGS.substitute(rank=1)) +UNetLSTM2D.__doc__ = string.Template(UNetLSTM.__doc__).substitute( + rank=2, args=ARGS.substitute(rank=2)) +UNetLSTM3D.__doc__ = string.Template(UNetLSTM.__doc__).substitute( + rank=3, args=ARGS.substitute(rank=3)) + + +UNetLSTM1D.__signature__ = 
doc_util.get_nd_layer_signature(UNetLSTM) +UNetLSTM2D.__signature__ = doc_util.get_nd_layer_signature(UNetLSTM) +UNetLSTM3D.__signature__ = doc_util.get_nd_layer_signature(UNetLSTM) diff --git a/tensorflow_mri/python/models/conv_endec_test.py b/tensorflow_mri/python/models/conv_endec_test.py index 0cfc0931..3cb24142 100644 --- a/tensorflow_mri/python/models/conv_endec_test.py +++ b/tensorflow_mri/python/models/conv_endec_test.py @@ -1,4 +1,4 @@ -# Copyright 2021 University College London. All Rights Reserved. +# Copyright 2021 The TensorFlow MRI Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -18,6 +18,10 @@ from absl.testing import parameterized import tensorflow as tf +from tensorflow_mri.python.layers import convolutional +from tensorflow_mri.python.layers import pooling +from tensorflow_mri.python.layers import reshaping +from tensorflow_mri.python.models import conv_blocks from tensorflow_mri.python.models import conv_endec from tensorflow_mri.python.util import test_util @@ -35,7 +39,7 @@ def test_unet_creation(self, # pylint: disable=missing-param-doc rank, filters, kernel_size, - out_channels, + output_filters, use_deconv, use_global_residual): """Test object creation.""" @@ -51,14 +55,14 @@ def test_unet_creation(self, # pylint: disable=missing-param-doc filters=filters, kernel_size=kernel_size, use_deconv=use_deconv, - out_channels=out_channels, + output_filters=output_filters, use_global_residual=use_global_residual) features = network(inputs) - if out_channels is None: - out_channels = filters[0] + if output_filters is None: + output_filters = filters[0] - self.assertAllEqual(features.shape, [1] + [128] * rank + [out_channels]) + self.assertAllEqual(features.shape, [1] + [128] * rank + [output_filters]) @test_util.run_all_execution_modes @@ -84,6 +88,21 @@ def test_use_bias(self, use_bias): if hasattr(layer, 'use_bias'): 
self.assertEqual(use_bias, layer.use_bias) + def test_complex_valued(self): + """Test complex-valued U-Net.""" + inputs = tf.dtypes.complex( + tf.random.stateless_normal(shape=(2, 32, 32, 4), seed=[12, 34]), + tf.random.stateless_normal(shape=(2, 32, 32, 4), seed=[56, 78])) + + block = conv_endec.UNet2D( + filters=[4, 8], + kernel_size=3, + activation='complex_relu', + dtype=tf.complex64) + + result = block(inputs) + self.assertAllClose((2, 32, 32, 4), result.shape) + self.assertDTypeEqual(result, tf.complex64) def test_serialize_deserialize(self): """Test de/serialization.""" @@ -95,29 +114,210 @@ def test_serialize_deserialize(self): use_deconv=True, activation='tanh', use_bias=False, - kernel_initializer='ones', - bias_initializer='ones', - kernel_regularizer='l2', - bias_regularizer='l1', + kernel_initializer={'class_name': 'Ones', 'config': {}}, + bias_initializer={'class_name': 'Ones', 'config': {}}, + kernel_regularizer={'class_name': 'L2', 'config': {'l2': 1.0}}, + bias_regularizer=None, use_batch_norm=True, use_sync_bn=True, bn_momentum=0.98, bn_epsilon=0.002, - out_channels=1, - out_kernel_size=1, - out_activation='relu', + output_filters=1, + output_kernel_size=1, + output_activation='relu', use_global_residual=True, use_dropout=True, dropout_rate=0.5, dropout_type='spatial', - use_tight_frame=True) + use_tight_frame=True, + use_instance_norm=False, + use_resize_and_concatenate=False) block = conv_endec.UNet2D(**config) - self.assertEqual(block.get_config(), config) + self.assertEqual(config, block.get_config()) block2 = conv_endec.UNet2D.from_config(block.get_config()) self.assertAllEqual(block.get_config(), block2.get_config()) + def test_arch(self): + """Tests basic model arch.""" + tf.keras.backend.clear_session() + + model = conv_endec.UNet2D(filters=[8, 16], kernel_size=3) + inputs = tf.keras.Input(shape=(32, 32, 1), batch_size=1) + model = tf.keras.Model(inputs, model.call(inputs)) + + expected = [ + # name, type, output_shape, params + 
('input_1', 'InputLayer', [(1, 32, 32, 1)], 0), + ('conv_block2d', 'ConvBlock2D', (1, 32, 32, 8), 664), + ('max_pooling2d', 'MaxPooling2D', (1, 16, 16, 8), 0), + ('conv_block2d_1', 'ConvBlock2D', (1, 16, 16, 16), 3488), + ('up_sampling2d', 'UpSampling2D', (1, 32, 32, 16), 0), + ('conv2d_4', 'Conv2D', (1, 32, 32, 8), 1160), + ('concatenate', 'Concatenate', (1, 32, 32, 16), 0), + ('conv_block2d_2', 'ConvBlock2D', (1, 32, 32, 8), 1744)] + + self.assertAllEqual( + [elem[0] for elem in expected], + [layer.name for layer in get_layers(model)]) + + self.assertAllEqual( + [elem[1] for elem in expected], + [layer.__class__.__name__ for layer in get_layers(model)]) + + self.assertAllEqual( + [elem[2] for elem in expected], + [layer.output_shape for layer in get_layers(model)]) + + self.assertAllEqual( + [elem[3] for elem in expected], + [layer.count_params() for layer in get_layers(model)]) + + def test_arch_with_deconv(self): + """Tests model arch with deconvolution.""" + tf.keras.backend.clear_session() + + model = conv_endec.UNet2D(filters=[8, 16], kernel_size=3, use_deconv=True) + inputs = tf.keras.Input(shape=(32, 32, 1), batch_size=1) + model = tf.keras.Model(inputs, model.call(inputs)) + + expected = [ + # name, type, output_shape + ('input_1', 'InputLayer', [(1, 32, 32, 1)], 0), + ('conv_block2d', 'ConvBlock2D', (1, 32, 32, 8), 664), + ('max_pooling2d', 'MaxPooling2D', (1, 16, 16, 8), 0), + ('conv_block2d_1', 'ConvBlock2D', (1, 16, 16, 16), 3488), + ('conv2d_transpose', 'Conv2DTranspose', (1, 32, 32, 8), 1160), + ('concatenate', 'Concatenate', (1, 32, 32, 16), 0), + ('conv_block2d_2', 'ConvBlock2D', (1, 32, 32, 8), 1744)] + + self.assertAllEqual( + [elem[0] for elem in expected], + [layer.name for layer in get_layers(model)]) + + self.assertAllEqual( + [elem[1] for elem in expected], + [layer.__class__.__name__ for layer in get_layers(model)]) + + self.assertAllEqual( + [elem[2] for elem in expected], + [layer.output_shape for layer in get_layers(model)]) + + 
self.assertAllEqual( + [elem[3] for elem in expected], + [layer.count_params() for layer in get_layers(model)]) + + def test_arch_with_out_block(self): + """Tests model arch with output block.""" + tf.keras.backend.clear_session() + + tf.random.set_seed(32) + model = conv_endec.UNet2D(filters=[8, 16], kernel_size=3, output_filters=2) + inputs = tf.keras.Input(shape=(32, 32, 1), batch_size=1) + model = tf.keras.Model(inputs, model.call(inputs)) + + expected = [ + # name, type, output_shape, params + ('input_1', 'InputLayer', [(1, 32, 32, 1)], 0), + ('conv_block2d', 'ConvBlock2D', (1, 32, 32, 8), 664), + ('max_pooling2d', 'MaxPooling2D', (1, 16, 16, 8), 0), + ('conv_block2d_1', 'ConvBlock2D', (1, 16, 16, 16), 3488), + ('up_sampling2d', 'UpSampling2D', (1, 32, 32, 16), 0), + ('conv2d_4', 'Conv2D', (1, 32, 32, 8), 1160), + ('concatenate', 'Concatenate', (1, 32, 32, 16), 0), + ('conv_block2d_2', 'ConvBlock2D', (1, 32, 32, 8), 1744), + ('conv_block2d_3', 'ConvBlock2D', (1, 32, 32, 2), 146)] + + self.assertAllEqual( + [elem[0] for elem in expected], + [layer.name for layer in get_layers(model)]) + + self.assertAllEqual( + [elem[1] for elem in expected], + [layer.__class__.__name__ for layer in get_layers(model)]) + + self.assertAllEqual( + [elem[2] for elem in expected], + [layer.output_shape for layer in get_layers(model)]) + + self.assertAllEqual( + [elem[3] for elem in expected], + [layer.count_params() for layer in get_layers(model)]) + + out_block = model.layers[-1] + self.assertLen(out_block.layers, 2) + self.assertIsInstance(out_block.layers[0], convolutional.Conv2D) + self.assertIsInstance(out_block.layers[1], tf.keras.layers.Activation) + self.assertEqual(tf.keras.activations.linear, + out_block.layers[1].activation) + + input_data = tf.random.stateless_normal((1, 32, 32, 1), [12, 34]) + output_data = model.predict(input_data) + + # New model with output activation. 
+ tf.random.set_seed(32) + model = conv_endec.UNet2D( + filters=[8, 16], kernel_size=3, output_filters=2, + output_activation='sigmoid') + inputs = tf.keras.Input(shape=(32, 32, 1), batch_size=1) + model = tf.keras.Model(inputs, model.call(inputs)) + + self.assertAllClose(tf.keras.activations.sigmoid(output_data), + model.predict(input_data)) + + def test_arch_lstm(self): + """Tests LSTM model arch.""" + tf.keras.backend.clear_session() + + model = conv_endec.UNetLSTM2D(filters=[8, 16], kernel_size=3) + inputs = tf.keras.Input(shape=(4, 32, 32, 1), batch_size=1) + model = tf.keras.Model(inputs, model.call(inputs)) + + expected = [ + # name, type, output_shape, params + ('input_1', tf.keras.layers.InputLayer, [(1, 4, 32, 32, 1)], 0), + ('conv_block_lstm2d', + conv_blocks.ConvBlockLSTM2D, (1, 4, 32, 32, 8), 7264), + ('time_distributed', + tf.keras.layers.TimeDistributed, (1, 4, 16, 16, 8), 0), + ('conv_block_lstm2d_1', + conv_blocks.ConvBlockLSTM2D, (1, 4, 16, 16, 16), 32384), + ('time_distributed_1', + tf.keras.layers.TimeDistributed, (1, 4, 32, 32, 16), 0), + ('time_distributed_2', + tf.keras.layers.TimeDistributed, (1, 4, 32, 32, 8), 1160), + ('concatenate', tf.keras.layers.Concatenate, (1, 4, 32, 32, 16), 0), + ('conv_block_lstm2d_2', + conv_blocks.ConvBlockLSTM2D, (1, 4, 32, 32, 8), 11584)] + + self._check_layers(expected, model.layers) + + # Check that TimeDistributed wrappers wrap the right layers. 
+ self.assertIsInstance(model.layers[2].layer, pooling.MaxPooling2D) + self.assertIsInstance(model.layers[4].layer, reshaping.UpSampling2D) + self.assertIsInstance(model.layers[5].layer, convolutional.Conv2D) + + def _check_layers(self, expected, actual): + actual = [ + (layer.name, type(layer), layer.output_shape, layer.count_params()) + for layer in actual] + self.assertEqual(expected, actual) + + +def get_layers(model, recursive=False): + """Gets all layers in a model (expanding nested models).""" + layers = [] + for layer in model.layers: + if isinstance(layer, tf.keras.Model): + if recursive: + layers.extend(get_layers(layer, recursive=True)) + else: + layers.append(layer) + else: + layers.append(layer) + return layers + if __name__ == '__main__': tf.test.main() diff --git a/tensorflow_mri/python/models/graph_like_network.py b/tensorflow_mri/python/models/graph_like_network.py new file mode 100644 index 00000000..0f37a0d7 --- /dev/null +++ b/tensorflow_mri/python/models/graph_like_network.py @@ -0,0 +1,29 @@ +# Copyright 2022 The TensorFlow MRI Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Graph-like network.""" + +import tensorflow as tf + + +class GraphLikeNetwork(tf.keras.Model): + """Base class for models with graph-like structure. + + Adds a method `functional` that returns a functional model with the same + architecture as the current model. 
Functional models have some advantages + over subclassing as described in + https://www.tensorflow.org/guide/keras/functional#when_to_use_the_functional_api. + """ # pylint: disable=line-too-long + def functional(self, inputs): + return tf.keras.Model(inputs, self.call(inputs)) diff --git a/tensorflow_mri/python/ops/__init__.py b/tensorflow_mri/python/ops/__init__.py index 461a64f6..7adf607e 100644 --- a/tensorflow_mri/python/ops/__init__.py +++ b/tensorflow_mri/python/ops/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2021 University College London. All Rights Reserved. +# Copyright 2021 The TensorFlow MRI Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tensorflow_mri/python/ops/array_ops.py b/tensorflow_mri/python/ops/array_ops.py index 370018e4..1fa36927 100644 --- a/tensorflow_mri/python/ops/array_ops.py +++ b/tensorflow_mri/python/ops/array_ops.py @@ -1,4 +1,4 @@ -# Copyright 2021 University College London. All Rights Reserved. +# Copyright 2021 The TensorFlow MRI Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -76,9 +76,10 @@ def meshgrid(*args): fields over N-D grids, given one-dimensional coordinate arrays `x1, x2, ..., xn`. - .. note:: + ```{note} Similar to `tf.meshgrid`, but uses matrix indexing and returns a stacked tensor (along axis -1) instead of a list of tensors. + ``` Args: *args: `Tensors` with rank 1. @@ -90,6 +91,67 @@ def meshgrid(*args): return tf.stack(tf.meshgrid(*args, indexing='ij'), axis=-1) +@api_util.export("array.meshgrid") +def dynamic_meshgrid(vecs): + """Return coordinate matrices from coordinate vectors. + + Make N-D coordinate arrays for vectorized evaluations of N-D scalar/vector + fields over N-D grids, given one-dimensional coordinate arrays + `x1, x2, ..., xn`. 
+ + ```{note} + Similar to `tf.meshgrid`, but uses matrix indexing, supports dynamic tensor + arrays and returns a stacked tensor (along axis -1) instead of a list of + tensors. + ``` + + Args: + vecs: A `tf.TensorArray` containing the coordinate vectors. + + Returns: + A `Tensor` of shape `[M1, M2, ..., Mn, N]`, where `N` is the number of + tensors in `vecs` and `Mi = tf.size(args[i])`. + """ + if not isinstance(vecs, tf.TensorArray): + # Fall back to static implementation. + return meshgrid(*vecs) + + # Compute shape of the output grid. + output_shape = tf.TensorArray( + dtype=tf.int32, size=vecs.size(), element_shape=()) + + def _cond1(i, vecs, shape): # pylint:disable=unused-argument + return i < vecs.size() + def _body1(i, vecs, shape): + vec = vecs.read(i) + shape = shape.write(i, tf.shape(vec)[0]) + return i + 1, vecs, shape + + _, _, output_shape = tf.while_loop(_cond1, _body1, [0, vecs, output_shape]) + output_shape = output_shape.stack() + + # Compute output grid. + output_grid = tf.TensorArray(dtype=vecs.dtype, size=vecs.size()) + + def _cond2(i, vecs, grid): # pylint:disable=unused-argument + return i < vecs.size() + def _body2(i, vecs, grid): + vec = vecs.read(i) + vec_shape = tf.ones(shape=[vecs.size()], dtype=tf.int32) + vec_shape = tf.tensor_scatter_nd_update(vec_shape, [[i]], [-1]) + vec = tf.reshape(vec, vec_shape) + grid = grid.write(i, tf.broadcast_to(vec, output_shape)) + return i + 1, vecs, grid + + _, _, output_grid = tf.while_loop(_cond2, _body2, [0, vecs, output_grid]) + output_grid = output_grid.stack() + + perm = tf.concat([tf.range(1, vecs.size() + 1), [0]], 0) + output_grid = tf.transpose(output_grid, perm) + + return output_grid + + def ravel_multi_index(multi_indices, dims): """Converts an array of multi-indices into an array of flat indices. @@ -287,13 +349,15 @@ def update_tensor(tensor, slices, value): This operator performs slice assignment. - .. note:: + ```{note} Equivalent to `tensor[slices] = value`. + ``` - .. 
warning:: + ```{warning} TensorFlow does not support slice assignment because tensors are immutable. This operator works around this limitation by creating a new tensor, which may have performance implications. + ``` Args: tensor: A `tf.Tensor`. @@ -328,9 +392,10 @@ def _with_index_update_helper(update_method, a, slice_spec, updates): # pylint: def map_fn(fn, elems, batch_dims=1, **kwargs): """Transforms `elems` by applying `fn` to each element. - .. note:: + ```{note} Similar to `tf.map_fn`, but it supports unstacking along multiple batch dimensions. + ``` For the parameters, see `tf.map_fn`. The only difference is that there is an additional `batch_dims` keyword argument which allows specifying the number diff --git a/tensorflow_mri/python/ops/array_ops_test.py b/tensorflow_mri/python/ops/array_ops_test.py index a1b2f81f..56588e6a 100755 --- a/tensorflow_mri/python/ops/array_ops_test.py +++ b/tensorflow_mri/python/ops/array_ops_test.py @@ -1,4 +1,4 @@ -# Copyright 2021 University College London. All Rights Reserved. +# Copyright 2021 The TensorFlow MRI Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -60,6 +60,29 @@ def test_meshgrid(self): self.assertAllEqual(result, ref) +class DynamicMeshgridTest(test_util.TestCase): + @test_util.run_in_graph_and_eager_modes + @parameterized.product(static=[False, True]) + def test_dynamic_meshgrid_static(self, static): + vec1 = [1, 2, 3] + vec2 = [4, 5] + + ref = [[[1, 4], [1, 5]], + [[2, 4], [2, 5]], + [[3, 4], [3, 5]]] + + if static: + vecs = [vec1, vec2] + else: + vecs = tf.TensorArray(tf.int32, size=2, infer_shape=False, + clear_after_read=False) + vecs = vecs.write(0, vec1) + vecs = vecs.write(1, vec2) + + result = array_ops.dynamic_meshgrid(vecs) + self.assertAllEqual(result, ref) + + class RavelMultiIndexTest(test_util.TestCase): """Tests for the `ravel_multi_index` op.""" diff --git a/tensorflow_mri/python/ops/coil_ops.py b/tensorflow_mri/python/ops/coil_ops.py deleted file mode 100755 index d4932e17..00000000 --- a/tensorflow_mri/python/ops/coil_ops.py +++ /dev/null @@ -1,715 +0,0 @@ -# Copyright 2021 University College London. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Coil array operations. - -This module contains functions to operate with MR coil arrays, such as -estimating coil sensitivities and combining multi-coil images. 
-""" - -import abc -import collections -import functools - -import numpy as np -import tensorflow as tf -import tensorflow.experimental.numpy as tnp - -from tensorflow_mri.python.ops import array_ops -from tensorflow_mri.python.ops import fft_ops -from tensorflow_mri.python.util import api_util -from tensorflow_mri.python.util import check_util - - -@api_util.export("coils.estimate_sensitivities") -def estimate_coil_sensitivities(input_, - coil_axis=-1, - method='walsh', - **kwargs): - """Estimate coil sensitivity maps. - - This method supports 2D and 3D inputs. - - Args: - input_: A `Tensor`. Must have type `complex64` or `complex128`. Must have - shape `[height, width, coils]` for 2D inputs, or `[depth, height, - width, coils]` for 3D inputs. Alternatively, this function accepts a - transposed array by setting the `coil_axis` argument accordingly. Inputs - should be images if `method` is `'walsh'` or `'inati'`, and k-space data - if `method` is `'espirit'`. - coil_axis: An `int`. Defaults to -1. - method: A `string`. The coil sensitivity estimation algorithm. Must be one - of: `{'walsh', 'inati', 'espirit'}`. Defaults to `'walsh'`. - **kwargs: Additional keyword arguments for the coil sensitivity estimation - algorithm. See Notes. - - Returns: - A `Tensor`. Has the same type as `input_`. Has shape - `input_.shape + [num_maps]` if `method` is `'espirit'`, or shape - `input_.shape` otherwise. - - Notes: - - This function accepts the following method-specific keyword arguments: - - * For `method="walsh"`: - - * **filter_size**: An `int`. The size of the smoothing filter. - - * For `method="inati"`: - - * **filter_size**: An `int`. The size of the smoothing filter. - * **max_iter**: An `int`. The maximum number of iterations. - * **tol**: A `float`. The convergence tolerance. - - * For `method="espirit"`: - - * **calib_size**: An `int` or a list of `ints`. The size of the - calibration region. 
If `None`, this is set to `input_.shape[:-1]` (ie, - use full input for calibration). Defaults to 24. - * **kernel_size**: An `int` or a list of `ints`. The kernel size. Defaults - to 6. - * **num_maps**: An `int`. The number of output maps. Defaults to 2. - * **null_threshold**: A `float`. The threshold used to determine the size - of the null-space. Defaults to 0.02. - * **eigen_threshold**: A `float`. The threshold used to determine the - locations where coil sensitivity maps should be masked out. Defaults - to 0.95. - * **image_shape**: A `tf.TensorShape` or a list of `ints`. The shape of - the output maps. If `None`, this is set to `input_.shape`. Defaults to - `None`. - - References: - .. [1] Walsh, D.O., Gmitro, A.F. and Marcellin, M.W. (2000), Adaptive - reconstruction of phased array MR imagery. Magn. Reson. Med., 43: - 682-690. https://doi.org/10.1002/(SICI)1522-2594(200005)43:5<682::AID-MRM10>3.0.CO;2-G - - .. [2] Inati, S.J., Hansen, M.S. and Kellman, P. (2014). A fast optimal - method for coil sensitivity estimation and adaptive coil combination for - complex images. Proceedings of the 2014 Joint Annual Meeting - ISMRM-ESMRMB. - - .. [3] Uecker, M., Lai, P., Murphy, M.J., Virtue, P., Elad, M., Pauly, J.M., - Vasanawala, S.S. and Lustig, M. (2014), ESPIRiT—an eigenvalue approach - to autocalibrating parallel MRI: Where SENSE meets GRAPPA. Magn. Reson. - Med., 71: 990-1001. https://doi.org/10.1002/mrm.24751 - """ - # pylint: disable=missing-raises-doc - input_ = tf.convert_to_tensor(input_) - tf.debugging.assert_rank_at_least(input_, 2, message=( - f"Argument `input_` must have rank of at least 2, but got shape: " - f"{input_.shape}")) - coil_axis = check_util.validate_type(coil_axis, int, name='coil_axis') - method = check_util.validate_enum( - method, {'walsh', 'inati', 'espirit'}, name='method') - - # Move coil axis to innermost dimension if not already there. 
- if coil_axis != -1: - rank = input_.shape.rank - canonical_coil_axis = coil_axis + rank if coil_axis < 0 else coil_axis - perm = ( - [ax for ax in range(rank) if not ax == canonical_coil_axis] + - [canonical_coil_axis]) - input_ = tf.transpose(input_, perm) - - if method == 'walsh': - maps = _estimate_coil_sensitivities_walsh(input_, **kwargs) - elif method == 'inati': - maps = _estimate_coil_sensitivities_inati(input_, **kwargs) - elif method == 'espirit': - maps = _estimate_coil_sensitivities_espirit(input_, **kwargs) - else: - raise RuntimeError("This should never happen.") - - # If necessary, move coil axis back to its original location. - if coil_axis != -1: - inv_perm = tf.math.invert_permutation(perm) - if method == 'espirit': - # When using ESPIRiT method, output has an additional `maps` dimension. - inv_perm = tf.concat([inv_perm, [tf.shape(inv_perm)[0]]], 0) - maps = tf.transpose(maps, inv_perm) - - return maps - - -@api_util.export("coils.combine_coils") -def combine_coils(images, maps=None, coil_axis=-1, keepdims=False): - """Sum of squares or adaptive coil combination. - - Args: - images: A `Tensor`. The input images. - maps: A `Tensor`. The coil sensitivity maps. This argument is optional. - If `maps` is provided, it must have the same shape and type as - `images`. In this case an adaptive coil combination is performed using - the specified maps. If `maps` is `None`, a simple estimate of `maps` - is used (ie, images are combined using the sum of squares method). - coil_axis: An `int`. The coil axis. Defaults to -1. - keepdims: A `boolean`. If `True`, retains the coil dimension with size 1. - - Returns: - A `Tensor`. The combined images. - - References: - .. [1] Roemer, P.B., Edelstein, W.A., Hayes, C.E., Souza, S.P. and - Mueller, O.M. (1990), The NMR phased array. Magn Reson Med, 16: - 192-225. https://doi.org/10.1002/mrm.1910160203 - - .. [2] Bydder, M., Larkman, D. and Hajnal, J. 
(2002), Combination of signals - from array coils using image-based estimation of coil sensitivity - profiles. Magn. Reson. Med., 47: 539-548. - https://doi.org/10.1002/mrm.10092 - """ - images = tf.convert_to_tensor(images) - if maps is not None: - maps = tf.convert_to_tensor(maps) - - if maps is None: - combined = tf.math.sqrt( - tf.math.reduce_sum(images * tf.math.conj(images), - axis=coil_axis, keepdims=keepdims)) - - else: - combined = tf.math.divide_no_nan( - tf.math.reduce_sum(images * tf.math.conj(maps), - axis=coil_axis, keepdims=keepdims), - tf.math.reduce_sum(maps * tf.math.conj(maps), - axis=coil_axis, keepdims=keepdims)) - - return combined - - -def _estimate_coil_sensitivities_walsh(images, filter_size=5): - """Estimate coil sensitivity maps using Walsh's method. - - For the parameters, see `estimate_coil_sensitivities`. - """ - rank = images.shape.rank - 1 - image_shape = tf.shape(images)[:-1] - num_coils = tf.shape(images)[-1] - - filter_size = check_util.validate_list( - filter_size, element_type=int, length=rank, name='filter_size') - - # Flatten all spatial dimensions into a single axis, so `images` has shape - # `[num_pixels, num_coils]`. - flat_images = tf.reshape(images, [-1, num_coils]) - - # Compute covariance matrix for each pixel; with shape - # `[num_pixels, num_coils, num_coils]`. - correlation_matrix = tf.math.multiply( - tf.reshape(flat_images, [-1, num_coils, 1]), - tf.math.conj(tf.reshape(flat_images, [-1, 1, num_coils]))) - - # Smooth the covariance tensor along the spatial dimensions. - correlation_matrix = tf.reshape( - correlation_matrix, tf.concat([image_shape, [-1]], 0)) - correlation_matrix = _apply_uniform_filter(correlation_matrix, filter_size) - correlation_matrix = tf.reshape(correlation_matrix, [-1] + [num_coils] * 2) - - # Get sensitivity maps as the dominant eigenvector. 
- _, eigenvectors = tf.linalg.eig(correlation_matrix) # pylint: disable=no-value-for-parameter - maps = eigenvectors[..., -1] - - # Restore spatial axes. - maps = tf.reshape(maps, tf.concat([image_shape, [num_coils]], 0)) - - return maps - - -def _estimate_coil_sensitivities_inati(images, - filter_size=5, - max_iter=5, - tol=1e-3): - """Estimate coil sensitivity maps using Inati's fast method. - - For the parameters, see `estimate_coil_sensitivities`. - """ - rank = images.shape.rank - 1 - spatial_axes = list(range(rank)) - coil_axis = -1 - - # Validate inputs. - filter_size = check_util.validate_list( - filter_size, element_type=int, length=rank, name='filter_size') - max_iter = check_util.validate_type(max_iter, int, name='max_iter') - tol = check_util.validate_type(tol, float, name='tol') - - d_sum = tf.math.reduce_sum(images, axis=spatial_axes, keepdims=True) - d_sum /= tf.norm(d_sum, axis=coil_axis, keepdims=True) - - r = tf.math.reduce_sum( - tf.math.conj(d_sum) * images, axis=coil_axis, keepdims=True) - - eps = tf.cast( - tnp.finfo(images.dtype).eps * tf.math.reduce_mean(tf.math.abs(images)), - images.dtype) - - State = collections.namedtuple('State', ['i', 'maps', 'r', 'd']) - - def _cond(i, state): - return tf.math.logical_and(i < max_iter, state.d >= tol) - - def _body(i, state): - prev_r = state.r - r = state.r - - r = tf.math.conj(r) - - maps = images * r - smooth_maps = _apply_uniform_filter(maps, filter_size) - d = smooth_maps * tf.math.conj(smooth_maps) - - # Sum over coils. 
- r = tf.math.reduce_sum(d, axis=coil_axis, keepdims=True) - - r = tf.math.sqrt(r) - r = tf.math.reciprocal(r + eps) - - maps = smooth_maps * r - - d = images * tf.math.conj(maps) - r = tf.math.reduce_sum(d, axis=coil_axis, keepdims=True) - - d = maps * r - - d_sum = tf.math.reduce_sum(d, axis=spatial_axes, keepdims=True) - d_sum /= tf.norm(d_sum, axis=coil_axis, keepdims=True) - - im_t = tf.math.reduce_sum( - tf.math.conj(d_sum) * maps, axis=coil_axis, keepdims=True) - im_t /= (tf.cast(tf.math.abs(im_t), images.dtype) + eps) - r *= im_t - im_t = tf.math.conj(im_t) - maps = maps * im_t - - diff_r = r - prev_r - d = tf.math.abs(tf.norm(diff_r) / tf.norm(r)) - - return i + 1, State(i=i + 1, maps=maps, r=r, d=d) - - i = tf.constant(0, dtype=tf.int32) - state = State(i=i, - maps=tf.zeros_like(images), - r=r, - d=tf.constant(1.0, dtype=images.dtype.real_dtype)) - [i, state] = tf.while_loop(_cond, _body, [i, state]) - - return tf.reshape(state.maps, images.shape) - - -def _estimate_coil_sensitivities_espirit(kspace, - calib_size=24, - kernel_size=6, - num_maps=2, - null_threshold=0.02, - eigen_threshold=0.95, - image_shape=None): - """Estimate coil sensitivity maps using the ESPIRiT method. - - For the parameters, see `estimate_coil_sensitivities`. 
- """ - kspace = tf.convert_to_tensor(kspace) - rank = kspace.shape.rank - 1 - spatial_axes = list(range(rank)) - num_coils = tf.shape(kspace)[-1] - if image_shape is None: - image_shape = kspace.shape[:-1] - if calib_size is None: - calib_size = image_shape.as_list() - - calib_size = check_util.validate_list( - calib_size, element_type=int, length=rank, name='calib_size') - kernel_size = check_util.validate_list( - kernel_size, element_type=int, length=rank, name='kernel_size') - - with tf.control_dependencies([ - tf.debugging.assert_greater(calib_size, kernel_size, message=( - f"`calib_size` must be greater than `kernel_size`, but got " - f"{calib_size} and {kernel_size}"))]): - kspace = tf.identity(kspace) - - # Get calibration region. - calib = array_ops.central_crop(kspace, calib_size + [-1]) - - # Construct the calibration block Hankel matrix. - conv_size = [cs - ks + 1 for cs, ks in zip(calib_size, kernel_size)] - calib_matrix = tf.zeros([_prod(conv_size), _prod(kernel_size) * num_coils], - dtype=calib.dtype) - idx = 0 - for nd_inds in np.ndindex(*conv_size): - slices = [slice(ii, ii + ks) for ii, ks in zip(nd_inds, kernel_size)] - calib_matrix = tf.tensor_scatter_nd_update( - calib_matrix, [[idx]], tf.reshape(calib[slices], [1, -1])) - idx += 1 - - # Compute SVD decomposition, threshold singular values and reshape V to create - # k-space kernel matrix. - s, _, v = tf.linalg.svd(calib_matrix, full_matrices=True) - num_values = tf.math.count_nonzero(s >= s[0] * null_threshold) - v = v[:, :num_values] - kernel = tf.reshape(v, kernel_size + [num_coils, -1]) - - # Rotate kernel to order by maximum variance. 
- perm = list(range(kernel.shape.rank)) - perm[-2], perm[-1] = perm[-1], perm[-2] - kernel = tf.transpose(kernel, perm) - kernel = tf.reshape(kernel, [-1, num_coils]) - _, _, rot_matrix = tf.linalg.svd(kernel, full_matrices=False) - kernel = tf.linalg.matmul(kernel, rot_matrix) - kernel = tf.reshape(kernel, kernel_size + [-1, num_coils]) - kernel = tf.transpose(kernel, perm) - - # Compute inverse FFT of k-space kernel. - kernel = tf.reverse(kernel, spatial_axes) - kernel = tf.math.conj(kernel) - - kernel_image = fft_ops.fftn(kernel, - shape=image_shape, - axes=list(range(rank)), - shift=True) - - kernel_image /= tf.cast(tf.sqrt(tf.cast(tf.math.reduce_prod(kernel_size), - kernel_image.dtype.real_dtype)), - kernel_image.dtype) - - values, maps, _ = tf.linalg.svd(kernel_image, full_matrices=False) - - # Apply phase modulation. - maps *= tf.math.exp(tf.complex(tf.constant(0.0, dtype=maps.dtype.real_dtype), - -tf.math.angle(maps[..., 0:1, :]))) - - # Undo rotation. - maps = tf.linalg.matmul(rot_matrix, maps) - - # Keep only the requested number of maps. - values = values[..., :num_maps] - maps = maps[..., :num_maps] - - # Apply thresholding. - mask = tf.expand_dims(values >= eigen_threshold, -2) - maps *= tf.cast(mask, maps.dtype) - - # If possible, set static number of maps. - if isinstance(num_maps, int): - maps_shape = maps.shape.as_list() - maps_shape[-1] = num_maps - maps = tf.ensure_shape(maps, maps_shape) - - return maps - - -@api_util.export("coils.compress_coils") -def compress_coils(kspace, - coil_axis=-1, - out_coils=None, - method='svd', - **kwargs): - """Coil compression gateway. - - This function estimates a coil compression matrix and uses it to compress - `kspace`. If you would like to reuse a coil compression matrix or need to - calibrate the compression using different data, use - `tfmri.coils.CoilCompressorSVD`. 
- - This function supports the following coil compression methods: - - * **SVD**: Based on direct singular-value decomposition (SVD) of *k*-space - data [1]_. This coil compression method supports Cartesian and - non-Cartesian data. This method is resilient to noise, but does not - achieve optimal compression if there are fully-sampled dimensions. - - .. * **Geometric**: Performs local compression along fully-sampled dimensions - .. to improve compression. This method only supports Cartesian data. This - .. method can suffer from low SNR in sections of k-space. - .. * **ESPIRiT**: Performs local compression along fully-sampled dimensions - .. and is robust to noise. This method only supports Cartesian data. - - Args: - kspace: A `Tensor`. The multi-coil *k*-space data. Must have type - `complex64` or `complex128`. Must have shape `[..., Cin]`, where `...` are - the encoding dimensions and `Cin` is the number of coils. Alternatively, - the position of the coil axis may be different as long as the `coil_axis` - argument is set accordingly. If `method` is `"svd"`, `kspace` can be - Cartesian or non-Cartesian. If `method` is `"geometric"` or `"espirit"`, - `kspace` must be Cartesian. - coil_axis: An `int`. Defaults to -1. - out_coils: An `int`. The desired number of virtual output coils. - method: A `string`. The coil compression algorithm. Must be `"svd"`. - **kwargs: Additional method-specific keyword arguments to be passed to the - coil compressor. - - Returns: - A `Tensor` containing the compressed *k*-space data. Has shape - `[..., Cout]`, where `Cout` is determined based on `out_coils` or - other inputs and `...` are the unmodified encoding dimensions. - - References: - .. [1] Huang, F., Vijayakumar, S., Li, Y., Hertel, S. and Duensing, G.R. - (2008). A software channel compression technique for faster reconstruction - with many channels. Magn Reson Imaging, 26(1): 133-141. - .. [2] Zhang, T., Pauly, J.M., Vasanawala, S.S. and Lustig, M. 
(2013), Coil - compression for accelerated imaging with Cartesian sampling. Magn - Reson Med, 69: 571-582. https://doi.org/10.1002/mrm.24267 - .. [3] Bahri, D., Uecker, M., & Lustig, M. (2013). ESPIRIT-based coil - compression for cartesian sampling. In Proceedings of the 21st - Annual Meeting of ISMRM, Salt Lake City, Utah, USA (Vol. 47). - """ - # pylint: disable=missing-raises-doc - kspace = tf.convert_to_tensor(kspace) - tf.debugging.assert_rank_at_least(kspace, 2, message=( - f"Argument `kspace` must have rank of at least 2, but got shape: " - f"{kspace.shape}")) - coil_axis = check_util.validate_type(coil_axis, int, name='coil_axis') - method = check_util.validate_enum( - method, {'svd', 'geometric', 'espirit'}, name='method') - - # Calculate the compression matrix, unless one was already provided. - if method == 'svd': - return CoilCompressorSVD(coil_axis=coil_axis, - out_coils=out_coils, - **kwargs).fit_transform(kspace) - - raise NotImplementedError(f"Method {method} not implemented.") - - -class _CoilCompressor(): - """Base class for coil compressors. - - Args: - coil_axis: An `int`. The axis of the coil dimension. - out_coils: An `int`. The desired number of virtual output coils. - """ - def __init__(self, coil_axis=-1, out_coils=None): - self._coil_axis = coil_axis - self._out_coils = out_coils - - @abc.abstractmethod - def fit(self, kspace): - pass - - @abc.abstractmethod - def transform(self, kspace): - pass - - def fit_transform(self, kspace): - return self.fit(kspace).transform(kspace) - - -@api_util.export("coils.CoilCompressorSVD") -class CoilCompressorSVD(_CoilCompressor): - """SVD-based coil compression. - - This class implements the SVD-based coil compression method [1]_. - - Use this class to compress multi-coil *k*-space data. The method `fit` must - be used first to calculate the coil compression matrix. The method `transform` - can then be used to compress *k*-space data. 
If the data to be used for - fitting is the same data to be transformed, you can also use the method - `fit_transform` to fit and transform the data in one step. - - Args: - coil_axis: An `int`. Defaults to -1. - out_coils: An `int`. The desired number of virtual output coils. Cannot be - used together with `variance_ratio`. - variance_ratio: A `float` between 0.0 and 1.0. The percentage of total - variance to be retained. The number of virtual coils is automatically - selected to retain at least this percentage of variance. Cannot be used - together with `out_coils`. - - References: - .. [1] Huang, F., Vijayakumar, S., Li, Y., Hertel, S. and Duensing, G.R. - (2008). A software channel compression technique for faster reconstruction - with many channels. Magn Reson Imaging, 26(1): 133-141. - """ - def __init__(self, coil_axis=-1, out_coils=None, variance_ratio=None): - if out_coils is not None and variance_ratio is not None: - raise ValueError("Cannot specify both `out_coils` and `variance_ratio`.") - super().__init__(coil_axis=coil_axis, out_coils=out_coils) - self._variance_ratio = variance_ratio - self._singular_values = None - self._explained_variance = None - self._explained_variance_ratio = None - - def fit(self, kspace): - """Fits the coil compression matrix. - - Args: - kspace: A `Tensor`. The multi-coil *k*-space data. Must have type - `complex64` or `complex128`. - - Returns: - The fitted `CoilCompressorSVD` object. - """ - kspace = tf.convert_to_tensor(kspace) - - # Move coil axis to innermost dimension if not already there. - kspace, _ = self._permute_coil_axis(kspace) - - # Flatten the encoding dimensions. - num_coils = tf.shape(kspace)[-1] - kspace = tf.reshape(kspace, [-1, num_coils]) - num_samples = tf.shape(kspace)[0] - - # Compute singular-value decomposition. - s, u, v = tf.linalg.svd(kspace) - - # Compresion matrix. - self._matrix = tf.cond(num_samples > num_coils, lambda: v, lambda: u) - - # Get variance. 
- self._singular_values = s - self._explained_variance = s ** 2 / tf.cast(num_samples - 1, s.dtype) - total_variance = tf.math.reduce_sum(self._explained_variance) - self._explained_variance_ratio = self._explained_variance / total_variance - - # Get output coils from variance ratio. - if self._variance_ratio is not None: - cum_variance = tf.math.cumsum(self._explained_variance_ratio, axis=0) - self._out_coils = tf.math.count_nonzero( - cum_variance <= self._variance_ratio) - - # Remove unnecessary virtual coils. - if self._out_coils is not None: - self._matrix = self._matrix[:, :self._out_coils] - - # If possible, set static number of output coils. - if isinstance(self._out_coils, int): - self._matrix = tf.ensure_shape(self._matrix, [None, self._out_coils]) - - return self - - def transform(self, kspace): - """Applies the coil compression matrix to the input *k*-space. - - Args: - kspace: A `Tensor`. The multi-coil *k*-space data. Must have type - `complex64` or `complex128`. - - Returns: - The transformed k-space. - """ - kspace = tf.convert_to_tensor(kspace) - kspace, inv_perm = self._permute_coil_axis(kspace) - - # Some info. - encoding_dimensions = tf.shape(kspace)[:-1] - num_coils = tf.shape(kspace)[-1] - out_coils = tf.shape(self._matrix)[-1] - - # Flatten the encoding dimensions. - kspace = tf.reshape(kspace, [-1, num_coils]) - - # Apply compression. - kspace = tf.linalg.matmul(kspace, self._matrix) - - # Restore data shape. - kspace = tf.reshape( - kspace, - tf.concat([encoding_dimensions, [out_coils]], 0)) - - if inv_perm is not None: - kspace = tf.transpose(kspace, inv_perm) - - return kspace - - def _permute_coil_axis(self, kspace): - """Permutes the coil axis to the last dimension. - - Args: - kspace: A `Tensor`. The multi-coil *k*-space data. - - Returns: - A tuple of the permuted k-space and the inverse permutation. - """ - if self._coil_axis != -1: - rank = kspace.shape.rank # Rank must be known statically. 
- canonical_coil_axis = ( - self._coil_axis + rank if self._coil_axis < 0 else self._coil_axis) - perm = ( - [ax for ax in range(rank) if not ax == canonical_coil_axis] + - [canonical_coil_axis]) - kspace = tf.transpose(kspace, perm) - inv_perm = tf.math.invert_permutation(perm) - return kspace, inv_perm - return kspace, None - - @property - def singular_values(self): - """The singular values associated with each virtual coil.""" - return self._singular_values - - @property - def explained_variance(self): - """The variance explained by each virtual coil.""" - return self._explained_variance - - @property - def explained_variance_ratio(self): - """The percentage of variance explained by each virtual coil.""" - return self._explained_variance_ratio - - -def _apply_uniform_filter(tensor, size=5): - """Apply a uniform filter. - - Args: - tensor: A `Tensor`. Must have shape `spatial_shape + [channels]`. - size: An `int`. The size of the filter. Defaults to 5. - - Returns: - A `Tensor`. Has the same type as `tensor`. - """ - rank = tensor.shape.rank - 1 - - # Compute filters. - if isinstance(size, int): - size = [size] * rank - filters_shape = size + [1, 1] - filters = tf.ones(filters_shape, dtype=tensor.dtype.real_dtype) - filters /= _prod(size) - - # Select appropriate convolution function. - conv_nd = { - 1: tf.nn.conv1d, - 2: tf.nn.conv2d, - 3: tf.nn.conv3d}[rank] - - # Move channels dimension to batch dimension. - tensor = tf.transpose(tensor) - - # Add a channels dimension, as required by `tf.nn.conv*` functions. - tensor = tf.expand_dims(tensor, -1) - - if tensor.dtype.is_complex: - # For complex input, we filter the real and imaginary parts separately. 
- tensor_real = tf.math.real(tensor) - tensor_imag = tf.math.imag(tensor) - - output_real = conv_nd(tensor_real, filters, [1] * (rank + 2), 'SAME') - output_imag = conv_nd(tensor_imag, filters, [1] * (rank + 2), 'SAME') - - output = tf.dtypes.complex(output_real, output_imag) - else: - output = conv_nd(tensor, filters, [1] * (rank + 2), 'SAME') - - # Remove channels dimension. - output = output[..., 0] - - # Move channels dimension back to last dimension. - output = tf.transpose(output) - - return output - - -_prod = lambda iterable: functools.reduce(lambda x, y: x * y, iterable) diff --git a/tensorflow_mri/python/ops/coil_ops_test.py b/tensorflow_mri/python/ops/coil_ops_test.py deleted file mode 100755 index 7a37c8b7..00000000 --- a/tensorflow_mri/python/ops/coil_ops_test.py +++ /dev/null @@ -1,258 +0,0 @@ -# Copyright 2021 University College London. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Tests for module `coil_ops`.""" - -import itertools - -from absl.testing import parameterized -import tensorflow as tf - -from tensorflow_mri.python.ops import coil_ops -from tensorflow_mri.python.ops import image_ops -from tensorflow_mri.python.util import io_util -from tensorflow_mri.python.util import test_util - -# Many tests on this file have high tolerance for numerical errors, likely due -# to issues with `tf.linalg.svd`. TODO: come up with a better solution. 
- -class SensMapsTest(test_util.TestCase): - """Tests for ops related to estimation of coil sensitivity maps.""" - - @classmethod - def setUpClass(cls): - - super().setUpClass() - cls.data = io_util.read_hdf5('tests/data/coil_ops_data.h5') - - @test_util.run_in_graph_and_eager_modes - def test_walsh(self): - """Test Walsh's method.""" - # GPU results are close, but about 1-2% of values show deviations up to - # 1e-3. This is probably related to TF issue: - # https://github.com/tensorflow/tensorflow/issues/45756 - # In the meantime, we run these tests on the CPU only. Same applies to all - # other tests in this class. - with tf.device('/cpu:0'): - maps = coil_ops.estimate_coil_sensitivities( - self.data['images'], method='walsh') - - self.assertAllClose(maps, self.data['maps/walsh'], rtol=1e-2, atol=1e-2) - - @test_util.run_in_graph_and_eager_modes - def test_walsh_transposed(self): - """Test Walsh's method with a transposed array.""" - with tf.device('/cpu:0'): - maps = coil_ops.estimate_coil_sensitivities( - tf.transpose(self.data['images'], [2, 0, 1]), - coil_axis=0, method='walsh') - - self.assertAllClose(maps, tf.transpose(self.data['maps/walsh'], [2, 0, 1]), - rtol=1e-2, atol=1e-2) - - @test_util.run_in_graph_and_eager_modes - def test_inati(self): - """Test Inati's method.""" - with tf.device('/cpu:0'): - maps = coil_ops.estimate_coil_sensitivities( - self.data['images'], method='inati') - - self.assertAllClose(maps, self.data['maps/inati'], rtol=1e-4, atol=1e-4) - - @test_util.run_in_graph_and_eager_modes - def test_espirit(self): - """Test ESPIRiT method.""" - with tf.device('/cpu:0'): - maps = coil_ops.estimate_coil_sensitivities( - self.data['kspace'], method='espirit') - - self.assertAllClose(maps, self.data['maps/espirit'], rtol=1e-2, atol=1e-2) - - @test_util.run_in_graph_and_eager_modes - def test_espirit_transposed(self): - """Test ESPIRiT method with a transposed array.""" - with tf.device('/cpu:0'): - maps = coil_ops.estimate_coil_sensitivities( - 
tf.transpose(self.data['kspace'], [2, 0, 1]), - coil_axis=0, method='espirit') - - self.assertAllClose( - maps, tf.transpose(self.data['maps/espirit'], [2, 0, 1, 3]), - rtol=1e-2, atol=1e-2) - - @test_util.run_in_graph_and_eager_modes - def test_walsh_3d(self): - """Test Walsh method with 3D image.""" - with tf.device('/cpu:0'): - image = image_ops.phantom(shape=[64, 64, 64], num_coils=4) - # Currently only testing if it runs. - maps = coil_ops.estimate_coil_sensitivities(image, # pylint: disable=unused-variable - coil_axis=0, - method='walsh') - - -class CoilCombineTest(test_util.TestCase): - """Tests for coil combination op.""" - - @parameterized.product(coil_axis=[0, -1], - keepdims=[True, False]) - @test_util.run_in_graph_and_eager_modes - def test_sos(self, coil_axis, keepdims): # pylint: disable=missing-param-doc - """Test sum of squares combination.""" - - images = self._random_complex((20, 20, 8)) - - combined = coil_ops.combine_coils( - images, coil_axis=coil_axis, keepdims=keepdims) - - ref = tf.math.sqrt( - tf.math.reduce_sum(images * tf.math.conj(images), - axis=coil_axis, keepdims=keepdims)) - - self.assertAllEqual(combined.shape, ref.shape) - self.assertAllClose(combined, ref) - - - @parameterized.product(coil_axis=[0, -1], - keepdims=[True, False]) - @test_util.run_in_graph_and_eager_modes - def test_adaptive(self, coil_axis, keepdims): # pylint: disable=missing-param-doc - """Test adaptive combination.""" - - images = self._random_complex((20, 20, 8)) - maps = self._random_complex((20, 20, 8)) - - combined = coil_ops.combine_coils( - images, maps=maps, coil_axis=coil_axis, keepdims=keepdims) - - ref = tf.math.reduce_sum(images * tf.math.conj(maps), - axis=coil_axis, keepdims=keepdims) - - ref /= tf.math.reduce_sum(maps * tf.math.conj(maps), - axis=coil_axis, keepdims=keepdims) - - self.assertAllEqual(combined.shape, ref.shape) - self.assertAllClose(combined, ref) - - def setUp(self): - super().setUp() - tf.random.set_seed(0) - - def 
_random_complex(self, shape): - return tf.dtypes.complex( - tf.random.normal(shape), - tf.random.normal(shape)) - - -class CoilCompressionTest(test_util.TestCase): - """Tests for coil compression op.""" - - @classmethod - def setUpClass(cls): - super().setUpClass() - cls.data = io_util.read_hdf5('tests/data/coil_ops_data.h5') - - @test_util.run_in_graph_and_eager_modes - def test_coil_compression_svd(self): - """Test SVD coil compression.""" - kspace = self.data['cc/kspace'] - result = self.data['cc/result/svd'] - - cc_kspace = coil_ops.compress_coils(kspace) - - self.assertAllClose(cc_kspace, result, rtol=1e-2, atol=1e-2) - - @test_util.run_in_graph_and_eager_modes - def test_coil_compression_svd_two_step(self): - """Test SVD coil compression using two-step API.""" - kspace = self.data['cc/kspace'] - result = self.data['cc/result/svd'] - - compressor = coil_ops.CoilCompressorSVD(out_coils=16) - compressor = compressor.fit(kspace) - cc_kspace = compressor.transform(kspace) - self.assertAllClose(cc_kspace, result[..., :16], rtol=1e-2, atol=1e-2) - - @test_util.run_in_graph_and_eager_modes - def test_coil_compression_svd_transposed(self): - """Test SVD coil compression using two-step API.""" - kspace = self.data['cc/kspace'] - result = self.data['cc/result/svd'] - - kspace = tf.transpose(kspace, [2, 0, 1]) - cc_kspace = coil_ops.compress_coils(kspace, coil_axis=0) - cc_kspace = tf.transpose(cc_kspace, [1, 2, 0]) - - self.assertAllClose(cc_kspace, result, rtol=1e-2, atol=1e-2) - - @test_util.run_in_graph_and_eager_modes - def test_coil_compression_svd_basic(self): - """Test coil compression using SVD method with basic arrays.""" - shape = (20, 20, 8) - data = tf.dtypes.complex( - tf.random.stateless_normal(shape, [32, 43]), - tf.random.stateless_normal(shape, [321, 321])) - - params = { - 'out_coils': [None, 4], - 'variance_ratio': [None, 0.75]} - - values = itertools.product(*params.values()) - params = [dict(zip(params.keys(), v)) for v in values] - - for p in 
params: - with self.subTest(**p): - if p['out_coils'] is not None and p['variance_ratio'] is not None: - with self.assertRaisesRegex( - ValueError, - "Cannot specify both `out_coils` and `variance_ratio`"): - coil_ops.compress_coils(data, **p) - continue - - # Test op. - compressed_data = coil_ops.compress_coils(data, **p) - - # Flatten input data. - encoding_dims = tf.shape(data)[:-1] - input_coils = tf.shape(data)[-1] - data = tf.reshape(data, (-1, tf.shape(data)[-1])) - samples = tf.shape(data)[0] - - # Calculate compression matrix. - # This should be equivalent to TF line below. Not sure why - # not. Giving up. - # u, s, vh = np.linalg.svd(data, full_matrices=False) - # v = vh.T.conj() - s, u, v = tf.linalg.svd(data, full_matrices=False) - matrix = tf.cond(samples > input_coils, lambda v=v: v, lambda u=u: u) - - out_coils = input_coils - if p['variance_ratio'] and not p['out_coils']: - variance = s ** 2 / 399.0 - out_coils = tf.math.count_nonzero( - tf.math.cumsum(variance / tf.math.reduce_sum(variance), axis=0) <= - p['variance_ratio']) - if p['out_coils']: - out_coils = p['out_coils'] - matrix = matrix[:, :out_coils] - - ref_data = tf.matmul(data, matrix) - ref_data = tf.reshape( - ref_data, tf.concat([encoding_dims, [out_coils]], 0)) - - self.assertAllClose(compressed_data, ref_data) - - -if __name__ == '__main__': - tf.test.main() diff --git a/tensorflow_mri/python/ops/convex_ops.py b/tensorflow_mri/python/ops/convex_ops.py index cd21bdb1..20b11961 100644 --- a/tensorflow_mri/python/ops/convex_ops.py +++ b/tensorflow_mri/python/ops/convex_ops.py @@ -1,4 +1,4 @@ -# Copyright 2021 University College London. All Rights Reserved. +# Copyright 2021 The TensorFlow MRI Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -20,14 +20,16 @@ import numpy as np import tensorflow as tf +from tensorflow_mri.python.linalg import conjugate_gradient +from tensorflow_mri.python.linalg import linear_operator_finite_difference +from tensorflow_mri.python.linalg import linear_operator_wavelet from tensorflow_mri.python.ops import array_ops -from tensorflow_mri.python.util import deprecation -from tensorflow_mri.python.ops import linalg_ops from tensorflow_mri.python.ops import math_ops from tensorflow_mri.python.util import api_util from tensorflow_mri.python.util import check_util from tensorflow_mri.python.util import linalg_ext -from tensorflow_mri.python.util import linalg_imaging +from tensorflow_mri.python.linalg import linear_operator +from tensorflow_mri.python.util import deprecation from tensorflow_mri.python.util import tensor_util @@ -36,8 +38,8 @@ class ConvexFunction(): r"""Base class defining a [batch of] convex function[s]. Represents a closed proper convex function - :math:`f : \mathbb{R}^{n}\rightarrow \mathbb{R}` or - :math:`f : \mathbb{C}^{n}\rightarrow \mathbb{R}`. + $f : \mathbb{R}^{n}\rightarrow \mathbb{R}$ or + $f : \mathbb{C}^{n}\rightarrow \mathbb{R}$. Subclasses should implement the `_call` and `_prox` methods to define the forward pass and the proximal mapping, respectively. Gradients are @@ -289,8 +291,8 @@ def _check_input_dtype(self, arg): class ConvexFunctionAffineMappingComposition(ConvexFunction): """Composes a convex function and an affine mapping. - Represents :math:`f(Ax + b)`, where :math:`f` is a `ConvexFunction`, - :math:`A` is a `LinearOperator` and :math:`b` is a constant `Tensor`. + Represents $f(Ax + b)$, where $f$ is a `ConvexFunction`, + $A$ is a `LinearOperator` and $b$ is a constant `Tensor`. Args: function: A `ConvexFunction`. @@ -348,8 +350,8 @@ class ConvexFunctionLinearOperatorComposition( # pylint: disable=abstract-metho ConvexFunctionAffineMappingComposition): r"""Composes a convex function and a linear operator. 
- Represents :math:`f(Ax)`, where :math:`f` is a `ConvexFunction` and - :math:`A` is a `LinearOperator`. + Represents $f(Ax)$, where $f$ is a `ConvexFunction` and + $A$ is a `LinearOperator`. Args: function: A `ConvexFunction`. @@ -433,7 +435,7 @@ class ConvexFunctionIndicatorL1Ball(ConvexFunctionIndicatorBall): # pylint: dis name: A name for this `ConvexFunction`. References: - .. [1] Parikh, N., & Boyd, S. (2014). Proximal algorithms. Foundations and + 1. Parikh, N., & Boyd, S. (2014). Proximal algorithms. Foundations and Trends in optimization, 1(3), 127-239. """ def __init__(self, @@ -457,7 +459,7 @@ class ConvexFunctionIndicatorL2Ball(ConvexFunctionIndicatorBall): # pylint: dis name: A name for this `ConvexFunction`. References: - .. [1] Parikh, N., & Boyd, S. (2014). Proximal algorithms. Foundations and + 1. Parikh, N., & Boyd, S. (2014). Proximal algorithms. Foundations and Trends in optimization, 1(3), 127-239. """ def __init__(self, @@ -483,7 +485,7 @@ class ConvexFunctionNorm(ConvexFunction): # pylint: disable=abstract-method name: A name for this `ConvexFunction`. References: - .. [1] Parikh, N., & Boyd, S. (2014). Proximal algorithms. Foundations and + 1. Parikh, N., & Boyd, S. (2014). Proximal algorithms. Foundations and Trends in optimization, 1(3), 127-239. """ def __init__(self, @@ -543,7 +545,7 @@ class ConvexFunctionL1Norm(ConvexFunctionNorm): # pylint: disable=abstract-meth name: A name for this `ConvexFunction`. References: - .. [1] Parikh, N., & Boyd, S. (2014). Proximal algorithms. Foundations and + 1. Parikh, N., & Boyd, S. (2014). Proximal algorithms. Foundations and Trends in optimization, 1(3), 127-239. """ def __init__(self, @@ -567,7 +569,7 @@ class ConvexFunctionL2Norm(ConvexFunctionNorm): # pylint: disable=abstract-meth name: A name for this `ConvexFunction`. References: - .. [1] Parikh, N., & Boyd, S. (2014). Proximal algorithms. Foundations and + 1. Parikh, N., & Boyd, S. (2014). Proximal algorithms. 
Foundations and Trends in optimization, 1(3), 127-239. """ def __init__(self, @@ -591,7 +593,7 @@ class ConvexFunctionL2NormSquared(ConvexFunction): # pylint: disable=abstract-m name: A name for this `ConvexFunction`. References: - .. [1] Parikh, N., & Boyd, S. (2014). Proximal algorithms. Foundations and + 1. Parikh, N., & Boyd, S. (2014). Proximal algorithms. Foundations and Trends in optimization, 1(3), 127-239. """ def __init__(self, @@ -617,15 +619,15 @@ def _prox(self, x, scale=None): class ConvexFunctionTikhonov(ConvexFunctionAffineMappingComposition): # pylint: disable=abstract-method r"""A `ConvexFunction` representing a Tikhonov regularization term. - For a given input :math:`x`, computes - :math:`\lambda \left\| T(x - x_0) \right\|_2^2`, where :math:`\lambda` is a - scaling factor, :math:`T` is any linear operator and :math:`x_0` is + For a given input $x$, computes + $\lambda \left\| T(x - x_0) \right\|_2^2$, where $\lambda$ is a + scaling factor, $T$ is any linear operator and $x_0$ is a prior estimate. Args: - transform: A `tf.linalg.LinearOperator`. The Tikhonov operator :math:`T`. + transform: A `tf.linalg.LinearOperator`. The Tikhonov operator $T$. Defaults to the identity operator. - prior: A `tf.Tensor`. The prior estimate :math:`x_0`. Defaults to 0. + prior: A `tf.Tensor`. The prior estimate $x_0$. Defaults to 0. domain_dimension: A scalar integer `tf.Tensor`. The dimension of the domain. scale: A `float`. The scaling factor. dtype: A `tf.DType`. The dtype of the inputs. Defaults to `float32`. @@ -671,8 +673,8 @@ def prior(self): class ConvexFunctionTotalVariation(ConvexFunctionLinearOperatorComposition): # pylint: disable=abstract-method r"""A `ConvexFunction` representing a total variation regularization term. 
- For a given input :math:`x`, computes :math:`\lambda \left\| Dx \right\|_1`, - where :math:`\lambda` is a scaling factor and :math:`D` is the finite + For a given input $x$, computes $\lambda \left\| Dx \right\|_1$, + where $\lambda$ is a scaling factor and $D$ is the finite difference operator. Args: @@ -703,8 +705,9 @@ def __init__(self, # `LinearOperatorFiniteDifference` operates along one axis only. So for # multiple axes, we create one operator for each axis and vertically stack # them. - operators = [linalg_ops.LinearOperatorFiniteDifference( - domain_shape, axis=axis, dtype=dtype) for axis in axes] + operators = [ + linear_operator_finite_difference.LinearOperatorFiniteDifference( + domain_shape, axis=axis, dtype=dtype) for axis in axes] operator = linalg_ext.LinearOperatorVerticalStack(operators) function = ConvexFunctionL1Norm( domain_dimension=operator.range_dimension_tensor(), @@ -719,8 +722,8 @@ def __init__(self, class ConvexFunctionL1Wavelet(ConvexFunctionLinearOperatorComposition): # pylint: disable=abstract-method r"""A `ConvexFunction` representing an L1 wavelet regularization term. - For a given input :math:`x`, computes :math:`\lambda \left\| Dx \right\|_1`, - where :math:`\lambda` is a scaling factor and :math:`D` is a wavelet + For a given input $x$, computes $\lambda \left\| Dx \right\|_1$, + where $\lambda$ is a scaling factor and $D$ is a wavelet decomposition operator (see `tfmri.linalg.LinearOperatorWavelet`). 
Args: @@ -749,12 +752,12 @@ def __init__(self, scale=None, dtype=tf.dtypes.float32, name=None): - operator = linalg_ops.LinearOperatorWavelet(domain_shape, - wavelet, - mode=mode, - level=level, - axes=axes, - dtype=dtype) + operator = linear_operator_wavelet.LinearOperatorWavelet(domain_shape, + wavelet, + mode=mode, + level=level, + axes=axes, + dtype=dtype) function = ConvexFunctionL1Norm( domain_dimension=operator.range_dimension_tensor(), scale=scale, @@ -773,7 +776,7 @@ def _shape_tensor(self): class ConvexFunctionQuadratic(ConvexFunction): # pylint: disable=abstract-method r"""A `ConvexFunction` representing a generic quadratic function. - Represents :math:`f(x) = \frac{1}{2} x^{T} A x + b^{T} x + c`. + Represents $f(x) = \frac{1}{2} x^{T} A x + b^{T} x + c$. Args: quadratic_coefficient: A `tf.Tensor` or a `tf.linalg.LinearOperator` @@ -834,7 +837,8 @@ def _prox(self, x, scale=None, solver_kwargs=None): # pylint: disable=arguments rhs -= self._linear_coefficient solver_kwargs = solver_kwargs or {} - state = linalg_ops.conjugate_gradient(self._operator, rhs, **solver_kwargs) + state = conjugate_gradient.conjugate_gradient( + self._operator, rhs, **solver_kwargs) return state.x @@ -899,26 +903,26 @@ def constant_coefficient(self): class ConvexFunctionLeastSquares(ConvexFunctionQuadratic): # pylint: disable=abstract-method r"""A `ConvexFunction` representing a least squares function. - Represents :math:`f(x) = \frac{1}{2} {\left \| A x - b \right \|}_{2}^{2}`. + Represents $f(x) = \frac{1}{2} {\left \| A x - b \right \|}_{2}^{2}$. Minimizing `f(x)` is equivalent to finding a solution to the linear system - :math:`Ax - b`. + $Ax - b$. Args: operator: A `tf.Tensor` or a `tfmri.linalg.LinearOperator` representing a - matrix :math:`A` with shape `[..., m, n]`. The linear system operator. + matrix $A$ with shape `[..., m, n]`. The linear system operator. rhs: A `Tensor` representing a vector `b` with shape `[..., m]`. The right-hand side of the linear system. 
gram_operator: A `tf.Tensor` or a `tfmri.linalg.LinearOperator` representing the Gram matrix of `operator`. This may be used to provide a specialized - implementation of the Gram matrix :math:`A^H A`. Defaults to `None`, in + implementation of the Gram matrix $A^H A$. Defaults to `None`, in which case a naive implementation of the Gram matrix is derived from `operator`. scale: A `float`. A scaling factor. Defaults to 1.0. name: A name for this `ConvexFunction`. """ def __init__(self, operator, rhs, gram_operator=None, scale=None, name=None): - if isinstance(operator, linalg_imaging.LinalgImagingMixin): + if isinstance(operator, linear_operator.LinearOperatorMixin): rhs = operator.flatten_range_shape(rhs) if gram_operator: quadratic_coefficient = gram_operator diff --git a/tensorflow_mri/python/ops/convex_ops_test.py b/tensorflow_mri/python/ops/convex_ops_test.py index dbdb99df..9e1f9c3a 100644 --- a/tensorflow_mri/python/ops/convex_ops_test.py +++ b/tensorflow_mri/python/ops/convex_ops_test.py @@ -1,4 +1,4 @@ -# Copyright 2021 University College London. All Rights Reserved. +# Copyright 2021 The TensorFlow MRI Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tensorflow_mri/python/ops/fft_ops.py b/tensorflow_mri/python/ops/fft_ops.py index a1ce9371..b30c27b7 100644 --- a/tensorflow_mri/python/ops/fft_ops.py +++ b/tensorflow_mri/python/ops/fft_ops.py @@ -1,4 +1,4 @@ -# Copyright 2021 University College London. All Rights Reserved. +# Copyright 2021 The TensorFlow MRI Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -20,6 +20,13 @@ from tensorflow_mri.python.ops import array_ops from tensorflow_mri.python.util import api_util from tensorflow_mri.python.util import check_util +from tensorflow_mri.python.util import sys_util + + +if sys_util.is_op_library_enabled(): + # Load library in order to register the FFT kernels. + _mri_ops = tf.load_op_library( + tf.compat.v1.resource_loader.get_path_to_datafile('_mri_ops.so')) @api_util.export("signal.fft") @@ -30,8 +37,9 @@ def fftn(x, shape=None, axes=None, norm='backward', shift=False): number of axes in an `M`-dimensional array by means of the Fast Fourier Transform (FFT). - .. note:: + ```{note} `N` must be 1, 2 or 3. + ``` Args: x: A `Tensor`. Must be one of the following types: `complex64`, @@ -80,8 +88,9 @@ def ifftn(x, shape=None, axes=None, norm='backward', shift=False): Transform over any number of axes in an M-dimensional array by means of the Fast Fourier Transform (FFT). - .. note:: + ```{note} `N` must be 1, 2 or 3. + ``` Args: x: A `Tensor`. Must be one of the following types: `complex64`, diff --git a/tensorflow_mri/python/ops/fft_ops_test.py b/tensorflow_mri/python/ops/fft_ops_test.py index 5e4e7ea2..b78c4a95 100644 --- a/tensorflow_mri/python/ops/fft_ops_test.py +++ b/tensorflow_mri/python/ops/fft_ops_test.py @@ -1,4 +1,19 @@ -# Copyright 2021 University College London. All Rights Reserved. +# Copyright 2021 The TensorFlow MRI Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,18 +28,617 @@ # limitations under the License. # ============================================================================== """Tests for module `fft_ops`.""" +# pylint: disable=missing-function-docstring,unused-argument,missing-class-docstring,no-else-return import distutils import itertools +import unittest +from absl.testing import parameterized import numpy as np import tensorflow as tf +from tensorflow.core.protobuf import config_pb2 +from tensorflow.python.eager import context +from tensorflow.python.framework import dtypes +from tensorflow.python.framework import errors +from tensorflow.python.ops import array_ops +from tensorflow.python.ops import gen_spectral_ops +from tensorflow.python.ops import gradient_checker_v2 +from tensorflow.python.ops import math_ops +from tensorflow.python.platform import test from tensorflow_mri.python.ops import fft_ops from tensorflow_mri.python.util import test_util -class FFTOpsTest(test_util.TestCase): +VALID_FFT_RANKS = (1, 2, 3) + + +class BaseFFTOpsTest(test.TestCase): + """Base class for FFT tests.""" + def _compare(self, x, rank, fft_length=None, use_placeholder=False, + rtol=1e-4, atol=1e-4): + self._compare_forward(x, rank, fft_length, use_placeholder, rtol, atol) + self._compare_backward(x, rank, fft_length, use_placeholder, rtol, atol) + + def _compare_forward(self, x, rank, fft_length=None, use_placeholder=False, + rtol=1e-4, atol=1e-4): + x_np = self._np_fft(x, rank, fft_length) + if use_placeholder: + x_ph = array_ops.placeholder(dtype=dtypes.as_dtype(x.dtype)) + x_tf = self._tf_fft(x_ph, rank, fft_length, feed_dict={x_ph: x}) + else: + x_tf = self._tf_fft(x, rank, fft_length) + + self.assertAllClose(x_np, x_tf, rtol=rtol, 
atol=atol) + + def _compare_backward(self, x, rank, fft_length=None, use_placeholder=False, + rtol=1e-4, atol=1e-4): + x_np = self._np_ifft(x, rank, fft_length) + if use_placeholder: + x_ph = array_ops.placeholder(dtype=dtypes.as_dtype(x.dtype)) + x_tf = self._tf_ifft(x_ph, rank, fft_length, feed_dict={x_ph: x}) + else: + x_tf = self._tf_ifft(x, rank, fft_length) + + self.assertAllClose(x_np, x_tf, rtol=rtol, atol=atol) + + def _check_memory_fail(self, x, rank): + config = config_pb2.ConfigProto() + config.gpu_options.per_process_gpu_memory_fraction = 1e-2 + with self.cached_session(config=config, force_gpu=True): + self._tf_fft(x, rank, fft_length=None) + + def _check_grad_complex(self, func, x, y, result_is_complex=True, + rtol=1e-2, atol=1e-2): + with self.cached_session(): + + def f(inx, iny): + inx.set_shape(x.shape) + iny.set_shape(y.shape) + # func is a forward or inverse, real or complex, batched or unbatched + # FFT function with a complex input. + z = func(math_ops.complex(inx, iny)) + # loss = sum(|z|^2) + loss = math_ops.reduce_sum(math_ops.real(z * math_ops.conj(z))) + return loss + + ((x_jacob_t, y_jacob_t), (x_jacob_n, y_jacob_n)) = ( + gradient_checker_v2.compute_gradient(f, [x, y], delta=1e-2)) + + self.assertAllClose(x_jacob_t, x_jacob_n, rtol=rtol, atol=atol) + self.assertAllClose(y_jacob_t, y_jacob_n, rtol=rtol, atol=atol) + + def _check_grad_real(self, func, x, rtol=1e-2, atol=1e-2): + def f(inx): + inx.set_shape(x.shape) + # func is a forward RFFT function (batched or unbatched). 
+ z = func(inx) + # loss = sum(|z|^2) + loss = math_ops.reduce_sum(math_ops.real(z * math_ops.conj(z))) + return loss + + (x_jacob_t,), (x_jacob_n,) = gradient_checker_v2.compute_gradient( + f, [x], delta=1e-2) + self.assertAllClose(x_jacob_t, x_jacob_n, rtol=rtol, atol=atol) + + +@test_util.run_all_in_graph_and_eager_modes +class FFTNTest(BaseFFTOpsTest, parameterized.TestCase): + """Tests for `fftn`.""" + def _tf_fft(self, x, rank, fft_length=None, feed_dict=None): + # fft_length unused for complex FFTs. + with self.cached_session() as sess: + return sess.run(self._tf_fft_for_rank(rank)(x), feed_dict=feed_dict) + + def _tf_ifft(self, x, rank, fft_length=None, feed_dict=None): + # fft_length unused for complex FFTs. + with self.cached_session() as sess: + return sess.run(self._tf_ifft_for_rank(rank)(x), feed_dict=feed_dict) + + def _np_fft(self, x, rank, fft_length=None): + if rank == 1: + return np.fft.fft2(x, s=fft_length, axes=(-1,)) + elif rank == 2: + return np.fft.fft2(x, s=fft_length, axes=(-2, -1)) + elif rank == 3: + return np.fft.fft2(x, s=fft_length, axes=(-3, -2, -1)) + else: + raise ValueError("invalid rank") + + def _np_ifft(self, x, rank, fft_length=None): + if rank == 1: + return np.fft.ifft2(x, s=fft_length, axes=(-1,)) + elif rank == 2: + return np.fft.ifft2(x, s=fft_length, axes=(-2, -1)) + elif rank == 3: + return np.fft.ifft2(x, s=fft_length, axes=(-3, -2, -1)) + else: + raise ValueError("invalid rank") + + def _tf_fft_for_rank(self, rank): + if rank == 1: + return tf.signal.fft + elif rank == 2: + return tf.signal.fft2d + elif rank == 3: + return tf.signal.fft3d + else: + raise ValueError("invalid rank") + + def _tf_ifft_for_rank(self, rank): + if rank == 1: + return tf.signal.ifft + elif rank == 2: + return tf.signal.ifft2d + elif rank == 3: + return tf.signal.ifft3d + else: + raise ValueError("invalid rank") + + @parameterized.parameters(itertools.product( + VALID_FFT_RANKS, range(3), (np.complex64, np.complex128))) + def test_empty(self, 
rank, extra_dims, np_type): + dims = rank + extra_dims + x = np.zeros((0,) * dims).astype(np_type) + self.assertEqual(x.shape, self._tf_fft(x, rank).shape) + self.assertEqual(x.shape, self._tf_ifft(x, rank).shape) + + @parameterized.parameters( + itertools.product(VALID_FFT_RANKS, range(3), + (np.complex64, np.complex128))) + def test_basic(self, rank, extra_dims, np_type): + dims = rank + extra_dims + tol = 1e-4 if np_type == np.complex64 else 1e-8 + self._compare( + np.mod(np.arange(np.power(4, dims)), 10).reshape( + (4,) * dims).astype(np_type), rank, rtol=tol, atol=tol) + + @parameterized.parameters(itertools.product( + (1,), range(3), (np.complex64, np.complex128))) + def test_large_batch(self, rank, extra_dims, np_type): + dims = rank + extra_dims + tol = 1e-4 if np_type == np.complex64 else 5e-5 + self._compare( + np.mod(np.arange(np.power(128, dims)), 10).reshape( + (128,) * dims).astype(np_type), rank, rtol=tol, atol=tol) + + # TODO(yangzihao): Disable before we can figure out a way to + # properly test memory fail for large batch fft. 
+ # def test_large_batch_memory_fail(self): + # if test.is_gpu_available(cuda_only=True): + # rank = 1 + # for dims in range(rank, rank + 3): + # self._check_memory_fail( + # np.mod(np.arange(np.power(128, dims)), 64).reshape( + # (128,) * dims).astype(np.complex64), rank) + + @parameterized.parameters(itertools.product( + VALID_FFT_RANKS, range(3), (np.complex64, np.complex128))) + def test_placeholder(self, rank, extra_dims, np_type): + if context.executing_eagerly(): + return + tol = 1e-4 if np_type == np.complex64 else 1e-8 + dims = rank + extra_dims + self._compare( + np.mod(np.arange(np.power(4, dims)), 10).reshape( + (4,) * dims).astype(np_type), + rank, use_placeholder=True, rtol=tol, atol=tol) + + @parameterized.parameters(itertools.product( + VALID_FFT_RANKS, range(3), (np.complex64, np.complex128))) + def test_random(self, rank, extra_dims, np_type): + tol = 1e-4 if np_type == np.complex64 else 5e-6 + dims = rank + extra_dims + def gen(shape): + n = np.prod(shape) + re = np.random.uniform(size=n) + im = np.random.uniform(size=n) + return (re + im * 1j).reshape(shape) + + self._compare(gen((4,) * dims).astype(np_type), rank, + rtol=tol, atol=tol) + + @parameterized.parameters(itertools.product( + VALID_FFT_RANKS, + # Check a variety of sizes (power-of-2, odd, etc.) + [128, 256, 512, 1024, 127, 255, 511, 1023], + (np.complex64, np.complex128))) + def test_random_1d(self, rank, dim, np_type): + has_gpu = test.is_gpu_available(cuda_only=True) + tol = {(np.complex64, True): 1e-4, + (np.complex64, False): 1e-2, + (np.complex128, True): 1e-4, + (np.complex128, False): 1e-2}[(np_type, has_gpu)] + def gen(shape): + n = np.prod(shape) + re = np.random.uniform(size=n) + im = np.random.uniform(size=n) + return (re + im * 1j).reshape(shape) + + self._compare(gen((dim,)).astype(np_type), 1, rtol=tol, atol=tol) + + def test_error(self): + # TODO(rjryan): Fix this test under Eager. 
+ if context.executing_eagerly(): + return + for rank in VALID_FFT_RANKS: + for dims in range(0, rank): + x = np.zeros((1,) * dims).astype(np.complex64) + with self.assertRaisesWithPredicateMatch( + ValueError, "Shape must be .*rank {}.*".format(rank)): + self._tf_fft(x, rank) + with self.assertRaisesWithPredicateMatch( + ValueError, "Shape must be .*rank {}.*".format(rank)): + self._tf_ifft(x, rank) + + @parameterized.parameters(itertools.product( + VALID_FFT_RANKS, range(2), (np.float32, np.float64))) + def test_grad_simple(self, rank, extra_dims, np_type): + tol = 1e-4 if np_type == np.float32 else 1e-10 + dims = rank + extra_dims + re = np.ones(shape=(4,) * dims, dtype=np_type) / 10.0 + im = np.zeros(shape=(4,) * dims, dtype=np_type) + self._check_grad_complex(self._tf_fft_for_rank(rank), re, im, + rtol=tol, atol=tol) + self._check_grad_complex(self._tf_ifft_for_rank(rank), re, im, + rtol=tol, atol=tol) + + @unittest.skip("16.86% flaky") + @parameterized.parameters(itertools.product( + VALID_FFT_RANKS, range(2), (np.float32, np.float64))) + def test_grad_random(self, rank, extra_dims, np_type): + dims = rank + extra_dims + tol = 1e-2 if np_type == np.float32 else 1e-10 + re = np.random.rand(*((3,) * dims)).astype(np_type) * 2 - 1 + im = np.random.rand(*((3,) * dims)).astype(np_type) * 2 - 1 + self._check_grad_complex(self._tf_fft_for_rank(rank), re, im, + rtol=tol, atol=tol) + self._check_grad_complex(self._tf_ifft_for_rank(rank), re, im, + rtol=tol, atol=tol) + + +@test_util.run_all_in_graph_and_eager_modes +# @test_util.disable_xla("b/155276727") +class RFFTOpsTest(BaseFFTOpsTest, parameterized.TestCase): + + def _tf_fft(self, x, rank, fft_length=None, feed_dict=None): + with self.cached_session() as sess: + return sess.run( + self._tf_fft_for_rank(rank)(x, fft_length), feed_dict=feed_dict) + + def _tf_ifft(self, x, rank, fft_length=None, feed_dict=None): + with self.cached_session() as sess: + return sess.run( + self._tf_ifft_for_rank(rank)(x, fft_length), 
feed_dict=feed_dict) + + def _np_fft(self, x, rank, fft_length=None): + if rank == 1: + return np.fft.rfft2(x, s=fft_length, axes=(-1,)) + elif rank == 2: + return np.fft.rfft2(x, s=fft_length, axes=(-2, -1)) + elif rank == 3: + return np.fft.rfft2(x, s=fft_length, axes=(-3, -2, -1)) + else: + raise ValueError("invalid rank") + + def _np_ifft(self, x, rank, fft_length=None): + if rank == 1: + return np.fft.irfft2(x, s=fft_length, axes=(-1,)) + elif rank == 2: + return np.fft.irfft2(x, s=fft_length, axes=(-2, -1)) + elif rank == 3: + return np.fft.irfft2(x, s=fft_length, axes=(-3, -2, -1)) + else: + raise ValueError("invalid rank") + + def _tf_fft_for_rank(self, rank): + if rank == 1: + return tf.signal.rfft + elif rank == 2: + return tf.signal.rfft2d + elif rank == 3: + return tf.signal.rfft3d + else: + raise ValueError("invalid rank") + + def _tf_ifft_for_rank(self, rank): + if rank == 1: + return tf.signal.irfft + elif rank == 2: + return tf.signal.irfft2d + elif rank == 3: + return tf.signal.irfft3d + else: + raise ValueError("invalid rank") + + # rocFFT requires/assumes that the input to the irfft transform + # is of the form that is a valid output from the rfft transform + # (i.e. 
it cannot be a set of random numbers) + # So for ROCm, call rfft and use its output as the input for testing irfft + def _generate_valid_irfft_input(self, c2r, np_ctype, r2c, np_rtype, rank, + fft_length): + if test.is_built_with_rocm(): + return self._np_fft(r2c.astype(np_rtype), rank, fft_length) + else: + return c2r.astype(np_ctype) + + @parameterized.parameters(itertools.product( + VALID_FFT_RANKS, range(3), (np.float32, np.float64))) + + def test_empty(self, rank, extra_dims, np_rtype): + np_ctype = np.complex64 if np_rtype == np.float32 else np.complex128 + dims = rank + extra_dims + x = np.zeros((0,) * dims).astype(np_rtype) + self.assertEqual(x.shape, self._tf_fft(x, rank).shape) + x = np.zeros((0,) * dims).astype(np_ctype) + self.assertEqual(x.shape, self._tf_ifft(x, rank).shape) + + @parameterized.parameters(itertools.product( + VALID_FFT_RANKS, range(3), (5, 6), (np.float32, np.float64))) + def test_basic(self, rank, extra_dims, size, np_rtype): + np_ctype = np.complex64 if np_rtype == np.float32 else np.complex128 + tol = 1e-4 if np_rtype == np.float32 else 5e-5 + dims = rank + extra_dims + inner_dim = size // 2 + 1 + r2c = np.mod(np.arange(np.power(size, dims)), 10).reshape( + (size,) * dims) + fft_length = (size,) * rank + self._compare_forward( + r2c.astype(np_rtype), rank, fft_length, rtol=tol, atol=tol) + c2r = np.mod(np.arange(np.power(size, dims - 1) * inner_dim), + 10).reshape((size,) * (dims - 1) + (inner_dim,)) + c2r = self._generate_valid_irfft_input(c2r, np_ctype, r2c, np_rtype, rank, + fft_length) + self._compare_backward(c2r, rank, fft_length, rtol=tol, atol=tol) + + @parameterized.parameters(itertools.product( + (1,), range(3), (64, 128), (np.float32, np.float64))) + def test_large_batch(self, rank, extra_dims, size, np_rtype): + np_ctype = np.complex64 if np_rtype == np.float32 else np.complex128 + tol = 1e-4 if np_rtype == np.float32 else 1e-5 + dims = rank + extra_dims + inner_dim = size // 2 + 1 + r2c = np.mod(np.arange(np.power(size, 
dims)), 10).reshape( + (size,) * dims) + fft_length = (size,) * rank + self._compare_forward( + r2c.astype(np_rtype), rank, fft_length, rtol=tol, atol=tol) + c2r = np.mod(np.arange(np.power(size, dims - 1) * inner_dim), + 10).reshape((size,) * (dims - 1) + (inner_dim,)) + c2r = self._generate_valid_irfft_input(c2r, np_ctype, r2c, np_rtype, rank, + fft_length) + self._compare_backward(c2r, rank, fft_length, rtol=tol, atol=tol) + + @parameterized.parameters(itertools.product( + VALID_FFT_RANKS, range(3), (5, 6), (np.float32, np.float64))) + def test_placeholder(self, rank, extra_dims, size, np_rtype): + if context.executing_eagerly(): + return + np_ctype = np.complex64 if np_rtype == np.float32 else np.complex128 + tol = 1e-4 if np_rtype == np.float32 else 1e-8 + dims = rank + extra_dims + inner_dim = size // 2 + 1 + r2c = np.mod(np.arange(np.power(size, dims)), 10).reshape( + (size,) * dims) + fft_length = (size,) * rank + self._compare_forward( + r2c.astype(np_rtype), + rank, + fft_length, + use_placeholder=True, + rtol=tol, + atol=tol) + c2r = np.mod(np.arange(np.power(size, dims - 1) * inner_dim), + 10).reshape((size,) * (dims - 1) + (inner_dim,)) + c2r = self._generate_valid_irfft_input(c2r, np_ctype, r2c, np_rtype, rank, + fft_length) + self._compare_backward( + c2r, rank, fft_length, use_placeholder=True, rtol=tol, atol=tol) + + @parameterized.parameters(itertools.product( + VALID_FFT_RANKS, range(3), (5, 6), (np.float32, np.float64))) + def test_fft_lenth_truncate(self, rank, extra_dims, size, np_rtype): + """Test truncation (FFT size < dimensions).""" + if test.is_built_with_rocm() and (rank == 3): + # TODO(rocm): fix me + # rfft fails for rank == 3 on ROCm + self.skipTest("Test fails on ROCm...fix me") + np_ctype = np.complex64 if np_rtype == np.float32 else np.complex128 + tol = 1e-4 if np_rtype == np.float32 else 8e-5 + dims = rank + extra_dims + inner_dim = size // 2 + 1 + r2c = np.mod(np.arange(np.power(size, dims)), 10).reshape( + (size,) * dims) + c2r 
= np.mod(np.arange(np.power(size, dims - 1) * inner_dim), + 10).reshape((size,) * (dims - 1) + (inner_dim,)) + fft_length = (size - 2,) * rank + self._compare_forward(r2c.astype(np_rtype), rank, fft_length, + rtol=tol, atol=tol) + c2r = self._generate_valid_irfft_input(c2r, np_ctype, r2c, np_rtype, rank, + fft_length) + self._compare_backward(c2r, rank, fft_length, rtol=tol, atol=tol) + # Confirm it works with unknown shapes as well. + if not context.executing_eagerly(): + self._compare_forward( + r2c.astype(np_rtype), + rank, + fft_length, + use_placeholder=True, + rtol=tol, atol=tol) + self._compare_backward( + c2r, rank, fft_length, use_placeholder=True, rtol=tol, atol=tol) + + @parameterized.parameters(itertools.product( + VALID_FFT_RANKS, range(3), (5, 6), (np.float32, np.float64))) + def test_fft_lenth_pad(self, rank, extra_dims, size, np_rtype): + """Test padding (FFT size > dimensions).""" + np_ctype = np.complex64 if np_rtype == np.float32 else np.complex128 + tol = 1e-4 if np_rtype == np.float32 else 8e-5 + dims = rank + extra_dims + inner_dim = size // 2 + 1 + r2c = np.mod(np.arange(np.power(size, dims)), 10).reshape( + (size,) * dims) + c2r = np.mod(np.arange(np.power(size, dims - 1) * inner_dim), + 10).reshape((size,) * (dims - 1) + (inner_dim,)) + fft_length = (size + 2,) * rank + self._compare_forward(r2c.astype(np_rtype), rank, fft_length, + rtol=tol, atol=tol) + c2r = self._generate_valid_irfft_input(c2r, np_ctype, r2c, np_rtype, rank, + fft_length) + self._compare_backward(c2r.astype(np_ctype), rank, fft_length, + rtol=tol, atol=tol) + # Confirm it works with unknown shapes as well. 
+ if not context.executing_eagerly(): + self._compare_forward( + r2c.astype(np_rtype), + rank, + fft_length, + use_placeholder=True, + rtol=tol, atol=tol) + self._compare_backward( + c2r.astype(np_ctype), + rank, + fft_length, + use_placeholder=True, + rtol=tol, atol=tol) + + @parameterized.parameters(itertools.product( + VALID_FFT_RANKS, range(3), (5, 6), (np.float32, np.float64))) + def test_random(self, rank, extra_dims, size, np_rtype): + def gen_real(shape): + n = np.prod(shape) + re = np.random.uniform(size=n) + ret = re.reshape(shape) + return ret + + def gen_complex(shape): + n = np.prod(shape) + re = np.random.uniform(size=n) + im = np.random.uniform(size=n) + ret = (re + im * 1j).reshape(shape) + return ret + np_ctype = np.complex64 if np_rtype == np.float32 else np.complex128 + tol = 1e-4 if np_rtype == np.float32 else 1e-5 + dims = rank + extra_dims + r2c = gen_real((size,) * dims) + inner_dim = size // 2 + 1 + fft_length = (size,) * rank + self._compare_forward( + r2c.astype(np_rtype), rank, fft_length, rtol=tol, atol=tol) + complex_dims = (size,) * (dims - 1) + (inner_dim,) + c2r = gen_complex(complex_dims) + c2r = self._generate_valid_irfft_input(c2r, np_ctype, r2c, np_rtype, rank, + fft_length) + self._compare_backward(c2r, rank, fft_length, rtol=tol, atol=tol) + + def test_error(self): + # TODO(rjryan): Fix this test under Eager. + if context.executing_eagerly(): + return + for rank in VALID_FFT_RANKS: + for dims in range(0, rank): + x = np.zeros((1,) * dims).astype(np.complex64) + with self.assertRaisesWithPredicateMatch( + ValueError, "Shape .* must have rank at least {}".format(rank)): + self._tf_fft(x, rank) + with self.assertRaisesWithPredicateMatch( + ValueError, "Shape .* must have rank at least {}".format(rank)): + self._tf_ifft(x, rank) + for dims in range(rank, rank + 2): + x = np.zeros((1,) * rank) + + # Test non-rank-1 fft_length produces an error. 
+ fft_length = np.zeros((1, 1)).astype(np.int32) + with self.assertRaisesWithPredicateMatch(ValueError, + "Shape .* must have rank 1"): + self._tf_fft(x, rank, fft_length) + with self.assertRaisesWithPredicateMatch(ValueError, + "Shape .* must have rank 1"): + self._tf_ifft(x, rank, fft_length) + + # Test wrong fft_length length. + fft_length = np.zeros((rank + 1,)).astype(np.int32) + with self.assertRaisesWithPredicateMatch( + ValueError, "Dimension must be .*but is {}.*".format(rank + 1)): + self._tf_fft(x, rank, fft_length) + with self.assertRaisesWithPredicateMatch( + ValueError, "Dimension must be .*but is {}.*".format(rank + 1)): + self._tf_ifft(x, rank, fft_length) + + # Test that calling the kernel directly without padding to fft_length + # produces an error. + rffts_for_rank = { + 1: [gen_spectral_ops.rfft, gen_spectral_ops.irfft], + 2: [gen_spectral_ops.rfft2d, gen_spectral_ops.irfft2d], + 3: [gen_spectral_ops.rfft3d, gen_spectral_ops.irfft3d] + } + rfft_fn, irfft_fn = rffts_for_rank[rank] + with self.assertRaisesWithPredicateMatch( + errors.InvalidArgumentError, + "Input dimension .* must have length of at least 6 but got: 5"): + x = np.zeros((5,) * rank).astype(np.float32) + fft_length = [6] * rank + with self.cached_session(): + self.evaluate(rfft_fn(x, fft_length)) + + with self.assertRaisesWithPredicateMatch( + errors.InvalidArgumentError, + "Input dimension .* must have length of at least .* but got: 3"): + x = np.zeros((3,) * rank).astype(np.complex64) + fft_length = [6] * rank + with self.cached_session(): + self.evaluate(irfft_fn(x, fft_length)) + + @parameterized.parameters(itertools.product( + VALID_FFT_RANKS, range(2), (5, 6), (np.float32, np.float64))) + def test_grad_simple(self, rank, extra_dims, size, np_rtype): + # rfft3d/irfft3d do not have gradients yet. 
+ if rank == 3: + return + dims = rank + extra_dims + tol = 1e-3 if np_rtype == np.float32 else 1e-10 + re = np.ones(shape=(size,) * dims, dtype=np_rtype) + im = -np.ones(shape=(size,) * dims, dtype=np_rtype) + self._check_grad_real(self._tf_fft_for_rank(rank), re, + rtol=tol, atol=tol) + if test.is_built_with_rocm(): + # Fails on ROCm because of irfft peculiarity + return + self._check_grad_complex( + self._tf_ifft_for_rank(rank), re, im, result_is_complex=False, + rtol=tol, atol=tol) + + @parameterized.parameters(itertools.product( + VALID_FFT_RANKS, range(2), (5, 6), (np.float32, np.float64))) + def test_grad_random(self, rank, extra_dims, size, np_rtype): + # rfft3d/irfft3d do not have gradients yet. + if rank == 3: + return + dims = rank + extra_dims + tol = 1e-2 if np_rtype == np.float32 else 1e-10 + re = np.random.rand(*((size,) * dims)).astype(np_rtype) * 2 - 1 + im = np.random.rand(*((size,) * dims)).astype(np_rtype) * 2 - 1 + self._check_grad_real(self._tf_fft_for_rank(rank), re, + rtol=tol, atol=tol) + if test.is_built_with_rocm(): + # Fails on ROCm because of irfft peculiarity + return + self._check_grad_complex( + self._tf_ifft_for_rank(rank), re, im, result_is_complex=False, + rtol=tol, atol=tol) + + def test_invalid_args(self): + # Test case for GitHub issue 55263 + a = np.empty([6, 0]) + b = np.array([1, -1]) + with self.assertRaisesRegex(errors.InvalidArgumentError, "must >= 0"): + with self.session(): + v = tf.signal.rfft2d(input_tensor=a, fft_length=b) + self.evaluate(v) + + +class FFTNTest(test_util.TestCase): """Tests for FFT ops.""" # pylint: disable=missing-function-docstring diff --git a/tensorflow_mri/python/ops/geom_ops.py b/tensorflow_mri/python/ops/geom_ops.py deleted file mode 100644 index 213dc164..00000000 --- a/tensorflow_mri/python/ops/geom_ops.py +++ /dev/null @@ -1,181 +0,0 @@ -# Copyright 2021 University College London. All Rights Reserved.
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Geometry operations.""" - -import tensorflow as tf - -from tensorflow_graphics.geometry.transformation import rotation_matrix_2d -from tensorflow_graphics.geometry.transformation import rotation_matrix_3d - - -def rotate_2d(points, euler): - """Rotates an array of 2D coordinates. - - Args: - points: A `Tensor` of shape `[A1, A2, ..., An, 2]`, where the last dimension - represents a 2D point. - euler: A `Tensor` of shape `[A1, A2, ..., An, 1]`, where the last dimension - represents an angle in radians. - - Returns: - A `Tensor` of shape `[A1, A2, ..., An, 2]`, where the last dimension - represents a 2D point. - """ - return rotation_matrix_2d.rotate( - points, rotation_matrix_2d.from_euler(euler)) - - -def rotate_3d(points, euler): - """Rotates an array of 3D coordinates. - - Args: - points: A `Tensor` of shape `[A1, A2, ..., An, 3]`, where the last dimension - represents a 3D point. - euler: A `Tensor` of shape `[A1, A2, ..., An, 3]`, where the last dimension - represents the three Euler angles. - - Returns: - A `Tensor` of shape `[A1, A2, ..., An, 3]`, where the last dimension - represents a 3D point. 
- """ - return rotation_matrix_3d.rotate( - points, rotation_matrix_3d.from_euler(euler)) - - -def euler_to_rotation_matrix_3d(angles, order='XYZ', name='rotation_matrix_3d'): - r"""Convert an Euler angle representation to a rotation matrix. - - The resulting matrix is $$\mathbf{R} = \mathbf{R}_z\mathbf{R}_y\mathbf{R}_x$$. - - .. note:: - In the following, A1 to An are optional batch dimensions. - - Args: - angles: A tensor of shape `[A1, ..., An, 3]`, where the last dimension - represents the three Euler angles. `[A1, ..., An, 0]` is the angle about - `x` in radians `[A1, ..., An, 1]` is the angle about `y` in radians and - `[A1, ..., An, 2]` is the angle about `z` in radians. - order: A `str`. The order in which the rotations are applied. Defaults to - `"XYZ"`. - name: A name for this op that defaults to "rotation_matrix_3d_from_euler". - - Returns: - A tensor of shape `[A1, ..., An, 3, 3]`, where the last two dimensions - represent a 3d rotation matrix. - - Raises: - ValueError: If the shape of `angles` is not supported. - """ - with tf.name_scope(name): - angles = tf.convert_to_tensor(value=angles) - - if angles.shape[-1] != 3: - raise ValueError(f"The last dimension of `angles` must have size 3, " - f"but got shape: {angles.shape}") - - sin_angles = tf.math.sin(angles) - cos_angles = tf.math.cos(angles) - return _build_matrix_from_sines_and_cosines( - sin_angles, cos_angles, order=order) - - -def _build_matrix_from_sines_and_cosines(sin_angles, cos_angles, order='XYZ'): - """Builds a rotation matrix from sines and cosines of Euler angles. - - .. note:: - In the following, A1 to An are optional batch dimensions. - - Args: - sin_angles: A tensor of shape `[A1, ..., An, 3]`, where the last dimension - represents the sine of the Euler angles. - cos_angles: A tensor of shape `[A1, ..., An, 3]`, where the last dimension - represents the cosine of the Euler angles. - order: A `str`. The order in which the rotations are applied. Defaults to - `"XYZ"`. 
- - Returns: - A tensor of shape `[A1, ..., An, 3, 3]`, where the last two dimensions - represent a 3d rotation matrix. - - Raises: - ValueError: If any of the input arguments has an invalid value. - """ - sin_angles.shape.assert_is_compatible_with(cos_angles.shape) - output_shape = tf.concat((tf.shape(sin_angles)[:-1], (3, 3)), -1) - - sx, sy, sz = tf.unstack(sin_angles, axis=-1) - cx, cy, cz = tf.unstack(cos_angles, axis=-1) - ones = tf.ones_like(sx) - zeros = tf.zeros_like(sx) - # rx - m00 = ones - m01 = zeros - m02 = zeros - m10 = zeros - m11 = cx - m12 = -sx - m20 = zeros - m21 = sx - m22 = cx - rx = tf.stack((m00, m01, m02, - m10, m11, m12, - m20, m21, m22), - axis=-1) - rx = tf.reshape(rx, output_shape) - # ry - m00 = cy - m01 = zeros - m02 = sy - m10 = zeros - m11 = ones - m12 = zeros - m20 = -sy - m21 = zeros - m22 = cy - ry = tf.stack((m00, m01, m02, - m10, m11, m12, - m20, m21, m22), - axis=-1) - ry = tf.reshape(ry, output_shape) - # rz - m00 = cz - m01 = -sz - m02 = zeros - m10 = sz - m11 = cz - m12 = zeros - m20 = zeros - m21 = zeros - m22 = ones - rz = tf.stack((m00, m01, m02, - m10, m11, m12, - m20, m21, m22), - axis=-1) - rz = tf.reshape(rz, output_shape) - - matrix = tf.eye(output_shape[-2], output_shape[-1], - batch_shape=output_shape[:-2]) - - for r in order.upper(): - if r == 'X': - matrix = rx @ matrix - elif r == 'Y': - matrix = ry @ matrix - elif r == 'Z': - matrix = rz @ matrix - else: - raise ValueError(f"Invalid value for `order`: {order}") - - return matrix diff --git a/tensorflow_mri/python/ops/image_ops.py b/tensorflow_mri/python/ops/image_ops.py index 755871bd..7b8f8c29 100644 --- a/tensorflow_mri/python/ops/image_ops.py +++ b/tensorflow_mri/python/ops/image_ops.py @@ -1,4 +1,4 @@ -# Copyright 2021 University College London. All Rights Reserved. +# Copyright 2021 The TensorFlow MRI Authors. All Rights Reserved. # Copyright 2015 The TensorFlow Authors. All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); @@ -26,24 +26,19 @@ import numpy as np import tensorflow as tf +from tensorflow_mri.python.geometry import rotation_2d +from tensorflow_mri.python.geometry import rotation_3d from tensorflow_mri.python.ops import array_ops -from tensorflow_mri.python.ops import geom_ops from tensorflow_mri.python.util import api_util from tensorflow_mri.python.util import check_util -from tensorflow_mri.python.util import deprecation @api_util.export("image.psnr") -@deprecation.deprecated_args( - deprecation.REMOVAL_DATE['0.19.0'], - 'Use argument `image_dims` instead.', - ('rank', None)) def psnr(img1, img2, max_val=None, batch_dims=None, image_dims=None, - rank=None, name='psnr'): """Computes the peak signal-to-noise ratio (PSNR) between two N-D images. @@ -75,11 +70,6 @@ def psnr(img1, `(rank of inputs) - batch_dims - 1`. Defaults to `None`. `image_dims` can always be inferred if `batch_dims` was specified, so you only need to provide one of the two. - rank: An `int`. The number of spatial dimensions. Must be 2 or 3. Defaults - to `tf.rank(img1) - 2`. In other words, if rank is not explicitly set, - `img1` and `img2` should have shape `[batch, height, width, channels]` - if processing 2D images or `[batch, depth, height, width, channels]` if - processing 3D images. name: Namespace to embed the computation in. Returns: @@ -87,9 +77,6 @@ def psnr(img1, `tf.float32` and shape `batch_shape`. """ with tf.name_scope(name): - image_dims = deprecation.deprecated_argument_lookup( - 'image_dims', image_dims, 'rank', rank) - img1 = tf.convert_to_tensor(img1) img2 = tf.convert_to_tensor(img2) # Default `max_val` to maximum dynamic range for the input dtype. @@ -103,7 +90,7 @@ def psnr(img1, img2 = tf.image.convert_image_dtype(img2, tf.float32) # Resolve batch and image dimensions. 
- batch_dims, image_dims = _resolve_batch_and_image_dims( + batch_dims, image_dims = resolve_batch_and_image_dims( img1, batch_dims, image_dims) mse = tf.math.reduce_mean( @@ -174,10 +161,6 @@ def psnr3d(img1, img2, max_val, name='psnr3d'): @api_util.export("image.ssim") -@deprecation.deprecated_args( - deprecation.REMOVAL_DATE['0.19.0'], - 'Use argument `image_dims` instead.', - ('rank', None)) def ssim(img1, img2, max_val=None, @@ -187,7 +170,6 @@ def ssim(img1, k2=0.03, batch_dims=None, image_dims=None, - rank=None, name='ssim'): """Computes the structural similarity index (SSIM) between two N-D images. @@ -228,11 +210,6 @@ def ssim(img1, `(rank of inputs) - batch_dims - 1`. Defaults to `None`. `image_dims` can always be inferred if `batch_dims` was specified, so you only need to provide one of the two. - rank: An `int`. The number of spatial dimensions. Must be 2 or 3. Defaults - to `tf.rank(img1) - 2`. In other words, if rank is not explicitly set, - `img1` and `img2` should have shape `[batch, height, width, channels]` - if processing 2D images or `[batch, depth, height, width, channels]` if - processing 3D images. name: Namespace to embed the computation in. Returns: @@ -240,15 +217,12 @@ def ssim(img1, value for each image in the batch. References: - .. [1] Zhou Wang, A. C. Bovik, H. R. Sheikh and E. P. Simoncelli, "Image + 1. Zhou Wang, A. C. Bovik, H. R. Sheikh and E. P. Simoncelli, "Image quality assessment: from error visibility to structural similarity," in IEEE Transactions on Image Processing, vol. 13, no. 4, pp. 600-612, April 2004, doi: 10.1109/TIP.2003.819861. """ with tf.name_scope(name): - image_dims = deprecation.deprecated_argument_lookup( - 'image_dims', image_dims, 'rank', rank) - img1 = tf.convert_to_tensor(img1) img2 = tf.convert_to_tensor(img2) # Default `max_val` to maximum dynamic range for the input dtype. @@ -262,7 +236,7 @@ def ssim(img1, img2 = tf.image.convert_image_dtype(img2, tf.float32) # Resolve batch and image dimensions. 
- batch_dims, image_dims = _resolve_batch_and_image_dims( + batch_dims, image_dims = resolve_batch_and_image_dims( img1, batch_dims, image_dims) # Check shapes. @@ -318,7 +292,7 @@ def ssim2d(img1, value for each image in the batch. References: - .. [1] Zhou Wang, A. C. Bovik, H. R. Sheikh and E. P. Simoncelli, "Image + 1. Zhou Wang, A. C. Bovik, H. R. Sheikh and E. P. Simoncelli, "Image quality assessment: from error visibility to structural similarity," in IEEE Transactions on Image Processing, vol. 13, no. 4, pp. 600-612, April 2004, doi: 10.1109/TIP.2003.819861. @@ -375,7 +349,7 @@ def ssim3d(img1, value for each image in the batch. References: - .. [1] Zhou Wang, A. C. Bovik, H. R. Sheikh and E. P. Simoncelli, "Image + 1. Zhou Wang, A. C. Bovik, H. R. Sheikh and E. P. Simoncelli, "Image quality assessment: from error visibility to structural similarity," in IEEE Transactions on Image Processing, vol. 13, no. 4, pp. 600-612, April 2004, doi: 10.1109/TIP.2003.819861. @@ -396,10 +370,6 @@ def ssim3d(img1, @api_util.export("image.ssim_multiscale") -@deprecation.deprecated_args( - deprecation.REMOVAL_DATE['0.19.0'], - 'Use argument `image_dims` instead.', - ('rank', None)) def ssim_multiscale(img1, img2, max_val=None, @@ -410,7 +380,6 @@ def ssim_multiscale(img1, k2=0.03, batch_dims=None, image_dims=None, - rank=None, name='ssim_multiscale'): """Computes the multiscale SSIM (MS-SSIM) between two N-D images. @@ -458,11 +427,6 @@ def ssim_multiscale(img1, `(rank of inputs) - batch_dims - 1`. Defaults to `None`. `image_dims` can always be inferred if `batch_dims` was specified, so you only need to provide one of the two. - rank: An `int`. The number of spatial dimensions. Must be 2 or 3. Defaults - to `tf.rank(img1) - 2`. In other words, if rank is not explicitly set, - `img1` and `img2` should have shape `[batch, height, width, channels]` - if processing 2D images or `[batch, depth, height, width, channels]` if - processing 3D images. 
name: Namespace to embed the computation in. Returns: @@ -470,15 +434,12 @@ def ssim_multiscale(img1, value for each image in the batch. References: - .. [1] Z. Wang, E. P. Simoncelli and A. C. Bovik, "Multiscale structural + 1. Z. Wang, E. P. Simoncelli and A. C. Bovik, "Multiscale structural similarity for image quality assessment," The Thrity-Seventh Asilomar Conference on Signals, Systems & Computers, 2003, 2003, pp. 1398-1402 Vol.2, doi: 10.1109/ACSSC.2003.1292216. """ with tf.name_scope(name): - image_dims = deprecation.deprecated_argument_lookup( - 'image_dims', image_dims, 'rank', rank) - # Convert to tensor if needed. img1 = tf.convert_to_tensor(img1, name='img1') img2 = tf.convert_to_tensor(img2, name='img2') @@ -493,7 +454,7 @@ def ssim_multiscale(img1, img2 = tf.image.convert_image_dtype(img2, tf.dtypes.float32) # Resolve batch and image dimensions. - batch_dims, image_dims = _resolve_batch_and_image_dims( + batch_dims, image_dims = resolve_batch_and_image_dims( img1, batch_dims, image_dims) # Shape checking. @@ -636,7 +597,7 @@ def ssim2d_multiscale(img1, value for each image in the batch. References: - .. [1] Z. Wang, E. P. Simoncelli and A. C. Bovik, "Multiscale structural + 1. Z. Wang, E. P. Simoncelli and A. C. Bovik, "Multiscale structural similarity for image quality assessment," The Thrity-Seventh Asilomar Conference on Signals, Systems & Computers, 2003, 2003, pp. 1398-1402 Vol.2, doi: 10.1109/ACSSC.2003.1292216. @@ -702,7 +663,7 @@ def ssim3d_multiscale(img1, value for each image in the batch. References: - .. [1] Z. Wang, E. P. Simoncelli and A. C. Bovik, "Multiscale structural + 1. Z. Wang, E. P. Simoncelli and A. C. Bovik, "Multiscale structural similarity for image quality assessment," The Thrity-Seventh Asilomar Conference on Signals, Systems & Computers, 2003, 2003, pp. 1398-1402 Vol.2, doi: 10.1109/ACSSC.2003.1292216. 
@@ -933,11 +894,11 @@ def image_gradients(image, method='sobel', norm=False, """ with tf.name_scope(name or 'image_gradients'): image = tf.convert_to_tensor(image) - batch_dims, image_dims = _resolve_batch_and_image_dims( + batch_dims, image_dims = resolve_batch_and_image_dims( image, batch_dims, image_dims) kernels = _gradient_operators( - method, norm=norm, rank=image_dims, dtype=image.dtype.real_dtype) + method, norm=norm, image_dims=image_dims, dtype=image.dtype.real_dtype) return _filter_image(image, kernels) @@ -980,19 +941,20 @@ def gradient_magnitude(image, method='sobel', norm=False, return tf.norm(gradients, axis=-1) -def _gradient_operators(method, norm=False, rank=2, dtype=tf.float32): +def _gradient_operators(method, norm=False, image_dims=2, dtype=tf.float32): """Returns a set of operators to compute image gradients. Args: method: A `str`. The gradient operator. Must be one of `'prewitt'`, `'sobel'` or `'scharr'`. norm: A `boolean`. If `True`, returns normalized kernels. - rank: An `int`. The dimensionality of the requested kernels. Defaults to 2. + image_dims: An `int`. The dimensionality of the requested kernels. + Defaults to 2. dtype: The `dtype` of the returned kernels. Defaults to `tf.float32`. Returns: A `Tensor` of shape `[num_kernels] + kernel_shape`, where `kernel_shape` is - `[3] * rank`. + `[3] * image_dims`. Raises: ValueError: If passed an invalid `method`. 
@@ -1011,15 +973,15 @@ def _gradient_operators(method, norm=False, rank=2, dtype=tf.float32): if norm: avg_operator /= tf.math.reduce_sum(tf.math.abs(avg_operator)) diff_operator /= tf.math.reduce_sum(tf.math.abs(diff_operator)) - kernels = [None] * rank - for d in range(rank): - kernels[d] = tf.ones([3] * rank, dtype=tf.float32) - for i in range(rank): + kernels = [None] * image_dims + for d in range(image_dims): + kernels[d] = tf.ones([3] * image_dims, dtype=tf.float32) + for i in range(image_dims): if i == d: operator_1d = diff_operator else: operator_1d = avg_operator - operator_shape = [1] * rank + operator_shape = [1] * image_dims operator_shape[i] = operator_1d.shape[0] operator_1d = tf.reshape(operator_1d, operator_shape) kernels[d] *= operator_1d @@ -1102,16 +1064,11 @@ def _filter_image(image, kernels): @api_util.export("image.gmsd") -@deprecation.deprecated_args( - deprecation.REMOVAL_DATE['0.19.0'], - 'Use argument `image_dims` instead.', - ('rank', None)) def gmsd(img1, img2, max_val=1.0, batch_dims=None, image_dims=None, - rank=None, name=None): """Computes the gradient magnitude similarity deviation (GMSD). @@ -1140,11 +1097,6 @@ def gmsd(img1, `image.shape.rank - batch_dims - 1`. Defaults to `None`. `image_dims` can always be inferred if `batch_dims` was specified, so you only need to provide one of the two. - rank: An `int`. The number of spatial dimensions. Must be 2 or 3. Defaults - to `tf.rank(img1) - 2`. In other words, if rank is not explicitly set, - `img1` and `img2` should have shape `[batch, height, width, channels]` - if processing 2D images or `[batch, depth, height, width, channels]` if - processing 3D images. name: Namespace to embed the computation in. Returns: @@ -1152,15 +1104,13 @@ def gmsd(img1, returned tensor has type `tf.float32` and shape `batch_shape`. References: - .. [1] W. Xue, L. Zhang, X. Mou and A. C. Bovik, "Gradient Magnitude + 1. W. Xue, L. Zhang, X. Mou and A. C. 
Bovik, "Gradient Magnitude Similarity Deviation: A Highly Efficient Perceptual Image Quality Index," in IEEE Transactions on Image Processing, vol. 23, no. 2, pp. 684-695, Feb. 2014, doi: 10.1109/TIP.2013.2293423. """ with tf.name_scope(name or 'gmsd'): # Check and prepare inputs. - image_dims = deprecation.deprecated_argument_lookup( - 'image_dims', image_dims, 'rank', rank) iqa_inputs = _validate_iqa_inputs( img1, img2, max_val, batch_dims, image_dims) img1, img2 = iqa_inputs.img1, iqa_inputs.img2 @@ -1225,12 +1175,16 @@ def gmsd2d(img1, img2, max_val=1.0, name=None): returned tensor has type `tf.float32` and shape `batch_shape`. References: - .. [1] W. Xue, L. Zhang, X. Mou and A. C. Bovik, "Gradient Magnitude + 1. W. Xue, L. Zhang, X. Mou and A. C. Bovik, "Gradient Magnitude Similarity Deviation: A Highly Efficient Perceptual Image Quality Index," in IEEE Transactions on Image Processing, vol. 23, no. 2, pp. 684-695, Feb. 2014, doi: 10.1109/TIP.2013.2293423. """ - return gmsd(img1, img2, max_val=max_val, rank=2, name=(name or 'gmsd2d')) + return gmsd(img1, + img2, + max_val=max_val, + image_dims=2, + name=(name or 'gmsd2d')) @api_util.export("image.gmsd3d") @@ -1255,12 +1209,16 @@ def gmsd3d(img1, img2, max_val=1.0, name=None): returned tensor has type `tf.float32` and shape `batch_shape`. References: - .. [1] W. Xue, L. Zhang, X. Mou and A. C. Bovik, "Gradient Magnitude + 1. W. Xue, L. Zhang, X. Mou and A. C. Bovik, "Gradient Magnitude Similarity Deviation: A Highly Efficient Perceptual Image Quality Index," in IEEE Transactions on Image Processing, vol. 23, no. 2, pp. 684-695, Feb. 2014, doi: 10.1109/TIP.2013.2293423. 
""" - return gmsd(img1, img2, max_val=max_val, rank=3, name=(name or 'gmsd3d')) + return gmsd(img1, + img2, + max_val=max_val, + image_dims=3, + name=(name or 'gmsd3d')) def _validate_iqa_inputs(img1, img2, max_val, batch_dims, image_dims): @@ -1322,7 +1280,7 @@ def _validate_iqa_inputs(img1, img2, max_val, batch_dims, image_dims): img2 = tf.image.convert_image_dtype(img2, tf.float32) # Resolve batch and image dimensions. - batch_dims, image_dims = _resolve_batch_and_image_dims( + batch_dims, image_dims = resolve_batch_and_image_dims( img1, batch_dims, image_dims) # Check that the image shapes are compatible. @@ -1692,13 +1650,13 @@ def phantom(phantom_type='modified_shepp_logan', # pylint: disable=dangerous-de ValueError: If the requested ND phantom is not defined. References: - .. [1] Shepp, L. A., & Logan, B. F. (1974). The Fourier reconstruction of a + 1. Shepp, L. A., & Logan, B. F. (1974). The Fourier reconstruction of a head section. IEEE Transactions on nuclear science, 21(3), 21-43. - .. [2] Toft, P. (1996). The radon transform. Theory and Implementation + 2. Toft, P. (1996). The radon transform. Theory and Implementation (Ph. D. Dissertation)(Copenhagen: Technical University of Denmark). - .. [3] Kak, A. C., & Slaney, M. (2001). Principles of computerized + 3. Kak, A. C., & Slaney, M. (2001). Principles of computerized tomographic imaging. Society for Industrial and Applied Mathematics. - .. [4] Koay, C. G., Sarlls, J. E., & Özarslan, E. (2007). Three‐dimensional + 4. Koay, C. G., Sarlls, J. E., & Özarslan, E. (2007). Three‐dimensional analytical magnetic resonance imaging phantom in the Fourier domain. Magnetic Resonance in Medicine, 58(2), 430-436. """ @@ -1740,7 +1698,8 @@ def phantom(phantom_type='modified_shepp_logan', # pylint: disable=dangerous-de if isinstance(obj, Ellipse): # Apply translation and rotation to coordinates. 
- tx = geom_ops.rotate_2d(x - obj.pos, tf.cast(obj.phi, x.dtype)) + tx = rotation_2d.Rotation2D.from_euler(tf.cast(obj.phi, x.dtype)).rotate( + x - obj.pos) # Use object equation to generate a mask. mask = tf.math.reduce_sum( (tx ** 2) / (tf.convert_to_tensor(obj.size) ** 2), -1) <= 1.0 @@ -1748,7 +1707,8 @@ def phantom(phantom_type='modified_shepp_logan', # pylint: disable=dangerous-de image = tf.where(mask, image + obj.rho, image) elif isinstance(obj, Ellipsoid): # Apply translation and rotation to coordinates. - tx = geom_ops.rotate_3d(x - obj.pos, tf.cast(obj.phi, x.dtype)) + tx = rotation_3d.Rotation3D.from_euler(tf.cast(obj.phi, x.dtype)).rotate( + x - obj.pos) # Use object equation to generate a mask. mask = tf.math.reduce_sum( (tx ** 2) / (tf.convert_to_tensor(obj.size) ** 2), -1) <= 1.0 @@ -1974,7 +1934,7 @@ def extract_and_scale_complex_part(value, part, max_val): return value -def _resolve_batch_and_image_dims(image, batch_dims, image_dims): +def resolve_batch_and_image_dims(image, batch_dims, image_dims): """Resolves `batch_dims` and `image_dims` for a given `image`. Args: diff --git a/tensorflow_mri/python/ops/image_ops_test.py b/tensorflow_mri/python/ops/image_ops_test.py index 80f54c0e..40564995 100644 --- a/tensorflow_mri/python/ops/image_ops_test.py +++ b/tensorflow_mri/python/ops/image_ops_test.py @@ -1,4 +1,4 @@ -# Copyright 2021 University College London. All Rights Reserved. +# Copyright 2021 The TensorFlow MRI Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -43,7 +43,7 @@ def test_psnr_2d_scalar(self): img1 = tf.expand_dims(img1, -1) img2 = tf.expand_dims(img2, -1) - result = image_ops.psnr(img1, img2, max_val=255, rank=2) + result = image_ops.psnr(img1, img2, max_val=255, image_dims=2) self.assertAllClose(result, 22.73803845) result = image_ops.psnr2d(img1, img2, max_val=255) @@ -60,7 +60,7 @@ def test_psnr_2d_trivial_batch(self): img1 = tf.expand_dims(img1, 0) img2 = tf.expand_dims(img2, 0) - result = image_ops.psnr(img1, img2, max_val=255, rank=2) + result = image_ops.psnr(img1, img2, max_val=255, image_dims=2) self.assertAllClose(result, [22.73803845]) @test_util.run_in_graph_and_eager_modes @@ -94,7 +94,7 @@ def test_psnr_2d_nd_batch(self): [17.80788841, 18.18428580], [18.06558658, 17.16817389]] - result = image_ops.psnr(img1, img2, max_val=255, rank=2) + result = image_ops.psnr(img1, img2, max_val=255, image_dims=2) self.assertAllClose(result, ref) @test_util.run_in_graph_and_eager_modes @@ -132,7 +132,7 @@ def test_psnr_3d_scalar(self): img1 = tf.expand_dims(img1, -1) img2 = tf.expand_dims(img2, -1) - result = image_ops.psnr(img1, img2, rank=3) + result = image_ops.psnr(img1, img2, image_dims=3) self.assertAllClose(result, 32.3355765) @test_util.run_in_graph_and_eager_modes @@ -170,7 +170,7 @@ def test_psnr_3d_mdbatch(self): img1 = tf.reshape(img1, (3, 2) + img1.shape[1:]) img2 = tf.reshape(img2, (3, 2) + img2.shape[1:]) - result = image_ops.psnr(img1, img2, max_val=255, rank=3) + result = image_ops.psnr(img1, img2, max_val=255, image_dims=3) self.assertAllClose(result, ref, rtol=1e-3, atol=1e-3) result = image_ops.psnr3d(img1, img2, max_val=255) @@ -190,7 +190,7 @@ def test_psnr_3d_multichannel(self): img1 = tf.transpose(img1, [0, 2, 3, 4, 1]) img2 = tf.transpose(img2, [0, 2, 3, 4, 1]) - result = image_ops.psnr(img1, img2, max_val=255, rank=3) + result = image_ops.psnr(img1, img2, max_val=255, image_dims=3) self.assertAllClose(result, ref, rtol=1e-4, atol=1e-4) def test_psnr_invalid_rank(self): @@ -228,7 
+228,7 @@ def test_msssim_2d_scalar(self): img1 = tf.expand_dims(img1, -1) img2 = tf.expand_dims(img2, -1) - result = image_ops.ssim_multiscale(img1, img2, max_val=255, rank=2) + result = image_ops.ssim_multiscale(img1, img2, max_val=255, image_dims=2) self.assertAllClose(result, 0.8270784) result = image_ops.ssim2d_multiscale(img1, img2, max_val=255) @@ -245,7 +245,7 @@ def test_msssim_2d_trivial_batch(self): img1 = tf.expand_dims(img1, 0) img2 = tf.expand_dims(img2, 0) - result = image_ops.ssim_multiscale(img1, img2, max_val=255, rank=2) + result = image_ops.ssim_multiscale(img1, img2, max_val=255, image_dims=2) self.assertAllClose(result, [0.8270784]) @test_util.run_in_graph_and_eager_modes @@ -279,7 +279,7 @@ def test_msssim_2d_nd_batch(self): [0.71863150, 0.76113180], [0.77840980, 0.71724670]] - result = image_ops.ssim_multiscale(img1, img2, max_val=255, rank=2) + result = image_ops.ssim_multiscale(img1, img2, max_val=255, image_dims=2) self.assertAllClose(result, ref, rtol=1e-5, atol=1e-5) result = image_ops.ssim2d_multiscale(img1, img2, max_val=255) @@ -330,7 +330,7 @@ def test_msssim_3d_scalar(self): # img1 = tf.expand_dims(img1, -1) # img2 = tf.expand_dims(img2, -1) - # result = image_ops.ssim_multiscale(img1, img2, rank=3) + # result = image_ops.ssim_multiscale(img1, img2, image_dims=3) # self.assertAllClose(result, 0.96301770) @@ -579,7 +579,7 @@ def test_2d_scalar_batch(self): img1 = tf.expand_dims(img1, -1) img2 = tf.expand_dims(img2, -1) - result = self.test_fn(img1, img2, max_val=255, rank=2) + result = self.test_fn(img1, img2, max_val=255, image_dims=2) self.assertAllClose(result, self.expected[test_name], rtol=1e-5, atol=1e-5) @@ -604,7 +604,7 @@ def test_2d_trivial_batch(self): img1 = tf.expand_dims(img1, 0) img2 = tf.expand_dims(img2, 0) - result = self.test_fn(img1, img2, max_val=255, rank=2) + result = self.test_fn(img1, img2, max_val=255, image_dims=2) self.assertAllClose(result, self.expected[test_name], rtol=1e-5, atol=1e-5) @@ -648,7 +648,7 
@@ def test_2d_nd_batch(self): img1 = tf.reshape(img1, (3, 2) + img1.shape[1:]) img2 = tf.reshape(img2, (3, 2) + img2.shape[1:]) - result = self.test_fn(img1, img2, max_val=255, rank=2) + result = self.test_fn(img1, img2, max_val=255, image_dims=2) self.assertAllClose(result, self.expected[test_name], rtol=1e-4, atol=1e-4) @@ -686,7 +686,7 @@ def test_3d_scalar_batch(self): img1 = tf.expand_dims(img1, -1) img2 = tf.expand_dims(img2, -1) - result = self.test_fn(img1, img2, rank=3) + result = self.test_fn(img1, img2, image_dims=3) self.assertAllClose(result, self.expected[test_name]) @test_util.run_in_graph_and_eager_modes @@ -786,7 +786,7 @@ def test_default_3d(self): result = image_ops.phantom(shape=[128, 128, 128]) self.assertAllClose(result, expected) - @parameterized.product(rank=[2, 3], + @parameterized.product(image_dims=[2, 3], dtype=[tf.float32, tf.complex64]) @test_util.run_in_graph_and_eager_modes def test_parallel_imaging(self, rank, dtype): # pylint: disable=missing-param-doc @@ -870,7 +870,7 @@ def _np_birdcage_sensitivities(self, shape, r=1.5, nzz=8, dtype=np.complex64): # class TestResolveBatchAndImageDims(test_util.TestCase): - """Tests for `_resolve_batch_and_image_dims`.""" + """Tests for `resolve_batch_and_image_dims`.""" # pylint: disable=missing-function-docstring @parameterized.parameters( # rank, batch_dims, image_dims, expected_batch_dims, expected_image_dims @@ -885,7 +885,7 @@ def test_resolve_batch_and_image_dims( self, rank, input_batch_dims, input_image_dims, expected_batch_dims, expected_image_dims): image = tf.zeros((4,) * rank) - batch_dims, image_dims = image_ops._resolve_batch_and_image_dims( # pylint: disable=protected-access + batch_dims, image_dims = image_ops.resolve_batch_and_image_dims( # pylint: disable=protected-access image, input_batch_dims, input_image_dims) self.assertEqual(expected_batch_dims, batch_dims) self.assertEqual(expected_image_dims, image_dims) diff --git a/tensorflow_mri/python/ops/linalg_ops.py 
b/tensorflow_mri/python/ops/linalg_ops.py deleted file mode 100644 index 1bd99788..00000000 --- a/tensorflow_mri/python/ops/linalg_ops.py +++ /dev/null @@ -1,1497 +0,0 @@ -# Copyright 2021 University College London. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Linear algebra operations. - -This module contains linear operators and solvers. -""" - -import collections -import functools - -import tensorflow as tf -import tensorflow_nufft as tfft - -from tensorflow_mri.python.ops import array_ops -from tensorflow_mri.python.ops import fft_ops -from tensorflow_mri.python.ops import math_ops -from tensorflow_mri.python.ops import wavelet_ops -from tensorflow_mri.python.util import api_util -from tensorflow_mri.python.util import check_util -from tensorflow_mri.python.util import linalg_imaging -from tensorflow_mri.python.util import tensor_util - - -@api_util.export("linalg.LinearOperatorNUFFT") -class LinearOperatorNUFFT(linalg_imaging.LinearOperator): # pylint: disable=abstract-method - """Linear operator acting like a nonuniform DFT matrix. - - Args: - domain_shape: A 1D integer `tf.Tensor`. The domain shape of this - operator. This is usually the shape of the image but may include - additional dimensions. - trajectory: A `tf.Tensor` of type `float32` or `float64`. Contains the - sampling locations or *k*-space trajectory. 
Must have shape - `[..., M, N]`, where `N` is the rank (number of dimensions), `M` is - the number of samples and `...` is the batch shape, which can have any - number of dimensions. - density: A `tf.Tensor` of type `float32` or `float64`. Contains the - sampling density at each point in `trajectory`. Must have shape - `[..., M]`, where `M` is the number of samples and `...` is the batch - shape, which can have any number of dimensions. Defaults to `None`, in - which case the density is assumed to be 1.0 in all locations. - norm: A `str`. The FFT normalization mode. Must be `None` (no normalization) - or `'ortho'`. - name: An optional `str`. The name of this operator. - - Notes: - In MRI, sampling density compensation is typically performed during the - adjoint transform. However, in order to maintain the validity of the linear - operator, this operator applies the compensation orthogonally, i.e., - it scales the data by the square root of `density` in both forward and - adjoint transforms. If you are using this operator to compute the adjoint - and wish to apply the full compensation, you can do so via the - `precompensate` method. - - >>> import tensorflow as tf - >>> import tensorflow_mri as tfmri - >>> # Create some data. - >>> image_shape = (128, 128) - >>> image = image_ops.phantom(shape=image_shape, dtype=tf.complex64) - >>> trajectory = tfmri.sampling.radial_trajectory( - >>> 128, 128, flatten_encoding_dims=True) - >>> density = tfmri.sampling.radial_density( - >>> 128, 128, flatten_encoding_dims=True) - >>> # Create a NUFFT operator. - >>> linop = tfmri.linalg.LinearOperatorNUFFT( - >>> image_shape, trajectory=trajectory, density=density) - >>> # Create k-space. - >>> kspace = tfmri.signal.nufft(image, trajectory) - >>> # This reconstructs the image applying only partial compensation - >>> # (square root of weights). - >>> image = linop.transform(kspace, adjoint=True) - >>> # This reconstructs the image with full compensation. 
- >>> image = linop.transform(linop.precompensate(kspace), adjoint=True) - """ - def __init__(self, - domain_shape, - trajectory, - density=None, - norm='ortho', - name="LinearOperatorNUFFT"): - - parameters = dict( - domain_shape=domain_shape, - trajectory=trajectory, - norm=norm, - name=name - ) - - # Get domain shapes. - self._domain_shape_static, self._domain_shape_dynamic = ( - tensor_util.static_and_dynamic_shapes_from_shape(domain_shape)) - - # Validate the remaining inputs. - self.trajectory = check_util.validate_tensor_dtype( - tf.convert_to_tensor(trajectory), 'floating', 'trajectory') - self.norm = check_util.validate_enum(norm, {None, 'ortho'}, 'norm') - - # We infer the operation's rank from the trajectory. - self._rank_static = self.trajectory.shape[-1] - self._rank_dynamic = tf.shape(self.trajectory)[-1] - # The domain rank is >= the operation rank. - domain_rank_static = self._domain_shape_static.rank - domain_rank_dynamic = tf.shape(self._domain_shape_dynamic)[0] - # The difference between this operation's rank and the domain rank is the - # number of extra dims. - extra_dims_static = domain_rank_static - self._rank_static - extra_dims_dynamic = domain_rank_dynamic - self._rank_dynamic - - # The grid shape are the last `rank` dimensions of domain_shape. We don't - # need the static grid shape. - self._grid_shape = self._domain_shape_dynamic[-self._rank_dynamic:] - - # We need to do some work to figure out the batch shapes. This operator - # could have a batch shape, if the trajectory has a batch shape. However, - # we allow the user to include one or more batch dimensions in the domain - # shape, if they so wish. Therefore, not all batch dimensions in the - # trajectory are necessarily part of the batch shape. - - # The total number of dimensions in `trajectory` is equal to - # `batch_dims + extra_dims + 2`. - # Compute the true batch shape (i.e., the batch dimensions that are - # NOT included in the domain shape). 
- batch_dims_dynamic = tf.rank(self.trajectory) - extra_dims_dynamic - 2 - if (self.trajectory.shape.rank is not None and - extra_dims_static is not None): - # We know the total number of dimensions in `trajectory` and we know - # the number of extra dims, so we can compute the number of batch dims - # statically. - batch_dims_static = self.trajectory.shape.rank - extra_dims_static - 2 - else: - # We are missing at least some information, so the number of batch - # dimensions is unknown. - batch_dims_static = None - - self._batch_shape_dynamic = tf.shape(self.trajectory)[:batch_dims_dynamic] - if batch_dims_static is not None: - self._batch_shape_static = self.trajectory.shape[:batch_dims_static] - else: - self._batch_shape_static = tf.TensorShape(None) - - # Compute the "extra" shape. This shape includes those dimensions which - # are not part of the NUFFT (e.g., they are effectively batch dimensions), - # but which are included in the domain shape rather than in the batch shape. - extra_shape_dynamic = self._domain_shape_dynamic[:-self._rank_dynamic] - if self._rank_static is not None: - extra_shape_static = self._domain_shape_static[:-self._rank_static] - else: - extra_shape_static = tf.TensorShape(None) - - # Check that the "extra" shape in `domain_shape` and `trajectory` are - # compatible for broadcasting. - shape1, shape2 = extra_shape_static, self.trajectory.shape[:-2] - try: - tf.broadcast_static_shape(shape1, shape2) - except ValueError as err: - raise ValueError( - f"The \"batch\" shapes in `domain_shape` and `trajectory` are not " - f"compatible for broadcasting: {shape1} vs {shape2}") from err - - # Compute the range shape. - self._range_shape_dynamic = tf.concat( - [extra_shape_dynamic, tf.shape(self.trajectory)[-2:-1]], 0) - self._range_shape_static = extra_shape_static.concatenate( - self.trajectory.shape[-2:-1]) - - # Statically check that density can be broadcasted with trajectory. 
- if density is not None: - try: - tf.broadcast_static_shape(self.trajectory.shape[:-1], density.shape) - except ValueError as err: - raise ValueError( - f"The \"batch\" shapes in `trajectory` and `density` are not " - f"compatible for broadcasting: {self.trajectory.shape[:-1]} vs " - f"{density.shape}") from err - self.density = tf.convert_to_tensor(density) - self.weights = tf.math.reciprocal_no_nan(self.density) - self._weights_sqrt = tf.cast( - tf.math.sqrt(self.weights), - tensor_util.get_complex_dtype(self.trajectory.dtype)) - else: - self.density = None - self.weights = None - - super().__init__(tensor_util.get_complex_dtype(self.trajectory.dtype), - is_non_singular=None, - is_self_adjoint=None, - is_positive_definite=None, - is_square=None, - name=name, - parameters=parameters) - - # Compute normalization factors. - if self.norm == 'ortho': - norm_factor = tf.math.reciprocal( - tf.math.sqrt(tf.cast(tf.math.reduce_prod(self._grid_shape), - self.dtype))) - self._norm_factor_forward = norm_factor - self._norm_factor_adjoint = norm_factor - - def _transform(self, x, adjoint=False): - if adjoint: - if self.density is not None: - x *= self._weights_sqrt - x = fft_ops.nufft(x, self.trajectory, - grid_shape=self._grid_shape, - transform_type='type_1', - fft_direction='backward') - if self.norm is not None: - x *= self._norm_factor_adjoint - else: - x = fft_ops.nufft(x, self.trajectory, - transform_type='type_2', - fft_direction='forward') - if self.norm is not None: - x *= self._norm_factor_forward - if self.density is not None: - x *= self._weights_sqrt - return x - - def precompensate(self, x): - if self.density is not None: - return x * self._weights_sqrt - return x - - def _domain_shape(self): - return self._domain_shape_static - - def _domain_shape_tensor(self): - return self._domain_shape_dynamic - - def _range_shape(self): - return self._range_shape_static - - def _range_shape_tensor(self): - return self._range_shape_dynamic - - def _batch_shape(self): - 
return self._batch_shape_static - - def _batch_shape_tensor(self): - return self._batch_shape_dynamic - - @property - def rank(self): - return self._rank_static - - def rank_tensor(self): - return self._rank_dynamic - - -@api_util.export("linalg.LinearOperatorGramNUFFT") -class LinearOperatorGramNUFFT(LinearOperatorNUFFT): # pylint: disable=abstract-method - """Linear operator acting like the Gram matrix of an NUFFT operator. - - If :math:`F` is a `tfmri.linalg.LinearOperatorNUFFT`, then this operator - applies :math:`F^H F`. This operator is self-adjoint. - - Args: - domain_shape: A 1D integer `tf.Tensor`. The domain shape of this - operator. This is usually the shape of the image but may include - additional dimensions. - trajectory: A `tf.Tensor` of type `float32` or `float64`. Contains the - sampling locations or *k*-space trajectory. Must have shape - `[..., M, N]`, where `N` is the rank (number of dimensions), `M` is - the number of samples and `...` is the batch shape, which can have any - number of dimensions. - density: A `tf.Tensor` of type `float32` or `float64`. Contains the - sampling density at each point in `trajectory`. Must have shape - `[..., M]`, where `M` is the number of samples and `...` is the batch - shape, which can have any number of dimensions. Defaults to `None`, in - which case the density is assumed to be 1.0 in all locations. - norm: A `str`. The FFT normalization mode. Must be `None` (no normalization) - or `'ortho'`. - toeplitz: A `boolean`. If `True`, uses the Toeplitz approach [1] - to compute :math:`F^H F x`, where :math:`F` is the NUFFT operator. - If `False`, the same operation is performed using the standard - NUFFT operation. The Toeplitz approach might be faster than the direct - approach but is slightly less accurate. This argument is only relevant - for non-Cartesian reconstruction and will be ignored for Cartesian - problems. - name: An optional `str`. The name of this operator. - - References: - [1] Fessler, J. 
A., Lee, S., Olafsson, V. T., Shi, H. R., & Noll, D. C. - (2005). Toeplitz-based iterative image reconstruction for MRI with - correction for magnetic field inhomogeneity. IEEE Transactions on Signal - Processing, 53(9), 3393-3402. - """ - def __init__(self, - domain_shape, - trajectory, - density=None, - norm='ortho', - toeplitz=False, - name="LinearOperatorNUFFT"): - super().__init__( - domain_shape=domain_shape, - trajectory=trajectory, - density=density, - norm=norm, - name=name - ) - - self.toeplitz = toeplitz - if self.toeplitz: - # Compute the FFT shift for adjoint NUFFT computation. - self._fft_shift = tf.cast(self._grid_shape // 2, self.dtype.real_dtype) - # Compute the Toeplitz kernel. - self._toeplitz_kernel = self._compute_toeplitz_kernel() - # Kernel shape (without batch dimensions). - self._kernel_shape = tf.shape(self._toeplitz_kernel)[-self.rank_tensor():] - - def _transform(self, x, adjoint=False): # pylint: disable=unused-argument - """Applies this linear operator.""" - # This operator is self-adjoint, so `adjoint` arg is unused. - if self.toeplitz: - # Using specialized Toeplitz implementation. - return self._transform_toeplitz(x) - # Using standard NUFFT implementation. - return super()._transform(super()._transform(x), adjoint=True) - - def _transform_toeplitz(self, x): - """Applies this linear operator using the Toeplitz approach.""" - input_shape = tf.shape(x) - fft_axes = tf.range(-self.rank_tensor(), 0) - x = fft_ops.fftn(x, axes=fft_axes, shape=self._kernel_shape) - x *= self._toeplitz_kernel - x = fft_ops.ifftn(x, axes=fft_axes) - x = tf.slice(x, tf.zeros([tf.rank(x)], dtype=tf.int32), input_shape) - return x - - def _compute_toeplitz_kernel(self): - """Computes the kernel for the Toeplitz approach.""" - trajectory = self.trajectory - weights = self.weights - if self.rank is None: - raise NotImplementedError( - f"The rank of {self.name} must be known statically.") - - if weights is None: - # If no weights were passed, use ones. 
- weights = tf.ones(tf.shape(trajectory)[:-1], dtype=self.dtype.real_dtype) - # Cast weights to complex dtype. - weights = tf.cast(tf.math.sqrt(weights), self.dtype) - - # Compute N-D kernel recursively. Begin with last axis. - last_axis = self.rank - 1 - kernel = self._compute_kernel_recursive(trajectory, weights, last_axis) - - # Make sure that the kernel is symmetric/Hermitian/self-adjoint. - kernel = self._enforce_kernel_symmetry(kernel) - - # Additional normalization by sqrt(2 ** rank). This is required because - # we are using FFTs with twice the length of the original image. - if self.norm == 'ortho': - kernel *= tf.cast(tf.math.sqrt(2.0 ** self.rank), kernel.dtype) - - # Put the kernel in Fourier space. - fft_axes = list(range(-self.rank, 0)) - fft_norm = self.norm or "backward" - return fft_ops.fftn(kernel, axes=fft_axes, norm=fft_norm) - - def _compute_kernel_recursive(self, trajectory, weights, axis): - """Recursively computes the kernel for the Toeplitz approach. - - This function works by computing the two halves of the kernel along each - axis. The "left" half is computed using the input trajectory. The "right" - half is computed using the trajectory flipped along the current axis, and - then reversed. Then the two halves are concatenated, with a block of zeros - inserted in between. If there is more than one axis, the process is repeated - recursively for each axis. - - This function calls the adjoint NUFFT 2 ** N times, where N is the number - of dimensions. NOTE: this could be optimized to 2 ** (N - 1) calls. - - Args: - trajectory: A `tf.Tensor` containing the current *k*-space trajectory. - weights: A `tf.Tensor` containing the current density compensation - weights. - axis: An `int` denoting the current axis. - - Returns: - A `tf.Tensor` containing the kernel. - - Raises: - NotImplementedError: If the rank of the operator is not known statically. - """ - # Account for the batch dimensions. We do not need to do the recursion - # for these. 
- batch_dims = self.batch_shape.rank - if batch_dims is None: - raise NotImplementedError( - f"The number of batch dimensions of {self.name} must be known " - f"statically.") - # The current axis without the batch dimensions. - image_axis = axis + batch_dims - if axis == 0: - # Outer-most axis. Compute left half, then use Hermitian symmetry to - # compute right half. - # TODO(jmontalt): there should be a way to compute the NUFFT only once. - kernel_left = self._nufft_adjoint(weights, trajectory) - flippings = tf.tensor_scatter_nd_update( - tf.ones([self.rank_tensor()]), [[axis]], [-1]) - kernel_right = self._nufft_adjoint(weights, trajectory * flippings) - else: - # We still have two or more axes to process. Compute left and right kernels - # by calling this function recursively. We call ourselves twice, first - # with current frequencies, then with negated frequencies along current - # axes. - kernel_left = self._compute_kernel_recursive( - trajectory, weights, axis - 1) - flippings = tf.tensor_scatter_nd_update( - tf.ones([self.rank_tensor()]), [[axis]], [-1]) - kernel_right = self._compute_kernel_recursive( - trajectory * flippings, weights, axis - 1) - - # Remove zero frequency and reverse. - kernel_right = tf.reverse(array_ops.slice_along_axis( - kernel_right, image_axis, 1, tf.shape(kernel_right)[image_axis] - 1), - [image_axis]) - - # Create block of zeros to be inserted between the left and right halves of - # the kernel. - zeros_shape = tf.concat([ - tf.shape(kernel_left)[:image_axis], [1], - tf.shape(kernel_left)[(image_axis + 1):]], 0) - zeros = tf.zeros(zeros_shape, dtype=kernel_left.dtype) - - # Concatenate the left and right halves of kernel, with a block of zeros in - # the middle. - kernel = tf.concat([kernel_left, zeros, kernel_right], image_axis) - return kernel - - def _nufft_adjoint(self, x, trajectory=None): - """Applies the adjoint NUFFT operator. 
- - We use this instead of `super()._transform(x, adjoint=True)` because we - need to be able to change the trajectory and to apply an FFT shift. - - Args: - x: A `tf.Tensor` containing the input data (typically the weights or - ones). - trajectory: A `tf.Tensor` containing the *k*-space trajectory, which - may have been flipped and therefore different from the original. If - `None`, the original trajectory is used. - - Returns: - A `tf.Tensor` containing the result of the adjoint NUFFT. - """ - # Apply FFT shift. - x *= tf.math.exp(tf.dtypes.complex( - tf.constant(0, dtype=self.dtype.real_dtype), - tf.math.reduce_sum(trajectory * self._fft_shift, -1))) - # Temporarily update trajectory. - if trajectory is not None: - temp = self.trajectory - self.trajectory = trajectory - x = super()._transform(x, adjoint=True) - if trajectory is not None: - self.trajectory = temp - return x - - def _enforce_kernel_symmetry(self, kernel): - """Enforces Hermitian symmetry on an input kernel. - - Args: - kernel: A `tf.Tensor`. An approximately Hermitian kernel. - - Returns: - A Hermitian-symmetric kernel. - """ - kernel_axes = list(range(-self.rank, 0)) - reversed_kernel = tf.roll( - tf.reverse(kernel, kernel_axes), - shift=tf.ones([tf.size(kernel_axes)], dtype=tf.int32), - axis=kernel_axes) - return (kernel + tf.math.conj(reversed_kernel)) / 2 - - def _range_shape(self): - # Override the NUFFT operator's range shape. The range shape for this - # operator is the same as the domain shape. - return self._domain_shape() - - def _range_shape_tensor(self): - return self._domain_shape_tensor() - - -@api_util.export("linalg.LinearOperatorFiniteDifference") -class LinearOperatorFiniteDifference(linalg_imaging.LinearOperator): # pylint: disable=abstract-method - """Linear operator representing a finite difference matrix. - - Args: - domain_shape: A 1D `tf.Tensor` or a `list` of `int`. The domain shape of - this linear operator. - axis: An `int`. 
The axis along which the finite difference is taken. - Defaults to -1. - dtype: A `tf.dtypes.DType`. The data type for this operator. Defaults to - `float32`. - name: A `str`. A name for this operator. - """ - def __init__(self, - domain_shape, - axis=-1, - dtype=tf.dtypes.float32, - name="LinearOperatorFiniteDifference"): - - parameters = dict( - domain_shape=domain_shape, - axis=axis, - dtype=dtype, - name=name - ) - - # Compute the static and dynamic shapes and save them for later use. - self._domain_shape_static, self._domain_shape_dynamic = ( - tensor_util.static_and_dynamic_shapes_from_shape(domain_shape)) - - # Validate axis and canonicalize to negative. This ensures the correct - # axis is selected in the presence of batch dimensions. - self.axis = check_util.validate_static_axes( - axis, self._domain_shape_static.rank, - min_length=1, - max_length=1, - canonicalize="negative", - scalar_to_list=False) - - # Compute range shape statically. The range has one less element along - # the difference axis than the domain. - range_shape_static = self._domain_shape_static.as_list() - if range_shape_static[self.axis] is not None: - range_shape_static[self.axis] -= 1 - range_shape_static = tf.TensorShape(range_shape_static) - self._range_shape_static = range_shape_static - - # Now compute dynamic range shape. First concatenate the leading axes with - # the updated difference dimension. Then, iff the difference axis is not - # the last one, concatenate the trailing axes. 
- range_shape_dynamic = self._domain_shape_dynamic - range_shape_dynamic = tf.concat([ - range_shape_dynamic[:self.axis], - [range_shape_dynamic[self.axis] - 1]], 0) - if self.axis != -1: - range_shape_dynamic = tf.concat([ - range_shape_dynamic, - range_shape_dynamic[self.axis + 1:]], 0) - self._range_shape_dynamic = range_shape_dynamic - - super().__init__(dtype, - is_non_singular=None, - is_self_adjoint=None, - is_positive_definite=None, - is_square=None, - name=name, - parameters=parameters) - - def _transform(self, x, adjoint=False): - - if adjoint: - paddings1 = [[0, 0]] * x.shape.rank - paddings2 = [[0, 0]] * x.shape.rank - paddings1[self.axis] = [1, 0] - paddings2[self.axis] = [0, 1] - x1 = tf.pad(x, paddings1) # pylint: disable=no-value-for-parameter - x2 = tf.pad(x, paddings2) # pylint: disable=no-value-for-parameter - x = x1 - x2 - else: - slice1 = [slice(None)] * x.shape.rank - slice2 = [slice(None)] * x.shape.rank - slice1[self.axis] = slice(1, None) - slice2[self.axis] = slice(None, -1) - x1 = x[tuple(slice1)] - x2 = x[tuple(slice2)] - x = x1 - x2 - - return x - - def _domain_shape(self): - return self._domain_shape_static - - def _range_shape(self): - return self._range_shape_static - - def _domain_shape_tensor(self): - return self._domain_shape_dynamic - - def _range_shape_tensor(self): - return self._range_shape_dynamic - - -@api_util.export("linalg.LinearOperatorWavelet") -class LinearOperatorWavelet(linalg_imaging.LinearOperator): # pylint: disable=abstract-method - """Linear operator representing a wavelet decomposition matrix. - - Args: - domain_shape: A 1D `tf.Tensor` or a `list` of `int`. The domain shape of - this linear operator. - wavelet: A `str` or a `pywt.Wavelet`_, or a `list` thereof. When passed a - `list`, different wavelets are applied along each axis in `axes`. - mode: A `str`. The padding or signal extension mode. Must be one of the - values supported by `tfmri.signal.wavedec`. Defaults to `'symmetric'`. - level: An `int` >= 0. 
The decomposition level. If `None` (default), - the maximum useful level of decomposition will be used (see - `tfmri.signal.max_wavelet_level`). - axes: A `list` of `int`. The axes over which the DWT is computed. Axes refer - only to domain dimensions without regard for the batch dimensions. - Defaults to `None` (all domain dimensions). - dtype: A `tf.dtypes.DType`. The data type for this operator. Defaults to - `float32`. - name: A `str`. A name for this operator. - """ - def __init__(self, - domain_shape, - wavelet, - mode='symmetric', - level=None, - axes=None, - dtype=tf.dtypes.float32, - name="LinearOperatorWavelet"): - # Set parameters. - parameters = dict( - domain_shape=domain_shape, - wavelet=wavelet, - mode=mode, - level=level, - axes=axes, - dtype=dtype, - name=name - ) - - # Get the static and dynamic shapes and save them for later use. - self._domain_shape_static, self._domain_shape_dynamic = ( - tensor_util.static_and_dynamic_shapes_from_shape(domain_shape)) - # At the moment, the wavelet implementation relies on shapes being - # statically known. - if not self._domain_shape_static.is_fully_defined(): - raise ValueError(f"static `domain_shape` must be fully defined, " - f"but got {self._domain_shape_static}") - static_rank = self._domain_shape_static.rank - - # Set arguments. - self.wavelet = wavelet - self.mode = mode - self.level = level - self.axes = check_util.validate_static_axes(axes, - rank=static_rank, - min_length=1, - canonicalize="negative", - must_be_unique=True, - scalar_to_list=True, - none_means_all=True) - - # Compute the coefficient slices needed for adjoint (wavelet - # reconstruction). - x = tf.ensure_shape(tf.zeros(self._domain_shape_dynamic, dtype=dtype), - self._domain_shape_static) - x = wavelet_ops.wavedec(x, wavelet=self.wavelet, mode=self.mode, - level=self.level, axes=self.axes) - y, self._coeff_slices = wavelet_ops.coeffs_to_tensor(x, axes=self.axes) - - # Get the range shape. 
- self._range_shape_static = y.shape - self._range_shape_dynamic = tf.shape(y) - - # Call base class. - super().__init__(dtype, - is_non_singular=None, - is_self_adjoint=None, - is_positive_definite=None, - is_square=None, - name=name, - parameters=parameters) - - def _transform(self, x, adjoint=False): - # While `wavedec` and `waverec` can transform only a subset of axes (and - # thus theoretically support batches), there is a caveat due to the - # `coeff_slices` object required by `waverec`. This object contains - # information relevant to a specific batch shape. While we could recompute - # this object for every input batch shape, it is easier to just process - # each batch independently. - if x.shape.rank is not None and self._domain_shape_static.rank is not None: - # Rank of input and this operator are known statically, so we can infer - # the number of batch dimensions statically too. - batch_dims = x.shape.rank - self._domain_shape_static.rank - else: - # We need to obtain the number of batch dimensions dynamically. - batch_dims = tf.rank(x) - tf.shape(self._domain_shape_dynamic)[0] - # Transform each batch. 
- x = array_ops.map_fn( - functools.partial(self._transform_batch, adjoint=adjoint), - x, batch_dims=batch_dims) - return x - - def _transform_batch(self, x, adjoint=False): - if adjoint: - x = wavelet_ops.tensor_to_coeffs(x, self._coeff_slices) - x = wavelet_ops.waverec(x, wavelet=self.wavelet, mode=self.mode, - axes=self.axes) - else: - x = wavelet_ops.wavedec(x, wavelet=self.wavelet, mode=self.mode, - level=self.level, axes=self.axes) - x, _ = wavelet_ops.coeffs_to_tensor(x, axes=self.axes) - return x - - def _domain_shape(self): - return self._domain_shape_static - - def _range_shape(self): - return self._range_shape_static - - def _domain_shape_tensor(self): - return self._domain_shape_dynamic - - def _range_shape_tensor(self): - return self._range_shape_dynamic - - -@api_util.export("linalg.LinearOperatorMRI") -class LinearOperatorMRI(linalg_imaging.LinearOperator): # pylint: disable=abstract-method - """Linear operator representing an MRI encoding matrix. - - The MRI operator, :math:`A`, maps a [batch of] images, :math:`x` to a - [batch of] measurement data (*k*-space), :math:`b`. - - .. math:: - A x = b - - This object may represent an undersampled MRI operator and supports - Cartesian and non-Cartesian *k*-space sampling. The user may provide a - sampling `mask` to represent an undersampled Cartesian operator, or a - `trajectory` to represent a non-Cartesian operator. - - This object may represent a multicoil MRI operator by providing coil - `sensitivities`. Note that `mask`, `trajectory` and `density` should never - have a coil dimension, including in the case of multicoil imaging. The coil - dimension will be handled automatically. - - The domain shape of this operator is `extra_shape + image_shape`. The range - of this operator is `extra_shape + [num_coils] + image_shape`, for - Cartesian imaging, or `extra_shape + [num_coils] + [num_samples]`, for - non-Cartesian imaging. `[num_coils]` is optional and only present for - multicoil operators. 
This operator supports batches of images and will - vectorize operations when possible. - - Args: - image_shape: A `tf.TensorShape` or a list of `ints`. The shape of the images - that this operator acts on. Must have length 2 or 3. - extra_shape: An optional `tf.TensorShape` or list of `ints`. Additional - dimensions that should be included within the operator domain. Note that - `extra_shape` is not needed to reconstruct independent batches of images. - However, it is useful when this operator is used as part of a - reconstruction that performs computation along non-spatial dimensions, - e.g. for temporal regularization. Defaults to `None`. - mask: An optional `tf.Tensor` of type `tf.bool`. The sampling mask. Must - have shape `[..., *S]`, where `S` is the `image_shape` and `...` is - the batch shape, which can have any number of dimensions. If `mask` is - passed, this operator represents an undersampled MRI operator. - trajectory: An optional `tf.Tensor` of type `float32` or `float64`. Must - have shape `[..., M, N]`, where `N` is the rank (number of spatial - dimensions), `M` is the number of samples in the encoded space and `...` - is the batch shape, which can have any number of dimensions. If - `trajectory` is passed, this operator represents a non-Cartesian MRI - operator. - density: An optional `tf.Tensor` of type `float32` or `float64`. The - sampling densities. Must have shape `[..., M]`, where `M` is the number of - samples and `...` is the batch shape, which can have any number of - dimensions. This input is only relevant for non-Cartesian MRI operators. - If passed, the non-Cartesian operator will include sampling density - compensation. If `None`, the operator will not perform sampling density - compensation. - sensitivities: An optional `tf.Tensor` of type `complex64` or `complex128`. - The coil sensitivity maps. 
Must have shape `[..., C, *S]`, where `S` - is the `image_shape`, `C` is the number of coils and `...` is the batch - shape, which can have any number of dimensions. - phase: An optional `tf.Tensor` of type `float32` or `float64`. A phase - estimate for the image. If provided, this operator will be - phase-constrained. - fft_norm: FFT normalization mode. Must be `None` (no normalization) - or `'ortho'`. Defaults to `'ortho'`. - sens_norm: A `boolean`. Whether to normalize coil sensitivities. Defaults to - `True`. - dynamic_domain: A `str`. The domain of the dynamic dimension, if present. - Must be one of `'time'` or `'frequency'`. May only be provided together - with a non-scalar `extra_shape`. The dynamic dimension is the last - dimension of `extra_shape`. The `'time'` mode (default) should be - used for regular dynamic reconstruction. The `'frequency'` mode should be - used for reconstruction in x-f space. - dtype: A `tf.dtypes.DType`. The dtype of this operator. Must be `complex64` - or `complex128`. Defaults to `complex64`. - name: An optional `str`. The name of this operator. - """ - def __init__(self, - image_shape, - extra_shape=None, - mask=None, - trajectory=None, - density=None, - sensitivities=None, - phase=None, - fft_norm='ortho', - sens_norm=True, - dynamic_domain=None, - dtype=tf.complex64, - name=None): - # pylint: disable=invalid-unary-operand-type - parameters = dict( - image_shape=image_shape, - extra_shape=extra_shape, - mask=mask, - trajectory=trajectory, - density=density, - sensitivities=sensitivities, - phase=phase, - fft_norm=fft_norm, - sens_norm=sens_norm, - dynamic_domain=dynamic_domain, - dtype=dtype, - name=name) - - # Set dtype. - dtype = tf.as_dtype(dtype) - if dtype not in (tf.complex64, tf.complex128): - raise ValueError( - f"`dtype` must be `complex64` or `complex128`, but got: {str(dtype)}") - - # Set image shape, rank and extra shape. 
- image_shape = tf.TensorShape(image_shape) - rank = image_shape.rank - if rank not in (2, 3): - raise ValueError( - f"Rank must be 2 or 3, but got: {rank}") - if not image_shape.is_fully_defined(): - raise ValueError( - f"`image_shape` must be fully defined, but got {image_shape}") - self._rank = rank - self._image_shape = image_shape - self._image_axes = list(range(-self._rank, 0)) # pylint: disable=invalid-unary-operand-type - self._extra_shape = tf.TensorShape(extra_shape or []) - - # Set initial batch shape, then update according to inputs. - batch_shape = self._extra_shape - batch_shape_tensor = tensor_util.convert_shape_to_tensor(batch_shape) - - # Set sampling mask after checking dtype and static shape. - if mask is not None: - mask = tf.convert_to_tensor(mask) - if mask.dtype != tf.bool: - raise TypeError( - f"`mask` must have dtype `bool`, but got: {str(mask.dtype)}") - if not mask.shape[-self._rank:].is_compatible_with(self._image_shape): - raise ValueError( - f"Expected the last dimensions of `mask` to be compatible with " - f"{self._image_shape}], but got: {mask.shape[-self._rank:]}") - batch_shape = tf.broadcast_static_shape( - batch_shape, mask.shape[:-self._rank]) - batch_shape_tensor = tf.broadcast_dynamic_shape( - batch_shape_tensor, tf.shape(mask)[:-self._rank]) - self._mask = mask - - # Set sampling trajectory after checking dtype and static shape. 
- if trajectory is not None: - if mask is not None: - raise ValueError("`mask` and `trajectory` cannot be both passed.") - trajectory = tf.convert_to_tensor(trajectory) - if trajectory.dtype != dtype.real_dtype: - raise TypeError( - f"Expected `trajectory` to have dtype `{str(dtype.real_dtype)}`, " - f"but got: {str(trajectory.dtype)}") - if trajectory.shape[-1] != self._rank: - raise ValueError( - f"Expected the last dimension of `trajectory` to be " - f"{self._rank}, but got {trajectory.shape[-1]}") - batch_shape = tf.broadcast_static_shape( - batch_shape, trajectory.shape[:-2]) - batch_shape_tensor = tf.broadcast_dynamic_shape( - batch_shape_tensor, tf.shape(trajectory)[:-2]) - self._trajectory = trajectory - - # Set sampling density after checking dtype and static shape. - if density is not None: - if self._trajectory is None: - raise ValueError("`density` must be passed with `trajectory`.") - density = tf.convert_to_tensor(density) - if density.dtype != dtype.real_dtype: - raise TypeError( - f"Expected `density` to have dtype `{str(dtype.real_dtype)}`, " - f"but got: {str(density.dtype)}") - if density.shape[-1] != self._trajectory.shape[-2]: - raise ValueError( - f"Expected the last dimension of `density` to be " - f"{self._trajectory.shape[-2]}, but got {density.shape[-1]}") - batch_shape = tf.broadcast_static_shape( - batch_shape, density.shape[:-1]) - batch_shape_tensor = tf.broadcast_dynamic_shape( - batch_shape_tensor, tf.shape(density)[:-1]) - self._density = density - - # Set sensitivity maps after checking dtype and static shape. 
- if sensitivities is not None: - sensitivities = tf.convert_to_tensor(sensitivities) - if sensitivities.dtype != dtype: - raise TypeError( - f"Expected `sensitivities` to have dtype `{str(dtype)}`, but got: " - f"{str(sensitivities.dtype)}") - if not sensitivities.shape[-self._rank:].is_compatible_with( - self._image_shape): - raise ValueError( - f"Expected the last dimensions of `sensitivities` to be " - f"compatible with {self._image_shape}, but got: " - f"{sensitivities.shape[-self._rank:]}") - batch_shape = tf.broadcast_static_shape( - batch_shape, sensitivities.shape[:-(self._rank + 1)]) - batch_shape_tensor = tf.broadcast_dynamic_shape( - batch_shape_tensor, tf.shape(sensitivities)[:-(self._rank + 1)]) - self._sensitivities = sensitivities - - if phase is not None: - phase = tf.convert_to_tensor(phase) - if phase.dtype != dtype.real_dtype: - raise TypeError( - f"Expected `phase` to have dtype `{str(dtype.real_dtype)}`, " - f"but got: {str(phase.dtype)}") - if not phase.shape[-self._rank:].is_compatible_with( - self._image_shape): - raise ValueError( - f"Expected the last dimensions of `phase` to be " - f"compatible with {self._image_shape}, but got: " - f"{phase.shape[-self._rank:]}") - batch_shape = tf.broadcast_static_shape( - batch_shape, phase.shape[:-self._rank]) - batch_shape_tensor = tf.broadcast_dynamic_shape( - batch_shape_tensor, tf.shape(phase)[:-self._rank]) - self._phase = phase - - # Set batch shapes. - self._batch_shape_value = batch_shape - self._batch_shape_tensor_value = batch_shape_tensor - - # If multicoil, add coil dimension to mask, trajectory and density. 
- if self._sensitivities is not None: - if self._mask is not None: - self._mask = tf.expand_dims(self._mask, axis=-(self._rank + 1)) - if self._trajectory is not None: - self._trajectory = tf.expand_dims(self._trajectory, axis=-3) - if self._density is not None: - self._density = tf.expand_dims(self._density, axis=-2) - if self._phase is not None: - self._phase = tf.expand_dims(self._phase, axis=-(self._rank + 1)) - - # Save some tensors for later use during computation. - if self._mask is not None: - self._mask_linop_dtype = tf.cast(self._mask, dtype) - if self._density is not None: - self._dens_weights_sqrt = tf.cast( - tf.math.sqrt(tf.math.reciprocal_no_nan(self._density)), dtype) - if self._phase is not None: - self._phase_rotator = tf.math.exp( - tf.complex(tf.constant(0.0, dtype=phase.dtype), phase)) - - # Set normalization. - self._fft_norm = check_util.validate_enum( - fft_norm, {None, 'ortho'}, 'fft_norm') - if self._fft_norm == 'ortho': # Compute normalization factors. - self._fft_norm_factor = tf.math.reciprocal( - tf.math.sqrt(tf.cast(self._image_shape.num_elements(), dtype))) - - # Normalize coil sensitivities. - self._sens_norm = sens_norm - if self._sensitivities is not None and self._sens_norm: - self._sensitivities = math_ops.normalize_no_nan( - self._sensitivities, axis=-(self._rank + 1)) - - # Set dynamic domain. - if dynamic_domain is not None and self._extra_shape.rank == 0: - raise ValueError( - "Argument `dynamic_domain` requires a non-scalar `extra_shape`.") - if dynamic_domain is not None: - self._dynamic_domain = check_util.validate_enum( - dynamic_domain, {'time', 'frequency'}, name='dynamic_domain') - else: - self._dynamic_domain = None - - # This variable is used by `LinearOperatorGramMRI` to disable the NUFFT. - self._skip_nufft = False - - super().__init__(dtype, name=name, parameters=parameters) - - def _transform(self, x, adjoint=False): - """Transform [batch] input `x`. 
- - Args: - x: A `tf.Tensor` of type `self.dtype` and shape - `[..., *self.domain_shape]` containing images, if `adjoint` is `False`, - or a `tf.Tensor` of type `self.dtype` and shape - `[..., *self.range_shape]` containing *k*-space data, if `adjoint` is - `True`. - adjoint: A `boolean` indicating whether to apply the adjoint of the - operator. - - Returns: - A `tf.Tensor` of type `self.dtype` and shape `[..., *self.range_shape]` - containing *k*-space data, if `adjoint` is `False`, or a `tf.Tensor` of - type `self.dtype` and shape `[..., *self.domain_shape]` containing - images, if `adjoint` is `True`. - """ - if adjoint: - # Apply density compensation. - if self._density is not None and not self._skip_nufft: - x *= self._dens_weights_sqrt - - # Apply adjoint Fourier operator. - if self.is_non_cartesian: # Non-Cartesian imaging, use NUFFT. - if not self._skip_nufft: - x = tfft.nufft(x, self._trajectory, - grid_shape=self._image_shape, - transform_type='type_1', - fft_direction='backward') - if self._fft_norm is not None: - x *= self._fft_norm_factor - - else: # Cartesian imaging, use FFT. - if self._mask is not None: - x *= self._mask_linop_dtype # Undersampling. - x = fft_ops.ifftn(x, axes=self._image_axes, - norm=self._fft_norm or 'forward', shift=True) - - # Apply coil combination. - if self.is_multicoil: - x *= tf.math.conj(self._sensitivities) - x = tf.math.reduce_sum(x, axis=-(self._rank + 1)) - - # Maybe remove phase from image. - if self.is_phase_constrained: - x *= tf.math.conj(self._phase_rotator) - x = tf.cast(tf.math.real(x), self.dtype) - - # Apply FFT along dynamic axis, if necessary. - if self.is_dynamic and self.dynamic_domain == 'frequency': - x = fft_ops.fftn(x, axes=[self.dynamic_axis], - norm='ortho', shift=True) - - else: # Forward operator. - - # Apply FFT along dynamic axis, if necessary. 
- if self.is_dynamic and self.dynamic_domain == 'frequency': - x = fft_ops.ifftn(x, axes=[self.dynamic_axis], - norm='ortho', shift=True) - - # Add phase to real-valued image if reconstruction is phase-constrained. - if self.is_phase_constrained: - x = tf.cast(tf.math.real(x), self.dtype) - x *= self._phase_rotator - - # Apply sensitivity modulation. - if self.is_multicoil: - x = tf.expand_dims(x, axis=-(self._rank + 1)) - x *= self._sensitivities - - # Apply Fourier operator. - if self.is_non_cartesian: # Non-Cartesian imaging, use NUFFT. - if not self._skip_nufft: - x = tfft.nufft(x, self._trajectory, - transform_type='type_2', - fft_direction='forward') - if self._fft_norm is not None: - x *= self._fft_norm_factor - - else: # Cartesian imaging, use FFT. - x = fft_ops.fftn(x, axes=self._image_axes, - norm=self._fft_norm or 'backward', shift=True) - if self._mask is not None: - x *= self._mask_linop_dtype # Undersampling. - - # Apply density compensation. - if self._density is not None and not self._skip_nufft: - x *= self._dens_weights_sqrt - - return x - - def _domain_shape(self): - """Returns the shape of the domain space of this operator.""" - return self._extra_shape.concatenate(self._image_shape) - - def _range_shape(self): - """Returns the shape of the range space of this operator.""" - if self.is_cartesian: - range_shape = self._image_shape.as_list() - else: - range_shape = [self._trajectory.shape[-2]] - if self.is_multicoil: - range_shape = [self.num_coils] + range_shape - return self._extra_shape.concatenate(range_shape) - - def _batch_shape(self): - """Returns the static batch shape of this operator.""" - return self._batch_shape_value[:-self._extra_shape.rank or None] # pylint: disable=invalid-unary-operand-type - - def _batch_shape_tensor(self): - """Returns the dynamic batch shape of this operator.""" - return self._batch_shape_tensor_value[:-self._extra_shape.rank or None] # pylint: disable=invalid-unary-operand-type - - @property - def 
image_shape(self): - """The image shape.""" - return self._image_shape - - @property - def rank(self): - """The number of spatial dimensions.""" - return self._rank - - @property - def is_cartesian(self): - """Whether this is a Cartesian MRI operator.""" - return self._trajectory is None - - @property - def is_non_cartesian(self): - """Whether this is a non-Cartesian MRI operator.""" - return self._trajectory is not None - - @property - def is_multicoil(self): - """Whether this is a multicoil MRI operator.""" - return self._sensitivities is not None - - @property - def is_phase_constrained(self): - """Whether this is a phase-constrained MRI operator.""" - return self._phase is not None - - @property - def is_dynamic(self): - """Whether this is a dynamic MRI operator.""" - return self._dynamic_domain is not None - - @property - def dynamic_domain(self): - """The dynamic domain of this operator.""" - return self._dynamic_domain - - @property - def dynamic_axis(self): - """The dynamic axis of this operator.""" - return -(self._rank + 1) if self.is_dynamic else None - - @property - def num_coils(self): - """The number of coils.""" - if self._sensitivities is None: - return None - return self._sensitivities.shape[-(self._rank + 1)] - - @property - def _composite_tensor_fields(self): - return ("image_shape", "mask", "trajectory", "density", "sensitivities", - "fft_norm") - - -@api_util.export("linalg.LinearOperatorGramMRI") -class LinearOperatorGramMRI(LinearOperatorMRI): # pylint: disable=abstract-method - """Linear operator representing an MRI encoding matrix. - - If :math:`A` is a `tfmri.linalg.LinearOperatorMRI`, then this ooperator - represents the matrix :math:`G = A^H A`. - - In certain circumstances, this operator may be able to apply the matrix - :math:`G` more efficiently than the composition :math:`G = A^H A` using - `tfmri.linalg.LinearOperatorMRI` objects. - - Args: - image_shape: A `tf.TensorShape` or a list of `ints`. 
The shape of the images - that this operator acts on. Must have length 2 or 3. - extra_shape: An optional `tf.TensorShape` or list of `ints`. Additional - dimensions that should be included within the operator domain. Note that - `extra_shape` is not needed to reconstruct independent batches of images. - However, it is useful when this operator is used as part of a - reconstruction that performs computation along non-spatial dimensions, - e.g. for temporal regularization. Defaults to `None`. - mask: An optional `tf.Tensor` of type `tf.bool`. The sampling mask. Must - have shape `[..., *S]`, where `S` is the `image_shape` and `...` is - the batch shape, which can have any number of dimensions. If `mask` is - passed, this operator represents an undersampled MRI operator. - trajectory: An optional `tf.Tensor` of type `float32` or `float64`. Must - have shape `[..., M, N]`, where `N` is the rank (number of spatial - dimensions), `M` is the number of samples in the encoded space and `...` - is the batch shape, which can have any number of dimensions. If - `trajectory` is passed, this operator represents a non-Cartesian MRI - operator. - density: An optional `tf.Tensor` of type `float32` or `float64`. The - sampling densities. Must have shape `[..., M]`, where `M` is the number of - samples and `...` is the batch shape, which can have any number of - dimensions. This input is only relevant for non-Cartesian MRI operators. - If passed, the non-Cartesian operator will include sampling density - compensation. If `None`, the operator will not perform sampling density - compensation. - sensitivities: An optional `tf.Tensor` of type `complex64` or `complex128`. - The coil sensitivity maps. Must have shape `[..., C, *S]`, where `S` - is the `image_shape`, `C` is the number of coils and `...` is the batch - shape, which can have any number of dimensions. - phase: An optional `tf.Tensor` of type `float32` or `float64`. A phase - estimate for the image. 
If provided, this operator will be - phase-constrained. - fft_norm: FFT normalization mode. Must be `None` (no normalization) - or `'ortho'`. Defaults to `'ortho'`. - sens_norm: A `boolean`. Whether to normalize coil sensitivities. Defaults to - `True`. - dynamic_domain: A `str`. The domain of the dynamic dimension, if present. - Must be one of `'time'` or `'frequency'`. May only be provided together - with a non-scalar `extra_shape`. The dynamic dimension is the last - dimension of `extra_shape`. The `'time'` mode (default) should be - used for regular dynamic reconstruction. The `'frequency'` mode should be - used for reconstruction in x-f space. - toeplitz_nufft: A `boolean`. If `True`, uses the Toeplitz approach [5] - to compute :math:`F^H F x`, where :math:`F` is the non-uniform Fourier - operator. If `False`, the same operation is performed using the standard - NUFFT operation. The Toeplitz approach might be faster than the direct - approach but is slightly less accurate. This argument is only relevant - for non-Cartesian reconstruction and will be ignored for Cartesian - problems. - dtype: A `tf.dtypes.DType`. The dtype of this operator. Must be `complex64` - or `complex128`. Defaults to `complex64`. - name: An optional `str`. The name of this operator. - """ - def __init__(self, - image_shape, - extra_shape=None, - mask=None, - trajectory=None, - density=None, - sensitivities=None, - phase=None, - fft_norm='ortho', - sens_norm=True, - dynamic_domain=None, - toeplitz_nufft=False, - dtype=tf.complex64, - name="LinearOperatorGramMRI"): - super().__init__( - image_shape, - extra_shape=extra_shape, - mask=mask, - trajectory=trajectory, - density=density, - sensitivities=sensitivities, - phase=phase, - fft_norm=fft_norm, - sens_norm=sens_norm, - dynamic_domain=dynamic_domain, - dtype=dtype, - name=name - ) - - self.toeplitz_nufft = toeplitz_nufft - if self.toeplitz_nufft and self.is_non_cartesian: - # Create a Gram NUFFT operator with Toeplitz embedding. 
- self._linop_gram_nufft = LinearOperatorGramNUFFT( - image_shape, trajectory=self._trajectory, density=self._density, - norm=fft_norm, toeplitz=True) - # Disable NUFFT computation on base class. The NUFFT will instead be - # performed by the Gram NUFFT operator. - self._skip_nufft = True - - def _transform(self, x, adjoint=False): - x = super()._transform(x) - if self.toeplitz_nufft: - x = self._linop_gram_nufft.transform(x) - x = super()._transform(x, adjoint=True) - return x - - def _range_shape(self): - return self._domain_shape() - - def _range_shape_tensor(self): - return self._domain_shape_tensor() - - -# Copyright 2019 The TensorFlow Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -@api_util.export("linalg.conjugate_gradient") -def conjugate_gradient(operator, - rhs, - preconditioner=None, - x=None, - tol=1e-5, - max_iterations=20, - bypass_gradient=False, - name=None): - r"""Conjugate gradient solver. - - Solves a linear system of equations :math:`Ax = b` for self-adjoint, positive - definite matrix :math:`A` and right-hand side vector :math:`b`, using an - iterative, matrix-free algorithm where the action of the matrix :math:`A` is - represented by `operator`. The iteration terminates when either the number of - iterations exceeds `max_iterations` or when the residual norm has been reduced - to `tol` times its initial value, i.e. 
- :math:`(\left\| b - A x_k \right\| <= \mathrm{tol} \left\| b \right\|\\)`. - - .. note:: - This function is similar to - `tf.linalg.experimental.conjugate_gradient`, except it adds support for - complex-valued linear systems and for imaging operators. - - Args: - operator: A `LinearOperator` that is self-adjoint and positive definite. - rhs: A `tf.Tensor` of shape `[..., N]`. The right hand-side of the linear - system. - preconditioner: A `LinearOperator` that approximates the inverse of `A`. - An efficient preconditioner could dramatically improve the rate of - convergence. If `preconditioner` represents matrix `M`(`M` approximates - `A^{-1}`), the algorithm uses `preconditioner.apply(x)` to estimate - `A^{-1}x`. For this to be useful, the cost of applying `M` should be - much lower than computing `A^{-1}` directly. - x: A `tf.Tensor` of shape `[..., N]`. The initial guess for the solution. - tol: A float scalar convergence tolerance. - max_iterations: An `int` giving the maximum number of iterations. - bypass_gradient: A `boolean`. If `True`, the gradient with respect to `rhs` - will be computed by applying the inverse of `operator` to the upstream - gradient with respect to `x` (through CG iteration), instead of relying - on TensorFlow's automatic differentiation. This may reduce memory usage - when training neural networks, but `operator` must not have any trainable - parameters. If `False`, gradients are computed normally. For more details, - see ref. [1]. - name: A name scope for the operation. - - Returns: - A `namedtuple` representing the final state with fields - - - i: A scalar `int32` `tf.Tensor`. Number of iterations executed. - - x: A rank-1 `tf.Tensor` of shape `[..., N]` containing the computed - solution. - - r: A rank-1 `tf.Tensor` of shape `[.., M]` containing the residual vector. - - p: A rank-1 `tf.Tensor` of shape `[..., N]`. `A`-conjugate basis vector. 
- - gamma: \\(r \dot M \dot r\\), equivalent to \\(||r||_2^2\\) when - `preconditioner=None`. - - Raises: - ValueError: If `operator` is not self-adjoint and positive definite. - - References: - .. [1] Aggarwal, H. K., Mani, M. P., & Jacob, M. (2018). MoDL: Model-based - deep learning architecture for inverse problems. IEEE transactions on - medical imaging, 38(2), 394-405. - """ - if bypass_gradient: - if preconditioner is not None: - raise ValueError( - "preconditioner is not supported when bypass_gradient is True.") - if x is not None: - raise ValueError("x is not supported when bypass_gradient is True.") - - def _conjugate_gradient_simple(rhs): - return _conjugate_gradient_internal(operator, rhs, - tol=tol, - max_iterations=max_iterations, - name=name) - - @tf.custom_gradient - def _conjugate_gradient_internal_grad(rhs): - result = _conjugate_gradient_simple(rhs) - - def grad(*upstream_grads): - # upstream_grads has the upstream gradient for each element of the - # output tuple (i, x, r, p, gamma). - _, dx, _, _, _ = upstream_grads - return _conjugate_gradient_simple(dx).x - - return result, grad - - return _conjugate_gradient_internal_grad(rhs) - - return _conjugate_gradient_internal(operator, rhs, - preconditioner=preconditioner, - x=x, - tol=tol, - max_iterations=max_iterations, - name=name) - - -def _conjugate_gradient_internal(operator, - rhs, - preconditioner=None, - x=None, - tol=1e-5, - max_iterations=20, - name=None): - """Implementation of `conjugate_gradient`. - - For the parameters, see `conjugate_gradient`. 
- """ - if isinstance(operator, linalg_imaging.LinalgImagingMixin): - rhs = operator.flatten_domain_shape(rhs) - - if not (operator.is_self_adjoint and operator.is_positive_definite): - raise ValueError('Expected a self-adjoint, positive definite operator.') - - cg_state = collections.namedtuple('CGState', ['i', 'x', 'r', 'p', 'gamma']) - - def stopping_criterion(i, state): - return tf.math.logical_and( - i < max_iterations, - tf.math.reduce_any( - tf.math.real(tf.norm(state.r, axis=-1)) > tf.math.real(tol))) - - def dot(x, y): - return tf.squeeze( - tf.linalg.matvec( - x[..., tf.newaxis], - y, adjoint_a=True), axis=-1) - - def cg_step(i, state): # pylint: disable=missing-docstring - z = tf.linalg.matvec(operator, state.p) - alpha = state.gamma / dot(state.p, z) - x = state.x + alpha[..., tf.newaxis] * state.p - r = state.r - alpha[..., tf.newaxis] * z - if preconditioner is None: - q = r - else: - q = preconditioner.matvec(r) - gamma = dot(r, q) - beta = gamma / state.gamma - p = q + beta[..., tf.newaxis] * state.p - return i + 1, cg_state(i + 1, x, r, p, gamma) - - # We now broadcast initial shapes so that we have fixed shapes per iteration. 
- - with tf.name_scope(name or 'conjugate_gradient'): - broadcast_shape = tf.broadcast_dynamic_shape( - tf.shape(rhs)[:-1], - operator.batch_shape_tensor()) - static_broadcast_shape = tf.broadcast_static_shape( - rhs.shape[:-1], - operator.batch_shape) - if preconditioner is not None: - broadcast_shape = tf.broadcast_dynamic_shape( - broadcast_shape, - preconditioner.batch_shape_tensor()) - static_broadcast_shape = tf.broadcast_static_shape( - static_broadcast_shape, - preconditioner.batch_shape) - broadcast_rhs_shape = tf.concat([broadcast_shape, [tf.shape(rhs)[-1]]], -1) - static_broadcast_rhs_shape = static_broadcast_shape.concatenate( - [rhs.shape[-1]]) - r0 = tf.broadcast_to(rhs, broadcast_rhs_shape) - tol *= tf.norm(r0, axis=-1) - - if x is None: - x = tf.zeros( - broadcast_rhs_shape, dtype=rhs.dtype.base_dtype) - x = tf.ensure_shape(x, static_broadcast_rhs_shape) - else: - r0 = rhs - tf.linalg.matvec(operator, x) - if preconditioner is None: - p0 = r0 - else: - p0 = tf.linalg.matvec(preconditioner, r0) - gamma0 = dot(r0, p0) - i = tf.constant(0, dtype=tf.int32) - state = cg_state(i=i, x=x, r=r0, p=p0, gamma=gamma0) - _, state = tf.while_loop( - stopping_criterion, cg_step, [i, state]) - - if isinstance(operator, linalg_imaging.LinalgImagingMixin): - x = operator.expand_range_dimension(state.x) - else: - x = state.x - - return cg_state( - state.i, - x=x, - r=state.r, - p=state.p, - gamma=state.gamma) diff --git a/tensorflow_mri/python/ops/linalg_ops_test.py b/tensorflow_mri/python/ops/linalg_ops_test.py deleted file mode 100755 index 6dabf224..00000000 --- a/tensorflow_mri/python/ops/linalg_ops_test.py +++ /dev/null @@ -1,686 +0,0 @@ -# Copyright 2021 University College London. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Tests for module `linalg_ops`.""" -# pylint: disable=missing-class-docstring,missing-function-docstring - -from absl.testing import parameterized -import numpy as np -import tensorflow as tf - -from tensorflow_mri.python.ops import fft_ops -from tensorflow_mri.python.ops import geom_ops -from tensorflow_mri.python.ops import image_ops -from tensorflow_mri.python.ops import linalg_ops -from tensorflow_mri.python.ops import traj_ops -from tensorflow_mri.python.ops import wavelet_ops -from tensorflow_mri.python.util import test_util - - -class LinearOperatorNUFFTTest(test_util.TestCase): - @parameterized.named_parameters( - ("normalized", "ortho"), - ("unnormalized", None) - ) - def test_general(self, norm): - shape = [8, 12] - n_points = 100 - rank = 2 - rng = np.random.default_rng() - traj = rng.uniform(low=-np.pi, high=np.pi, size=(n_points, rank)) - traj = traj.astype(np.float32) - linop = linalg_ops.LinearOperatorNUFFT(shape, traj, norm=norm) - - self.assertIsInstance(linop.domain_shape, tf.TensorShape) - self.assertIsInstance(linop.domain_shape_tensor(), tf.Tensor) - self.assertIsInstance(linop.range_shape, tf.TensorShape) - self.assertIsInstance(linop.range_shape_tensor(), tf.Tensor) - self.assertIsInstance(linop.batch_shape, tf.TensorShape) - self.assertIsInstance(linop.batch_shape_tensor(), tf.Tensor) - self.assertAllClose(shape, linop.domain_shape) - self.assertAllClose(shape, linop.domain_shape_tensor()) - self.assertAllClose([n_points], linop.range_shape) - 
self.assertAllClose([n_points], linop.range_shape_tensor()) - self.assertAllClose([], linop.batch_shape) - self.assertAllClose([], linop.batch_shape_tensor()) - - # Check forward. - x = (rng.uniform(size=shape).astype(np.float32) + - rng.uniform(size=shape).astype(np.float32) * 1j) - expected_forward = fft_ops.nufft(x, traj) - if norm: - expected_forward /= np.sqrt(np.prod(shape)) - result_forward = linop.transform(x) - self.assertAllClose(expected_forward, result_forward, rtol=1e-5, atol=1e-5) - - # Check adjoint. - expected_adjoint = fft_ops.nufft(result_forward, traj, grid_shape=shape, - transform_type="type_1", - fft_direction="backward") - if norm: - expected_adjoint /= np.sqrt(np.prod(shape)) - result_adjoint = linop.transform(result_forward, adjoint=True) - self.assertAllClose(expected_adjoint, result_adjoint, rtol=1e-5, atol=1e-5) - - - @parameterized.named_parameters( - ("normalized", "ortho"), - ("unnormalized", None) - ) - def test_with_batch_dim(self, norm): - shape = [8, 12] - n_points = 100 - batch_size = 4 - traj_shape = [batch_size, n_points] - rank = 2 - rng = np.random.default_rng() - traj = rng.uniform(low=-np.pi, high=np.pi, size=(*traj_shape, rank)) - traj = traj.astype(np.float32) - linop = linalg_ops.LinearOperatorNUFFT(shape, traj, norm=norm) - - self.assertIsInstance(linop.domain_shape, tf.TensorShape) - self.assertIsInstance(linop.domain_shape_tensor(), tf.Tensor) - self.assertIsInstance(linop.range_shape, tf.TensorShape) - self.assertIsInstance(linop.range_shape_tensor(), tf.Tensor) - self.assertIsInstance(linop.batch_shape, tf.TensorShape) - self.assertIsInstance(linop.batch_shape_tensor(), tf.Tensor) - self.assertAllClose(shape, linop.domain_shape) - self.assertAllClose(shape, linop.domain_shape_tensor()) - self.assertAllClose([n_points], linop.range_shape) - self.assertAllClose([n_points], linop.range_shape_tensor()) - self.assertAllClose([batch_size], linop.batch_shape) - self.assertAllClose([batch_size], linop.batch_shape_tensor()) - 
- # Check forward. - x = (rng.uniform(size=shape).astype(np.float32) + - rng.uniform(size=shape).astype(np.float32) * 1j) - expected_forward = fft_ops.nufft(x, traj) - if norm: - expected_forward /= np.sqrt(np.prod(shape)) - result_forward = linop.transform(x) - self.assertAllClose(expected_forward, result_forward, rtol=1e-5, atol=1e-5) - - # Check adjoint. - expected_adjoint = fft_ops.nufft(result_forward, traj, grid_shape=shape, - transform_type="type_1", - fft_direction="backward") - if norm: - expected_adjoint /= np.sqrt(np.prod(shape)) - result_adjoint = linop.transform(result_forward, adjoint=True) - self.assertAllClose(expected_adjoint, result_adjoint, rtol=1e-5, atol=1e-5) - - - @parameterized.named_parameters( - ("normalized", "ortho"), - ("unnormalized", None) - ) - def test_with_extra_dim(self, norm): - shape = [8, 12] - n_points = 100 - batch_size = 4 - traj_shape = [batch_size, n_points] - rank = 2 - rng = np.random.default_rng() - traj = rng.uniform(low=-np.pi, high=np.pi, size=(*traj_shape, rank)) - traj = traj.astype(np.float32) - linop = linalg_ops.LinearOperatorNUFFT( - [batch_size, *shape], traj, norm=norm) - - self.assertIsInstance(linop.domain_shape, tf.TensorShape) - self.assertIsInstance(linop.domain_shape_tensor(), tf.Tensor) - self.assertIsInstance(linop.range_shape, tf.TensorShape) - self.assertIsInstance(linop.range_shape_tensor(), tf.Tensor) - self.assertIsInstance(linop.batch_shape, tf.TensorShape) - self.assertIsInstance(linop.batch_shape_tensor(), tf.Tensor) - self.assertAllClose([batch_size, *shape], linop.domain_shape) - self.assertAllClose([batch_size, *shape], linop.domain_shape_tensor()) - self.assertAllClose([batch_size, n_points], linop.range_shape) - self.assertAllClose([batch_size, n_points], linop.range_shape_tensor()) - self.assertAllClose([], linop.batch_shape) - self.assertAllClose([], linop.batch_shape_tensor()) - - # Check forward. 
- x = (rng.uniform(size=[batch_size, *shape]).astype(np.float32) + - rng.uniform(size=[batch_size, *shape]).astype(np.float32) * 1j) - expected_forward = fft_ops.nufft(x, traj) - if norm: - expected_forward /= np.sqrt(np.prod(shape)) - result_forward = linop.transform(x) - self.assertAllClose(expected_forward, result_forward, rtol=1e-5, atol=1e-5) - - # Check adjoint. - expected_adjoint = fft_ops.nufft(result_forward, traj, grid_shape=shape, - transform_type="type_1", - fft_direction="backward") - if norm: - expected_adjoint /= np.sqrt(np.prod(shape)) - result_adjoint = linop.transform(result_forward, adjoint=True) - self.assertAllClose(expected_adjoint, result_adjoint, rtol=1e-5, atol=1e-5) - - - def test_with_density(self): - image_shape = (128, 128) - image = image_ops.phantom(shape=image_shape, dtype=tf.complex64) - trajectory = traj_ops.radial_trajectory( - 128, 128, flatten_encoding_dims=True) - density = traj_ops.radial_density( - 128, 128, flatten_encoding_dims=True) - weights = tf.cast(tf.math.sqrt(tf.math.reciprocal_no_nan(density)), - tf.complex64) - - linop = linalg_ops.LinearOperatorNUFFT( - image_shape, trajectory=trajectory) - linop_d = linalg_ops.LinearOperatorNUFFT( - image_shape, trajectory=trajectory, density=density) - - # Test forward. - kspace = linop.transform(image) - kspace_d = linop_d.transform(image) - self.assertAllClose(kspace * weights, kspace_d) - - # Test adjoint and precompensate function. 
- recon = linop.transform(linop.precompensate(kspace) * weights * weights, - adjoint=True) - recon_d1 = linop_d.transform(kspace_d, adjoint=True) - recon_d2 = linop_d.transform(linop_d.precompensate(kspace), adjoint=True) - self.assertAllClose(recon, recon_d1) - self.assertAllClose(recon, recon_d2) - - -class LinearOperatorGramNUFFTTest(test_util.TestCase): - @parameterized.product( - density=[False, True], - norm=[None, 'ortho'], - toeplitz=[False, True], - batch=[False, True] - ) - def test_general(self, density, norm, toeplitz, batch): - with tf.device('/cpu:0'): - image_shape = (128, 128) - image = image_ops.phantom(shape=image_shape, dtype=tf.complex64) - trajectory = traj_ops.radial_trajectory( - 128, 129, flatten_encoding_dims=True) - if density is True: - density = traj_ops.radial_density( - 128, 129, flatten_encoding_dims=True) - else: - density = None - - # If testing batches, create new inputs to generate a batch. - if batch: - image = tf.stack([image, image * 0.5]) - trajectory = tf.stack([ - trajectory, geom_ops.rotate_2d(trajectory, [np.pi / 2])]) - if density is not None: - density = tf.stack([density, density]) - - linop = linalg_ops.LinearOperatorNUFFT( - image_shape, trajectory=trajectory, density=density, norm=norm) - linop_gram = linalg_ops.LinearOperatorGramNUFFT( - image_shape, trajectory=trajectory, density=density, norm=norm, - toeplitz=toeplitz) - - recon = linop.transform(linop.transform(image), adjoint=True) - recon_gram = linop_gram.transform(image) - - if norm is None: - # Reduce the magnitude of these values to avoid the need to use a large - # tolerance. 
- recon /= tf.cast(tf.math.reduce_prod(image_shape), tf.complex64) - recon_gram /= tf.cast(tf.math.reduce_prod(image_shape), tf.complex64) - - self.assertAllClose(recon, recon_gram, rtol=1e-4, atol=1e-4) - - -class LinearOperatorFiniteDifferenceTest(test_util.TestCase): - """Tests for difference linear operator.""" - @classmethod - def setUpClass(cls): - super().setUpClass() - cls.linop1 = linalg_ops.LinearOperatorFiniteDifference([4]) - cls.linop2 = linalg_ops.LinearOperatorFiniteDifference([4, 4], axis=-2) - cls.matrix1 = tf.convert_to_tensor([[-1, 1, 0, 0], - [0, -1, 1, 0], - [0, 0, -1, 1]], dtype=tf.float32) - - def test_transform(self): - """Test transform method.""" - signal = tf.random.normal([4, 4]) - result = self.linop2.transform(signal) - self.assertAllClose(result, np.diff(signal, axis=-2)) - - def test_matvec(self): - """Test matvec method.""" - signal = tf.constant([1, 2, 4, 8], dtype=tf.float32) - result = tf.linalg.matvec(self.linop1, signal) - self.assertAllClose(result, [1, 2, 4]) - self.assertAllClose(result, np.diff(signal)) - self.assertAllClose(result, tf.linalg.matvec(self.matrix1, signal)) - - signal2 = tf.range(16, dtype=tf.float32) - result = tf.linalg.matvec(self.linop2, signal2) - self.assertAllClose(result, [4] * 12) - - def test_matvec_adjoint(self): - """Test matvec with adjoint.""" - signal = tf.constant([1, 2, 4], dtype=tf.float32) - result = tf.linalg.matvec(self.linop1, signal, adjoint_a=True) - self.assertAllClose(result, - tf.linalg.matvec(tf.transpose(self.matrix1), signal)) - - def test_shapes(self): - """Test shapes.""" - self._test_all_shapes(self.linop1, [4], [3]) - self._test_all_shapes(self.linop2, [4, 4], [3, 4]) - - def _test_all_shapes(self, linop, domain_shape, range_shape): - """Test shapes.""" - self.assertIsInstance(linop.domain_shape, tf.TensorShape) - self.assertAllEqual(linop.domain_shape, domain_shape) - self.assertAllEqual(linop.domain_shape_tensor(), domain_shape) - - self.assertIsInstance(linop.range_shape, 
tf.TensorShape) - self.assertAllEqual(linop.range_shape, range_shape) - self.assertAllEqual(linop.range_shape_tensor(), range_shape) - - -class LinearOperatorWaveletTest(test_util.TestCase): - @parameterized.named_parameters( - # name, wavelet, level, axes, domain_shape, range_shape - ("test0", "haar", None, None, [6, 6], [7, 7]), - ("test1", "haar", 1, None, [6, 6], [6, 6]), - ("test2", "haar", None, -1, [6, 6], [6, 7]), - ("test3", "haar", None, [-1], [6, 6], [6, 7]) - ) - def test_general(self, wavelet, level, axes, domain_shape, range_shape): - # Instantiate. - linop = linalg_ops.LinearOperatorWavelet( - domain_shape, wavelet=wavelet, level=level, axes=axes) - - # Example data. - data = np.arange(np.prod(domain_shape)).reshape(domain_shape) - data = data.astype("float32") - - # Forward and adjoint. - expected_forward, coeff_slices = wavelet_ops.coeffs_to_tensor( - wavelet_ops.wavedec(data, wavelet=wavelet, level=level, axes=axes), - axes=axes) - expected_adjoint = wavelet_ops.waverec( - wavelet_ops.tensor_to_coeffs(expected_forward, coeff_slices), - wavelet=wavelet, axes=axes) - - # Test shapes. - self.assertAllClose(domain_shape, linop.domain_shape) - self.assertAllClose(domain_shape, linop.domain_shape_tensor()) - self.assertAllClose(range_shape, linop.range_shape) - self.assertAllClose(range_shape, linop.range_shape_tensor()) - - # Test transform. - result_forward = linop.transform(data) - result_adjoint = linop.transform(result_forward, adjoint=True) - self.assertAllClose(expected_forward, result_forward) - self.assertAllClose(expected_adjoint, result_adjoint) - - def test_with_batch_inputs(self): - """Test batch shape.""" - axes = [-2, -1] - data = np.arange(4 * 8 * 8).reshape(4, 8, 8).astype("float32") - linop = linalg_ops.LinearOperatorWavelet((8, 8), wavelet="haar", level=1) - - # Forward and adjoint. 
- expected_forward, coeff_slices = wavelet_ops.coeffs_to_tensor( - wavelet_ops.wavedec(data, wavelet='haar', level=1, axes=axes), - axes=axes) - expected_adjoint = wavelet_ops.waverec( - wavelet_ops.tensor_to_coeffs(expected_forward, coeff_slices), - wavelet='haar', axes=axes) - - result_forward = linop.transform(data) - self.assertAllClose(expected_forward, result_forward) - - result_adjoint = linop.transform(result_forward, adjoint=True) - self.assertAllClose(expected_adjoint, result_adjoint) - - -class LinearOperatorMRITest(test_util.TestCase): - """Tests for MRI linear operator.""" - @classmethod - def setUpClass(cls): - super().setUpClass() - cls.linop1 = linalg_ops.LinearOperatorMRI([2, 2], fft_norm=None) - cls.linop2 = linalg_ops.LinearOperatorMRI( - [2, 2], mask=[[False, False], [True, True]], fft_norm=None) - cls.linop3 = linalg_ops.LinearOperatorMRI( - [2, 2], mask=[[[True, True], [False, False]], - [[False, False], [True, True]], - [[False, True], [True, False]]], fft_norm=None) - - def test_fft(self): - """Test FFT operator.""" - # Test init. - linop = linalg_ops.LinearOperatorMRI([2, 2], fft_norm=None) - - # Test matvec. - signal = tf.constant([1, 2, 4, 4], dtype=tf.complex64) - expected = [-1, 5, 1, 11] - result = tf.linalg.matvec(linop, signal) - self.assertAllClose(expected, result) - - # Test domain shape. - self.assertIsInstance(linop.domain_shape, tf.TensorShape) - self.assertAllEqual([2, 2], linop.domain_shape) - self.assertAllEqual([2, 2], linop.domain_shape_tensor()) - - # Test range shape. - self.assertIsInstance(linop.range_shape, tf.TensorShape) - self.assertAllEqual([2, 2], linop.range_shape) - self.assertAllEqual([2, 2], linop.range_shape_tensor()) - - # Test batch shape. - self.assertIsInstance(linop.batch_shape, tf.TensorShape) - self.assertAllEqual([], linop.batch_shape) - self.assertAllEqual([], linop.batch_shape_tensor()) - - def test_fft_with_mask(self): - """Test FFT operator with mask.""" - # Test init. 
- linop = linalg_ops.LinearOperatorMRI( - [2, 2], mask=[[False, False], [True, True]], fft_norm=None) - - # Test matvec. - signal = tf.constant([1, 2, 4, 4], dtype=tf.complex64) - expected = [0, 0, 1, 11] - result = tf.linalg.matvec(linop, signal) - self.assertAllClose(expected, result) - - # Test domain shape. - self.assertIsInstance(linop.domain_shape, tf.TensorShape) - self.assertAllEqual([2, 2], linop.domain_shape) - self.assertAllEqual([2, 2], linop.domain_shape_tensor()) - - # Test range shape. - self.assertIsInstance(linop.range_shape, tf.TensorShape) - self.assertAllEqual([2, 2], linop.range_shape) - self.assertAllEqual([2, 2], linop.range_shape_tensor()) - - # Test batch shape. - self.assertIsInstance(linop.batch_shape, tf.TensorShape) - self.assertAllEqual([], linop.batch_shape) - self.assertAllEqual([], linop.batch_shape_tensor()) - - def test_fft_with_batch_mask(self): - """Test FFT operator with batch mask.""" - # Test init. - linop = linalg_ops.LinearOperatorMRI( - [2, 2], mask=[[[True, True], [False, False]], - [[False, False], [True, True]], - [[False, True], [True, False]]], fft_norm=None) - - # Test matvec. - signal = tf.constant([1, 2, 4, 4], dtype=tf.complex64) - expected = [[-1, 5, 0, 0], [0, 0, 1, 11], [0, 5, 1, 0]] - result = tf.linalg.matvec(linop, signal) - self.assertAllClose(expected, result) - - # Test domain shape. - self.assertIsInstance(linop.domain_shape, tf.TensorShape) - self.assertAllEqual([2, 2], linop.domain_shape) - self.assertAllEqual([2, 2], linop.domain_shape_tensor()) - - # Test range shape. - self.assertIsInstance(linop.range_shape, tf.TensorShape) - self.assertAllEqual([2, 2], linop.range_shape) - self.assertAllEqual([2, 2], linop.range_shape_tensor()) - - # Test batch shape. 
- self.assertIsInstance(linop.batch_shape, tf.TensorShape) - self.assertAllEqual([3], linop.batch_shape) - self.assertAllEqual([3], linop.batch_shape_tensor()) - - def test_fft_norm(self): - """Test FFT normalization.""" - linop = linalg_ops.LinearOperatorMRI([2, 2], fft_norm='ortho') - x = tf.constant([1 + 2j, 2 - 2j, -1 - 6j, 3 + 4j], dtype=tf.complex64) - # With norm='ortho', subsequent application of the operator and its adjoint - # should not scale the input. - y = tf.linalg.matvec(linop.H, tf.linalg.matvec(linop, x)) - self.assertAllClose(x, y) - - def test_nufft_with_sensitivities(self): - resolution = 128 - image_shape = [resolution, resolution] - num_coils = 4 - image, sensitivities = image_ops.phantom( - shape=image_shape, num_coils=num_coils, dtype=tf.complex64, - return_sensitivities=True) - image = image_ops.phantom(shape=image_shape, dtype=tf.complex64) - trajectory = traj_ops.radial_trajectory(resolution, resolution // 2 + 1, - flatten_encoding_dims=True) - density = traj_ops.radial_density(resolution, resolution // 2 + 1, - flatten_encoding_dims=True) - - linop = linalg_ops.LinearOperatorMRI( - image_shape, trajectory=trajectory, density=density, - sensitivities=sensitivities) - - # Test shapes. - expected_domain_shape = image_shape - self.assertAllClose(expected_domain_shape, linop.domain_shape) - self.assertAllClose(expected_domain_shape, linop.domain_shape_tensor()) - expected_range_shape = [num_coils, (2 * resolution) * (resolution // 2 + 1)] - self.assertAllClose(expected_range_shape, linop.range_shape) - self.assertAllClose(expected_range_shape, linop.range_shape_tensor()) - - # Test forward. - weights = tf.cast(tf.math.sqrt(tf.math.reciprocal_no_nan(density)), - tf.complex64) - norm = tf.math.sqrt(tf.cast(tf.math.reduce_prod(image_shape), tf.complex64)) - expected = fft_ops.nufft(image * sensitivities, trajectory) * weights / norm - kspace = linop.transform(image) - self.assertAllClose(expected, kspace) - - # Test adjoint. 
- expected = tf.math.reduce_sum( - fft_ops.nufft( - kspace * weights, trajectory, grid_shape=image_shape, - transform_type='type_1', fft_direction='backward') / norm * - tf.math.conj(sensitivities), axis=-3) - recon = linop.transform(kspace, adjoint=True) - self.assertAllClose(expected, recon) - - -class LinearOperatorGramMRITest(test_util.TestCase): - @parameterized.product(batch=[False, True], extra=[False, True], - toeplitz_nufft=[False, True]) - def test_general(self, batch, extra, toeplitz_nufft): - resolution = 128 - image_shape = [resolution, resolution] - num_coils = 4 - image, sensitivities = image_ops.phantom( - shape=image_shape, num_coils=num_coils, dtype=tf.complex64, - return_sensitivities=True) - image = image_ops.phantom(shape=image_shape, dtype=tf.complex64) - trajectory = traj_ops.radial_trajectory(resolution, resolution // 2 + 1, - flatten_encoding_dims=True) - density = traj_ops.radial_density(resolution, resolution // 2 + 1, - flatten_encoding_dims=True) - if batch: - image = tf.stack([image, image * 2]) - if extra: - extra_shape = [2] - else: - extra_shape = None - else: - extra_shape = None - - linop = linalg_ops.LinearOperatorMRI( - image_shape, extra_shape=extra_shape, - trajectory=trajectory, density=density, - sensitivities=sensitivities) - linop_gram = linalg_ops.LinearOperatorGramMRI( - image_shape, extra_shape=extra_shape, - trajectory=trajectory, density=density, - sensitivities=sensitivities, toeplitz_nufft=toeplitz_nufft) - - # Test shapes. - expected_domain_shape = image_shape - if extra_shape is not None: - expected_domain_shape = extra_shape + image_shape - self.assertAllClose(expected_domain_shape, linop_gram.domain_shape) - self.assertAllClose(expected_domain_shape, linop_gram.domain_shape_tensor()) - self.assertAllClose(expected_domain_shape, linop_gram.range_shape) - self.assertAllClose(expected_domain_shape, linop_gram.range_shape_tensor()) - - # Test transform. 
- expected = linop.transform(linop.transform(image), adjoint=True) - self.assertAllClose(expected, linop_gram.transform(image), - rtol=1e-4, atol=1e-4) - - -# Copyright 2019 The TensorFlow Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -@test_util.run_all_in_graph_and_eager_modes -class ConjugateGradientTest(test_util.TestCase): - """Tests for op `conjugate_gradient`.""" - @parameterized.product(dtype=[np.float32, np.float64], - shape=[[1, 1], [4, 4], [10, 10]], - use_static_shape=[True, False]) - def test_conjugate_gradient(self, dtype, shape, use_static_shape): # pylint: disable=missing-param-doc - """Test CG method.""" - np.random.seed(1) - a_np = np.random.uniform( - low=-1.0, high=1.0, size=np.prod(shape)).reshape(shape).astype(dtype) - # Make a self-adjoint, positive definite. 
- a_np = np.dot(a_np.T, a_np) - # jacobi preconditioner - jacobi_np = np.zeros_like(a_np) - jacobi_np[range(a_np.shape[0]), range(a_np.shape[1])] = ( - 1.0 / a_np.diagonal()) - rhs_np = np.random.uniform( - low=-1.0, high=1.0, size=shape[0]).astype(dtype) - x_np = np.zeros_like(rhs_np) - tol = 1e-6 if dtype == np.float64 else 1e-3 - max_iterations = 20 - - if use_static_shape: - a = tf.constant(a_np) - rhs = tf.constant(rhs_np) - x = tf.constant(x_np) - jacobi = tf.constant(jacobi_np) - else: - a = tf.compat.v1.placeholder_with_default(a_np, shape=None) - rhs = tf.compat.v1.placeholder_with_default(rhs_np, shape=None) - x = tf.compat.v1.placeholder_with_default(x_np, shape=None) - jacobi = tf.compat.v1.placeholder_with_default(jacobi_np, shape=None) - - operator = tf.linalg.LinearOperatorFullMatrix( - a, is_positive_definite=True, is_self_adjoint=True) - preconditioners = [ - None, - # Preconditioner that does nothing beyond change shape. - tf.linalg.LinearOperatorIdentity( - a_np.shape[-1], - dtype=a_np.dtype, - is_positive_definite=True, - is_self_adjoint=True), - # Jacobi preconditioner. - tf.linalg.LinearOperatorFullMatrix( - jacobi, - is_positive_definite=True, - is_self_adjoint=True), - ] - cg_results = [] - for preconditioner in preconditioners: - cg_graph = linalg_ops.conjugate_gradient( - operator, - rhs, - preconditioner=preconditioner, - x=x, - tol=tol, - max_iterations=max_iterations) - cg_val = self.evaluate(cg_graph) - norm_r0 = np.linalg.norm(rhs_np) - norm_r = np.linalg.norm(cg_val.r) - self.assertLessEqual(norm_r, tol * norm_r0) - # Validate that we get an equally small residual norm with numpy - # using the computed solution. 
- r_np = rhs_np - np.dot(a_np, cg_val.x) - norm_r_np = np.linalg.norm(r_np) - self.assertLessEqual(norm_r_np, tol * norm_r0) - cg_results.append(cg_val) - - # Validate that we get same results using identity_preconditioner - # and None - self.assertEqual(cg_results[0].i, cg_results[1].i) - self.assertAlmostEqual(cg_results[0].gamma, cg_results[1].gamma) - self.assertAllClose(cg_results[0].r, cg_results[1].r, rtol=tol) - self.assertAllClose(cg_results[0].x, cg_results[1].x, rtol=tol) - self.assertAllClose(cg_results[0].p, cg_results[1].p, rtol=tol) - - def test_bypass_gradient(self): - """Tests the `bypass_gradient` argument.""" - dtype = np.float32 - shape = [4, 4] - np.random.seed(1) - a_np = np.random.uniform( - low=-1.0, high=1.0, size=np.prod(shape)).reshape(shape).astype(dtype) - # Make a self-adjoint, positive definite. - a_np = np.dot(a_np.T, a_np) - - rhs_np = np.random.uniform( - low=-1.0, high=1.0, size=shape[0]).astype(dtype) - - tol = 1e-3 - max_iterations = 20 - - a = tf.constant(a_np) - rhs = tf.constant(rhs_np) - operator = tf.linalg.LinearOperatorFullMatrix( - a, is_positive_definite=True, is_self_adjoint=True) - - with tf.GradientTape(persistent=True) as tape: - tape.watch(rhs) - result = linalg_ops.conjugate_gradient( - operator, - rhs, - tol=tol, - max_iterations=max_iterations) - result_bypass = linalg_ops.conjugate_gradient( - operator, - rhs, - tol=tol, - max_iterations=max_iterations, - bypass_gradient=True) - - grad = tape.gradient(result.x, rhs) - grad_bypass = tape.gradient(result_bypass.x, rhs) - self.assertAllClose(result, result_bypass) - self.assertAllClose(grad, grad_bypass, rtol=tol) - - -if __name__ == '__main__': - tf.test.main() diff --git a/tensorflow_mri/python/ops/math_ops.py b/tensorflow_mri/python/ops/math_ops.py index 28dfe95f..373a988b 100644 --- a/tensorflow_mri/python/ops/math_ops.py +++ b/tensorflow_mri/python/ops/math_ops.py @@ -1,4 +1,4 @@ -# Copyright 2021 University College London. All Rights Reserved. 
+# Copyright 2021 The TensorFlow MRI Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -253,7 +253,7 @@ def block_soft_threshold(x, threshold, name=None): r"""Block soft thresholding operator. In the context of proximal gradient methods, this function is the proximal - operator of :math:`f = {\left\| x \right\|}_{2}` (L2 norm). + operator of $f = {\left\| x \right\|}_{2}$ (L2 norm). Args: x: A `Tensor` of shape `[..., n]`. @@ -280,7 +280,7 @@ def shrinkage(x, threshold, name=None): r"""Shrinkage operator. In the context of proximal gradient methods, this function is the proximal - operator of :math:`f = \frac{1}{2}{\left\| x \right\|}_{2}^{2}`. + operator of $f = \frac{1}{2}{\left\| x \right\|}_{2}^{2}$. Args: x: A `Tensor` of shape `[..., n]`. @@ -302,7 +302,7 @@ def soft_threshold(x, threshold, name=None): r"""Soft thresholding operator. In the context of proximal gradient methods, this function is the proximal - operator of :math:`f = {\left\| x \right\|}_{1}` (L1 norm). + operator of $f = {\left\| x \right\|}_{1}$ (L1 norm). Args: x: A `Tensor` of shape `[..., n]`. @@ -326,11 +326,12 @@ def indicator_box(x, lower_bound=-1.0, upper_bound=1.0, name=None): Returns `0` if `x` is in the box, `inf` otherwise. - The box of radius :math:`r` is defined as the set of points of - :math:`{R}^{n}` whose components are within the range :math:`[l, u]`. + The box of radius $r$ is defined as the set of points of + ${R}^{n}$ whose components are within the range $[l, u]$. - .. math:: + $$ \mathcal{C} = \left\{x \in \mathbb{R}^{n} : l \leq x_i \leq u, \forall i = 1, \dots, n \right\} + $$ Args: x: A `tf.Tensor` of shape `[..., n]`. @@ -378,13 +379,14 @@ def indicator_simplex(x, radius=1.0, name=None): Returns `0` if `x` is in the simplex, `inf` otherwise. 
- The simplex of radius :math:`r` is defined as the set of points of - :math:`\mathbb{R}^{n}` whose elements are nonnegative and sum up to `r`. + The simplex of radius $r$ is defined as the set of points of + $\mathbb{R}^{n}$ whose elements are nonnegative and sum up to `r`. - .. math:: + $$ \Delta_r = \left\{x \in \mathbb{R}^{n} : \sum_{i=1}^{n} x_i = r \text{ and } x_i >= 0, \forall i = 1, \dots, n \right\} + $$ - If :math:`r` is 1, the simplex is also called the unit simplex, standard + If $r$ is 1, the simplex is also called the unit simplex, standard simplex or probability simplex. Args: @@ -426,14 +428,15 @@ def indicator_ball(x, order=2, radius=1.0, name=None): Returns `0` if `x` is in the Lp ball, `inf` otherwise. - The :math:`L_p` ball of radius :math:`r` is defined as the set of points of - :math:`{R}^{n}` whose distance from the origin, as defined by the :math:`L_p` - norm, is less than or equal to :math:`r`. + The $L_p$ ball of radius $r$ is defined as the set of points of + ${R}^{n}$ whose distance from the origin, as defined by the $L_p$ + norm, is less than or equal to $r$. - .. math:: + $$ \mathcal{B}_r = \left\{x \in \mathbb{R}^{n} : \left\|x\right\|_{p} \leq r \right\} + $$ - If :math:`r` is 1, this ball is also called the unit ball of the + If $r$ is 1, this ball is also called the unit ball of the :math`L_p` norm. Args: @@ -501,7 +504,7 @@ def project_onto_simplex(x, radius=1.0, name=None): ValueError: If inputs are invalid. References: - .. [1] Duchi, J., Shalev-Shwartz, S., Singer, Y., & Chandra, T. (2008). + 1. Duchi, J., Shalev-Shwartz, S., Singer, Y., & Chandra, T. (2008). Efficient projections onto the l1-ball for learning in high dimensions. In Proceedings of the 25th International Conference on Machine Learning (pp. 272-279). @@ -556,10 +559,10 @@ def project_onto_ball(x, order=2, radius=1.0, name=None): ValueError: If inputs are invalid. References: - .. [1] Parikh, N., & Boyd, S. (2014). Proximal algorithms. Foundations and + 1. 
Parikh, N., & Boyd, S. (2014). Proximal algorithms. Foundations and Trends in optimization, 1(3), 127-239. - .. [2] Duchi, J., Shalev-Shwartz, S., Singer, Y., & Chandra, T. (2008). + 2. Duchi, J., Shalev-Shwartz, S., Singer, Y., & Chandra, T. (2008). Efficient projections onto the l1-ball for learning in high dimensions. In Proceedings of the 25th International Conference on Machine Learning (pp. 272-279). diff --git a/tensorflow_mri/python/ops/math_ops_test.py b/tensorflow_mri/python/ops/math_ops_test.py index ffcf6aa7..421350e8 100644 --- a/tensorflow_mri/python/ops/math_ops_test.py +++ b/tensorflow_mri/python/ops/math_ops_test.py @@ -1,4 +1,4 @@ -# Copyright 2021 University College London. All Rights Reserved. +# Copyright 2021 The TensorFlow MRI Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tensorflow_mri/python/ops/optimizer_ops.py b/tensorflow_mri/python/ops/optimizer_ops.py index 05367749..9cc9a79a 100644 --- a/tensorflow_mri/python/ops/optimizer_ops.py +++ b/tensorflow_mri/python/ops/optimizer_ops.py @@ -1,4 +1,4 @@ -# Copyright 2021 University College London. All Rights Reserved. +# Copyright 2021 The TensorFlow MRI Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -23,8 +23,8 @@ import tensorflow as tf import tensorflow_probability as tfp +from tensorflow_mri.python.linalg import conjugate_gradient from tensorflow_mri.python.ops import convex_ops -from tensorflow_mri.python.ops import linalg_ops from tensorflow_mri.python.util import api_util from tensorflow_mri.python.util import linalg_ext from tensorflow_mri.python.util import prefer_static @@ -191,11 +191,11 @@ def admm_minimize(function_f, name=None): r"""Applies the ADMM algorithm to minimize a separable convex function. 
- Minimizes :math:`f(x) + g(z)`, subject to :math:`Ax + Bz = c`. + Minimizes $f(x) + g(z)$, subject to $Ax + Bz = c$. - If :math:`A`, :math:`B` and :math:`c` are not provided, the constraint - defaults to :math:`x - z = 0`, in which case the problem is equivalent to - minimizing :math:`f(x) + g(x)`. + If $A$, $B$ and $c$ are not provided, the constraint + defaults to $x - z = 0$, in which case the problem is equivalent to + minimizing $f(x) + g(x)$. Args: function_f: A `tfmri.convex.ConvexFunction` of shape `[..., n]` and real or @@ -218,7 +218,7 @@ def admm_minimize(function_f, of iterations of the ADMM update. linearized: A `boolean`. If `True`, use linearized variant of the ADMM algorithm. Linearized ADMM solves problems of the form - :math:`f(x) + g(Ax)` and only requires evaluation of the proximal operator + $f(x) + g(Ax)$ and only requires evaluation of the proximal operator of `g(x)`. This is useful when the proximal operator of `g(Ax)` cannot be easily evaluated, but the proximal operator of `g(x)` can. Defaults to `False`. @@ -255,7 +255,7 @@ def admm_minimize(function_f, during the search. References: - .. [1] Boyd, S., Parikh, N., & Chu, E. (2011). Distributed optimization and + 1. Boyd, S., Parikh, N., & Chu, E. (2011). Distributed optimization and statistical learning via the alternating direction method of multipliers. Now Publishers Inc. @@ -452,8 +452,8 @@ def _get_admm_update_fn(function, operator, prox_kwargs=None): r"""Returns a function for the ADMM update. The returned function evaluates the expression - :math:`{\mathop{\mathrm{argmin}}_x} \left ( f(x) + \frac{\rho}{2} \left\| Ax - v \right\|_2^2 \right )` - for a given input :math:`v` and penalty parameter :math:`\rho`. + ${\mathop{\mathrm{argmin}}_x} \left ( f(x) + \frac{\rho}{2} \left\| Ax - v \right\|_2^2 \right )$ + for a given input $v$ and penalty parameter $\rho$. 
This function will raise an error if the above expression cannot be easily evaluated for the specified convex function and linear operator. @@ -508,7 +508,7 @@ def _update_fn(v, rho): # pylint: disable=function-redefined rhs = (rho * tf.linalg.matvec(operator, v, adjoint_a=True) - function.linear_coefficient) # Solve the linear system using CG (see ref [1], section 4.3.4). - return linalg_ops.conjugate_gradient(ls_operator, rhs, **solver_kwargs).x + return conjugate_gradient.conjugate_gradient(ls_operator, rhs, **solver_kwargs).x return _update_fn diff --git a/tensorflow_mri/python/ops/optimizer_ops_test.py b/tensorflow_mri/python/ops/optimizer_ops_test.py index 859be9e7..af04890a 100755 --- a/tensorflow_mri/python/ops/optimizer_ops_test.py +++ b/tensorflow_mri/python/ops/optimizer_ops_test.py @@ -1,4 +1,4 @@ -# Copyright 2021 University College London. All Rights Reserved. +# Copyright 2021 The TensorFlow MRI Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tensorflow_mri/python/ops/recon_ops.py b/tensorflow_mri/python/ops/recon_ops.py index 7209e557..7655d6f1 100644 --- a/tensorflow_mri/python/ops/recon_ops.py +++ b/tensorflow_mri/python/ops/recon_ops.py @@ -1,4 +1,4 @@ -# Copyright 2021 University College London. All Rights Reserved. +# Copyright 2021 The TensorFlow MRI Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -22,130 +22,19 @@ import tensorflow as tf +from tensorflow_mri.python.coils import coil_combination +from tensorflow_mri.python.linalg import conjugate_gradient +from tensorflow_mri.python.linalg import linear_operator_gram_matrix +from tensorflow_mri.python.linalg import linear_operator_mri from tensorflow_mri.python.ops import array_ops -from tensorflow_mri.python.ops import coil_ops from tensorflow_mri.python.ops import convex_ops from tensorflow_mri.python.ops import fft_ops from tensorflow_mri.python.ops import image_ops -from tensorflow_mri.python.ops import linalg_ops from tensorflow_mri.python.ops import math_ops from tensorflow_mri.python.ops import optimizer_ops from tensorflow_mri.python.ops import signal_ops from tensorflow_mri.python.util import api_util from tensorflow_mri.python.util import check_util -from tensorflow_mri.python.util import deprecation -from tensorflow_mri.python.util import linalg_imaging - - -@api_util.export("recon.adjoint", "recon.adj") -def reconstruct_adj(kspace, - image_shape, - mask=None, - trajectory=None, - density=None, - sensitivities=None, - phase=None, - sens_norm=True): - r"""Reconstructs an MR image using the adjoint MRI operator. - - Given *k*-space data :math:`b`, this function estimates the corresponding - image as :math:`x = A^H b`, where :math:`A` is the MRI linear operator. - - This operator supports Cartesian and non-Cartesian *k*-space data. - - Additional density compensation and intensity correction steps are applied - depending on the input arguments. - - This operator supports batched inputs. All batch shapes should be - broadcastable with each other. - - This operator supports multicoil imaging. Coil combination is triggered - when `sensitivities` is not `None`. If you have multiple coils but wish to - reconstruct each coil separately, simply set `sensitivities` to `None`. The - coil dimension will then be treated as a standard batch dimension (i.e., it - becomes part of `...`). 
- - Args: - kspace: A `Tensor`. The *k*-space samples. Must have type `complex64` or - `complex128`. `kspace` can be either Cartesian or non-Cartesian. A - Cartesian `kspace` must have shape - `[..., num_coils, *image_shape]`, where `...` are batch dimensions. A - non-Cartesian `kspace` must have shape `[..., num_coils, num_samples]`. - If not multicoil (`sensitivities` is `None`), then the `num_coils` axis - must be omitted. - image_shape: A `TensorShape` or a list of `ints`. Must have length 2 or 3. - The shape of the reconstructed image[s]. - mask: An optional `Tensor` of type `bool`. The sampling mask. Must have - shape `[..., image_shape]`. `mask` should be passed for reconstruction - from undersampled Cartesian *k*-space. For each point, `mask` should be - `True` if the corresponding *k*-space sample was measured and `False` - otherwise. - trajectory: An optional `Tensor` of type `float32` or `float64`. Must have - shape `[..., num_samples, rank]`. `trajectory` should be passed for - reconstruction from non-Cartesian *k*-space. - density: An optional `Tensor` of type `float32` or `float64`. The sampling - densities. Must have shape `[..., num_samples]`. This input is only - relevant for non-Cartesian MRI reconstruction. If passed, the MRI linear - operator will include sampling density compensation. If `None`, the MRI - operator will not perform sampling density compensation. - sensitivities: An optional `Tensor` of type `complex64` or `complex128`. - The coil sensitivity maps. Must have shape - `[..., num_coils, *image_shape]`. If provided, a multi-coil parallel - imaging reconstruction will be performed. - phase: An optional `Tensor` of type `float32` or `float64`. Must have shape - `[..., *image_shape]`. A phase estimate for the reconstructed image. If - provided, a phase-constrained reconstruction will be performed. This - improves the conditioning of the reconstruction problem in applications - where there is no interest in the phase data. 
However, artefacts may - appear if an inaccurate phase estimate is passed. - sens_norm: A `boolean`. Whether to normalize coil sensitivities. Defaults to - `True`. - - Returns: - A `Tensor`. The reconstructed image. Has the same type as `kspace` and - shape `[..., *image_shape]`, where `...` is the broadcasted batch shape of - all inputs. - - Notes: - Reconstructs an image by applying the adjoint MRI operator to the *k*-space - data. This typically involves an inverse FFT or a (density-compensated) - NUFFT, and coil combination for multicoil inputs. This type of - reconstruction is often called zero-filled reconstruction, because missing - *k*-space samples are assumed to be zero. Therefore, the resulting image is - likely to display aliasing artefacts if *k*-space is not sufficiently - sampled according to the Nyquist criterion. - """ - kspace = tf.convert_to_tensor(kspace) - - # Create the linear operator. - operator = linalg_ops.LinearOperatorMRI(image_shape, - mask=mask, - trajectory=trajectory, - density=density, - sensitivities=sensitivities, - phase=phase, - fft_norm='ortho', - sens_norm=sens_norm) - rank = operator.rank - - # Apply density compensation, if provided. - if density is not None: - dens_weights_sqrt = tf.math.sqrt(tf.math.reciprocal_no_nan(density)) - dens_weights_sqrt = tf.cast(dens_weights_sqrt, kspace.dtype) - if operator.is_multicoil: - dens_weights_sqrt = tf.expand_dims(dens_weights_sqrt, axis=-2) - kspace *= dens_weights_sqrt - - # Compute zero-filled image using the adjoint operator. - image = operator.H.transform(kspace) - - # Apply intensity correction, if requested. 
- if operator.is_multicoil and sens_norm: - sens_weights_sqrt = tf.math.reciprocal_no_nan( - tf.norm(sensitivities, axis=-(rank + 1), keepdims=False)) - image *= sens_weights_sqrt - - return image @api_util.export("recon.least_squares", "recon.lstsq") @@ -170,11 +59,12 @@ def reconstruct_lstsq(kspace, This is an iterative reconstruction method which formulates the image reconstruction problem as follows: - .. math:: + $$ \hat{x} = {\mathop{\mathrm{argmin}}_x} \left (\left\| Ax - y \right\|_2^2 + g(x) \right ) + $$ - where :math:`A` is the MRI `LinearOperator`, :math:`x` is the solution, `y` is - the measured *k*-space data, and :math:`g(x)` is an optional `ConvexFunction` + where $A$ is the MRI `LinearOperator`, $x$ is the solution, `y` is + the measured *k*-space data, and $g(x)$ is an optional `ConvexFunction` used for regularization. This operator supports Cartesian and non-Cartesian *k*-space data. @@ -213,7 +103,8 @@ def reconstruct_lstsq(kspace, densities. Must have shape `[..., num_samples]`. This input is only relevant for non-Cartesian MRI reconstruction. If passed, the MRI linear operator will include sampling density compensation. If `None`, the MRI - operator will not perform sampling density compensation. + operator will not perform sampling density compensation. Providing + `density` may speed up convergence but results in suboptimal SNR. sensitivities: An optional `Tensor` of type `complex64` or `complex128`. The coil sensitivity maps. Must have shape `[..., num_coils, *image_shape]`. If provided, a multi-coil parallel @@ -249,7 +140,7 @@ def reconstruct_lstsq(kspace, return_optimizer_state: A `boolean`. If `True`, returns the optimizer state along with the reconstructed image. toeplitz_nufft: A `boolean`. If `True`, uses the Toeplitz approach [5] - to compute :math:`F^H F x`, where :math:`F` is the non-uniform Fourier + to compute $F^H F x$, where $F$ is the non-uniform Fourier operator. 
If `False`, the same operation is performed using the standard NUFFT operation. The Toeplitz approach might be faster than the direct approach but is slightly less accurate. This argument is only relevant @@ -278,28 +169,28 @@ def reconstruct_lstsq(kspace, it may be time-consuming, depending on the characteristics of the problem. References: - .. [1] Pruessmann, K.P., Weiger, M., Börnert, P. and Boesiger, P. (2001), + 1. Pruessmann, K.P., Weiger, M., Börnert, P. and Boesiger, P. (2001), Advances in sensitivity encoding with arbitrary k-space trajectories. Magn. Reson. Med., 46: 638-651. https://doi.org/10.1002/mrm.1241 - .. [2] Block, K.T., Uecker, M. and Frahm, J. (2007), Undersampled radial MRI + 2. Block, K.T., Uecker, M. and Frahm, J. (2007), Undersampled radial MRI with multiple coils. Iterative image reconstruction using a total variation constraint. Magn. Reson. Med., 57: 1086-1098. https://doi.org/10.1002/mrm.21236 - .. [3] Feng, L., Grimm, R., Block, K.T., Chandarana, H., Kim, S., Xu, J., + 3. Feng, L., Grimm, R., Block, K.T., Chandarana, H., Kim, S., Xu, J., Axel, L., Sodickson, D.K. and Otazo, R. (2014), Golden-angle radial sparse parallel MRI: Combination of compressed sensing, parallel imaging, and golden-angle radial sampling for fast and flexible dynamic volumetric MRI. Magn. Reson. Med., 72: 707-717. https://doi.org/10.1002/mrm.24980 - .. [4] Tsao, J., Boesiger, P., & Pruessmann, K. P. (2003). k-t BLAST and + 4. Tsao, J., Boesiger, P., & Pruessmann, K. P. (2003). k-t BLAST and k-t SENSE: dynamic MRI with high frame rate exploiting spatiotemporal correlations. Magnetic Resonance in Medicine: An Official Journal of the International Society for Magnetic Resonance in Medicine, 50(5), 1031-1042. - .. [5] Fessler, J. A., Lee, S., Olafsson, V. T., Shi, H. R., & Noll, D. C. + 5. Fessler, J. A., Lee, S., Olafsson, V. T., Shi, H. R., & Noll, D. C. (2005). Toeplitz-based iterative image reconstruction for MRI with correction for magnetic field inhomogeneity. 
IEEE Transactions on Signal Processing, 53(9), 3393-3402. @@ -321,21 +212,21 @@ def reconstruct_lstsq(kspace, kspace = tf.convert_to_tensor(kspace) # Create the linear operator. - operator = linalg_ops.LinearOperatorMRI(image_shape, - extra_shape=extra_shape, - mask=mask, - trajectory=trajectory, - density=density, - sensitivities=sensitivities, - phase=phase, - fft_norm='ortho', - sens_norm=sens_norm, - dynamic_domain=dynamic_domain) + operator = linear_operator_mri.LinearOperatorMRI(image_shape, + extra_shape=extra_shape, + mask=mask, + trajectory=trajectory, + density=density, + sensitivities=sensitivities, + phase=phase, + fft_norm='ortho', + sens_norm=sens_norm, + dynamic_domain=dynamic_domain) rank = operator.rank # If using Toeplitz NUFFT, we need to use the specialized Gram MRI operator. if toeplitz_nufft and operator.is_non_cartesian: - gram_operator = linalg_ops.LinearOperatorGramMRI( + gram_operator = linear_operator_mri.LinearOperatorGramMRI( image_shape, extra_shape=extra_shape, mask=mask, @@ -352,8 +243,7 @@ def reconstruct_lstsq(kspace, gram_operator = None # Apply density compensation, if provided. - if density is not None: - kspace *= operator._dens_weights_sqrt # pylint: disable=protected-access + kspace = operator.preprocess(kspace, adjoint=True) initial_image = operator.H.transform(kspace) @@ -372,7 +262,7 @@ def reconstruct_lstsq(kspace, reg_operator = None reg_prior = None - operator_gm = linalg_imaging.LinearOperatorGramMatrix( + operator_gm = linear_operator_gram_matrix.LinearOperatorGramMatrix( operator, reg_parameter=reg_parameter, reg_operator=reg_operator, gram_operator=gram_operator) rhs = initial_image @@ -383,7 +273,8 @@ def reconstruct_lstsq(kspace, reg_operator.transform(reg_prior), adjoint=True) rhs += tf.cast(reg_parameter, reg_prior.dtype) * reg_prior # Solve the (maybe regularized) linear system. 
- result = linalg_ops.conjugate_gradient(operator_gm, rhs, **optimizer_kwargs) + result = conjugate_gradient.conjugate_gradient( + operator_gm, rhs, **optimizer_kwargs) image = result.x elif optimizer == 'admm': @@ -438,16 +329,7 @@ def _objective(x): else: raise ValueError(f"Unknown optimizer: {optimizer}") - # Apply temporal Fourier operator, if necessary. - if operator.is_dynamic and operator.dynamic_domain == 'frequency': - image = fft_ops.ifftn(image, axes=[operator.dynamic_axis], - norm='ortho', shift=True) - - # Apply intensity correction, if requested. - if operator.is_multicoil and sens_norm: - sens_weights_sqrt = tf.math.reciprocal_no_nan( - tf.norm(sensitivities, axis=-(rank + 1), keepdims=False)) - image *= sens_weights_sqrt + image = operator.postprocess(image, adjoint=True) # If necessary, filter the image to remove k-space corners. This can be # done if the trajectory has circular coverage and does not cover the k-space @@ -523,7 +405,7 @@ def reconstruct_sense(kspace, ValueError: If `kspace` and `sensitivities` have incompatible batch shapes. References: - .. [1] Pruessmann, K.P., Weiger, M., Scheidegger, M.B. and Boesiger, P. + 1. Pruessmann, K.P., Weiger, M., Scheidegger, M.B. and Boesiger, P. (1999), SENSE: Sensitivity encoding for fast MRI. Magn. Reson. Med., 42: 952-962. https://doi.org/10.1002/(SICI)1522-2594(199911)42:5<952::AID-MRM16>3.0.CO;2-S @@ -704,7 +586,7 @@ def reconstruct_grappa(kspace, the spatial shape. References: - .. [1] Griswold, M.A., Jakob, P.M., Heidemann, R.M., Nittka, M., Jellus, V., + 1. Griswold, M.A., Jakob, P.M., Heidemann, R.M., Nittka, M., Jellus, V., Wang, J., Kiefer, B. and Haase, A. (2002), Generalized autocalibrating partially parallel acquisitions (GRAPPA). Magn. Reson. Med., 47: 1202-1210. https://doi.org/10.1002/mrm.10171 @@ -853,9 +735,9 @@ def reconstruct_grappa(kspace, # Combine coils if requested. 
if combine_coils: - result = coil_ops.combine_coils(result, - maps=sensitivities, - coil_axis=-rank-1) + result = coil_combination.combine_coils(result, + maps=sensitivities, + coil_axis=-rank-1) return result @@ -951,15 +833,10 @@ def _flatten_last_dimensions(x): @api_util.export("recon.partial_fourier", "recon.pf") -@deprecation.deprecated_args( - deprecation.REMOVAL_DATE['0.19.0'], - 'Use argument `preserve_phase` instead.', - ('return_complex', None)) def reconstruct_pf(kspace, factors, preserve_phase=None, return_kspace=False, - return_complex=None, method='zerofill', **kwargs): """Reconstructs an MR image using partial Fourier methods. @@ -980,8 +857,6 @@ def reconstruct_pf(kspace, be complex-valued. return_kspace: A `boolean`. If `True`, returns the filled *k*-space instead of the reconstructed images. This is always complex-valued. - return_complex: A `boolean`. If `True`, returns complex instead of - real-valued images. method: A `string`. The partial Fourier reconstruction algorithm. Must be one of `"zerofill"`, `"homodyne"` (homodyne detection method) or `"pocs"` (projection onto convex sets method). @@ -1012,10 +887,10 @@ def reconstruct_pf(kspace, POCS algorithm. Defaults to `10`. References: - .. [1] Noll, D. C., Nishimura, D. G., & Macovski, A. (1991). Homodyne + 1. Noll, D. C., Nishimura, D. G., & Macovski, A. (1991). Homodyne detection in magnetic resonance imaging. IEEE transactions on medical imaging, 10(2), 154-163. - .. [2] Haacke, E. M., Lindskogj, E. D., & Lin, W. (1991). A fast, iterative, + 2. Haacke, E. M., Lindskogj, E. D., & Lin, W. (1991). A fast, iterative, partial-Fourier technique capable of local phase recovery. Journal of Magnetic Resonance (1969), 92(1), 126-145. 
""" @@ -1028,8 +903,6 @@ def reconstruct_pf(kspace, f"`factors` must be greater than or equal to 0.5, but got: {factors}")) tf.debugging.assert_less_equal(factors, 1.0, message=( f"`factors` must be less than or equal to 1.0, but got: {factors}")) - preserve_phase = deprecation.deprecated_argument_lookup( - 'preserve_phase', preserve_phase, 'return_complex', return_complex) if preserve_phase is None: preserve_phase = False diff --git a/tensorflow_mri/python/ops/recon_ops_test.py b/tensorflow_mri/python/ops/recon_ops_test.py index d4308d94..6fb182f8 100755 --- a/tensorflow_mri/python/ops/recon_ops_test.py +++ b/tensorflow_mri/python/ops/recon_ops_test.py @@ -1,4 +1,4 @@ -# Copyright 2021 University College London. All Rights Reserved. +# Copyright 2021 The TensorFlow MRI Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -39,65 +39,6 @@ def setUpClass(cls): cls.data.update(io_util.read_hdf5('tests/data/recon_ops_data_2.h5')) cls.data.update(io_util.read_hdf5('tests/data/recon_ops_data_3.h5')) - def test_adj_fft(self): - """Test simple FFT recon.""" - kspace = self.data['fft/kspace'] - sens = self.data['fft/sens'] - image_shape = kspace.shape[-2:] - - # Test single-coil. - image = recon_ops.reconstruct_adj(kspace[0, ...], image_shape) - expected = fft_ops.ifftn(kspace[0, ...], norm='ortho', shift=True) - - self.assertAllClose(expected, image) - - # Test multi-coil. 
- image = recon_ops.reconstruct_adj(kspace, image_shape, sensitivities=sens) - expected = fft_ops.ifftn(kspace, axes=[-2, -1], norm='ortho', shift=True) - scale = tf.math.reduce_sum(sens * tf.math.conj(sens), axis=0) - expected = tf.math.divide_no_nan( - tf.math.reduce_sum(expected * tf.math.conj(sens), axis=0), scale) - - self.assertAllClose(expected, image) - - def test_adj_nufft(self): - """Test simple NUFFT recon.""" - kspace = self.data['nufft/kspace'] - sens = self.data['nufft/sens'] - traj = self.data['nufft/traj'] - dens = self.data['nufft/dens'] - image_shape = [144, 144] - fft_norm_factor = tf.cast(tf.math.sqrt(144. * 144.), tf.complex64) - - # Save us some typing. - inufft = lambda src, pts: tfft.nufft(src, pts, - grid_shape=[144, 144], - transform_type='type_1', - fft_direction='backward') - - # Test single-coil. - image = recon_ops.reconstruct_adj(kspace[0, ...], image_shape, - trajectory=traj, - density=dens) - - expected = inufft(kspace[0, ...] / tf.cast(dens, tf.complex64), traj) - expected /= fft_norm_factor - - self.assertAllClose(expected, image) - - # Test multi-coil. - image = recon_ops.reconstruct_adj(kspace, image_shape, - trajectory=traj, - density=dens, - sensitivities=sens) - expected = inufft(kspace / dens, traj) - expected /= fft_norm_factor - scale = tf.math.reduce_sum(sens * tf.math.conj(sens), axis=0) - expected = tf.math.divide_no_nan( - tf.math.reduce_sum(expected * tf.math.conj(sens), axis=0), scale) - - self.assertAllClose(expected, image) - @test_util.run_in_graph_and_eager_modes def test_inufft_2d(self): """Test inverse NUFFT method with 2D phantom.""" diff --git a/tensorflow_mri/python/ops/signal_ops.py b/tensorflow_mri/python/ops/signal_ops.py index 2cfb63c5..aa36342c 100644 --- a/tensorflow_mri/python/ops/signal_ops.py +++ b/tensorflow_mri/python/ops/signal_ops.py @@ -1,4 +1,4 @@ -# Copyright 2021 University College London. All Rights Reserved. +# Copyright 2021 The TensorFlow MRI Authors. All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -90,7 +90,7 @@ def atanfilt(arg, cutoff=np.pi, beta=100.0, name=None): A `Tensor` of shape `arg.shape`. References: - .. [1] Pruessmann, K.P., Weiger, M., Börnert, P. and Boesiger, P. (2001), + 1. Pruessmann, K.P., Weiger, M., Börnert, P. and Boesiger, P. (2001), Advances in sensitivity encoding with arbitrary k-space trajectories. Magn. Reson. Med., 46: 638-651. https://doi.org/10.1002/mrm.1241 """ @@ -99,12 +99,79 @@ def atanfilt(arg, cutoff=np.pi, beta=100.0, name=None): return 0.5 + (1.0 / np.pi) * tf.math.atan(beta * (cutoff - arg) / cutoff) +@api_util.export("signal.rect") +def rect(arg, cutoff=np.pi, name=None): + r"""Returns the rectangular function. + + The rectangular function is defined as: + + $$ + \operatorname{rect}(x) = \Pi(t) = + \left\{\begin{array}{rl} + 0, & \text{if } |x| > \pi \\ + \frac{1}{2}, & \text{if } |x| = \pi \\ + 1, & \text{if } |x| < \pi. + \end{array}\right. + $$ + + Args: + arg: The input `tf.Tensor`. + cutoff: A scalar `tf.Tensor` in the range `[0, pi]`. + The cutoff frequency of the filter. + name: Name to use for the scope. + + Returns: + A `tf.Tensor` with the same shape and type as `arg`. + """ + with tf.name_scope(name or 'rect'): + arg = tf.convert_to_tensor(arg) + one = tf.constant(1.0, dtype=arg.dtype) + zero = tf.constant(0.0, dtype=arg.dtype) + half = tf.constant(0.5, dtype=arg.dtype) + return tf.where(tf.math.abs(arg) == cutoff, + half, tf.where(tf.math.abs(arg) < cutoff, one, zero)) + + +@api_util.export("signal.separable_window") +def separable_window(func): + """Returns a function that computes a separable window. + + This function creates a separable N-D filters as the outer product of 1D + filters along different dimensions. + + Args: + func: A 1D window function. Must have signature `func(x, *args, **kwargs)`. + + Returns: + A function that computes a separable window. 
Has signature + `func(x, *args, **kwargs)`, where `x` is a `tf.Tensor` of shape `[..., N]` + and each element of `args` and `kwargs is a `tf.Tensor` of shape `[N, ...]`, + which will be unpacked along the first dimension. + """ + def wrapper(x, *args, **kwargs): + # Convert each input to a tensor. + args = tuple(tf.convert_to_tensor(arg) for arg in args) + kwargs = {k: tf.convert_to_tensor(v) for k, v in kwargs.items()} + def fn(accumulator, current): + x, args, kwargs = current + return accumulator * func(x, *args, **kwargs) + # Move last axis to front. + perm = tf.concat([[tf.rank(x) - 1], tf.range(0, tf.rank(x) - 1)], 0) + x = tf.transpose(x, perm) + # Initialize as 1.0. + initializer = tf.ones_like(x[0, ...]) + return tf.foldl(fn, (x, args, kwargs), initializer=initializer) + return wrapper + + @api_util.export("signal.filter_kspace") def filter_kspace(kspace, trajectory=None, filter_fn='hamming', filter_rank=None, - filter_kwargs=None): + filter_kwargs=None, + separable=False, + name=None): """Filter *k*-space. Multiplies *k*-space by a filtering function. @@ -114,45 +181,73 @@ def filter_kspace(kspace, trajectory: A `Tensor` of shape `kspace.shape + [N]`, where `N` is the number of spatial dimensions. If `None`, `kspace` is assumed to be Cartesian. - filter_fn: A `str` (one of `'hamming'`, `'hann'` or `'atanfilt'`) or a - callable that accepts a coordinate array and returns corresponding filter - values. + filter_fn: A `str` (one of `'rect'`, `'hamming'`, `'hann'` or `'atanfilt'`) + or a callable that accepts a coordinates array and returns corresponding + filter values. The passed coordinates array will have shape `kspace.shape` + if `separable=False` and `[*kspace.shape, N]` if `separable=True`. filter_rank: An `int`. The rank of the filter. Only relevant if *k*-space is Cartesian. Defaults to `kspace.shape.rank`. filter_kwargs: A `dict`. Additional keyword arguments to pass to the filtering function. + separable: A `boolean`. 
If `True`, the input *k*-space will be filtered + using an N-D separable window instead of a circularly symmetric window. + If `filter_fn` has one of the default string values, the function is + automatically made separable. If `filter_fn` is a custom callable, it is + the responsibility of the user to ensure that the passed callable is + appropriate. + name: Name to use for the scope. Returns: A `Tensor` of shape `kspace.shape`. The filtered *k*-space. """ - kspace = tf.convert_to_tensor(kspace) - if trajectory is not None: - kspace, trajectory = check_util.verify_compatible_trajectory( - kspace, trajectory) - - # Make a "trajectory" for Cartesian k-spaces. - is_cartesian = trajectory is None - if is_cartesian: - filter_rank = filter_rank or kspace.shape.rank - vecs = [tf.linspace(-np.pi, np.pi - (2.0 * np.pi / s), s) - for s in kspace.shape[-filter_rank:]] # pylint: disable=invalid-unary-operand-type - trajectory = array_ops.meshgrid(*vecs) - - if not callable(filter_fn): - # filter_fn not a callable, so should be an enum value. Get the - # corresponding function. - filter_fn = check_util.validate_enum( - filter_fn, valid_values={'hamming', 'hann', 'atanfilt'}, - name='filter_fn') - filter_fn = { - 'hamming': hamming, - 'hann': hann, - 'atanfilt': atanfilt - }[filter_fn] - filter_kwargs = filter_kwargs or {} - - traj_norm = tf.norm(trajectory, axis=-1) - return kspace * tf.cast(filter_fn(traj_norm, **filter_kwargs), kspace.dtype) + with tf.name_scope(name or 'filter_kspace'): + kspace = tf.convert_to_tensor(kspace) + if trajectory is not None: + kspace, trajectory = check_util.verify_compatible_trajectory( + kspace, trajectory) + + # Make a "trajectory" for Cartesian k-spaces. 
+ is_cartesian = trajectory is None + if is_cartesian: + filter_rank = filter_rank or kspace.shape.rank + vecs = tf.TensorArray(dtype=kspace.dtype.real_dtype, + size=filter_rank, + infer_shape=False, + clear_after_read=False) + for i in range(-filter_rank, 0): + size = tf.shape(kspace)[i] + pi = tf.cast(np.pi, kspace.dtype.real_dtype) + low = -pi + high = pi - (2.0 * pi / tf.cast(size, kspace.dtype.real_dtype)) + vecs = vecs.write(i + filter_rank, tf.linspace(low, high, size)) + trajectory = array_ops.dynamic_meshgrid(vecs) + + # For non-separable filters, use the frequency magnitude (circularly + # symmetric filter). + if not separable: + trajectory = tf.norm(trajectory, axis=-1) + + if not callable(filter_fn): + # filter_fn not a callable, so should be an enum value. Get the + # corresponding function. + filter_fn = check_util.validate_enum( + filter_fn, valid_values={'rect', 'hamming', 'hann', 'atanfilt'}, + name='filter_fn') + filter_fn = { + 'rect': rect, + 'hamming': hamming, + 'hann': hann, + 'atanfilt': atanfilt + }[filter_fn] + + if separable: + # The above functions are 1D. If `separable` is `True`, make them N-D + # by wrapping them with `separable_window`. + filter_fn = separable_window(filter_fn) + + filter_kwargs = filter_kwargs or {} # Make sure it's a dict. + filter_values = filter_fn(trajectory, **filter_kwargs) + return kspace * tf.cast(filter_values, kspace.dtype) @api_util.export("signal.crop_kspace") diff --git a/tensorflow_mri/python/ops/signal_ops_test.py b/tensorflow_mri/python/ops/signal_ops_test.py index f7976660..8fa12929 100755 --- a/tensorflow_mri/python/ops/signal_ops_test.py +++ b/tensorflow_mri/python/ops/signal_ops_test.py @@ -1,4 +1,4 @@ -# Copyright 2021 University College London. All Rights Reserved. +# Copyright 2021 The TensorFlow MRI Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -60,6 +60,36 @@ def test_atanfilt(self): result = signal_ops.atanfilt(x) self.assertAllClose(expected, result) + def test_rect(self): + """Test rectangular function.""" + x = [-3.1, -1.3, -0.2, 0.0, 0.4, 1.0, 3.1] + expected = [0.0, 0.0, 1.0, 1.0, 1.0, 0.5, 0.0] + result = signal_ops.rect(x, cutoff=1.0) + self.assertAllClose(expected, result) + + def test_separable_rect(self): + """Test separable rectangular function.""" + x = array_ops.meshgrid( + [-2.0, -1.5, -1.0, -0.5, 0.0, 0.5, 1.0, 1.5, 2.0], + [-2.0, -1.5, -1.0, -0.5, 0.0, 0.5, 1.0, 1.5, 2.0]) + expected = [[0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. ], + [0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. ], + [0. , 0. , 0. , 0.25, 0.5 , 0.25, 0. , 0. , 0. ], + [0. , 0. , 0. , 0.5 , 1. , 0.5 , 0. , 0. , 0. ], + [0. , 0. , 0. , 0.5 , 1. , 0.5 , 0. , 0. , 0. ], + [0. , 0. , 0. , 0.5 , 1. , 0.5 , 0. , 0. , 0. ], + [0. , 0. , 0. , 0.25, 0.5 , 0.25, 0. , 0. , 0. ], + [0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. ], + [0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. ]] + + separable_rect = signal_ops.separable_window(signal_ops.rect) + + result = separable_rect(x, (1.0, 0.5)) + self.assertAllClose(expected, result) + + result = separable_rect(x, cutoff=(1.0, 0.5)) + self.assertAllClose(expected, result) + class KSpaceFilterTest(test_util.TestCase): """Test k-space filters.""" @@ -143,5 +173,6 @@ def test_filter_custom_fn(self): kspace, trajectory=traj, filter_fn=filter_fn) self.assertAllClose(expected, result) + if __name__ == '__main__': tf.test.main() diff --git a/tensorflow_mri/python/ops/traj_ops.py b/tensorflow_mri/python/ops/traj_ops.py index 59cd3ccc..bbe6843d 100755 --- a/tensorflow_mri/python/ops/traj_ops.py +++ b/tensorflow_mri/python/ops/traj_ops.py @@ -1,4 +1,4 @@ -# Copyright 2021 University College London. All Rights Reserved. +# Copyright 2021 The TensorFlow MRI Authors. All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -24,11 +24,10 @@ import numpy as np import tensorflow as tf import tensorflow_nufft as tfft -from tensorflow_graphics.geometry.transformation import rotation_matrix_2d # pylint: disable=wrong-import-order -from tensorflow_graphics.geometry.transformation import rotation_matrix_3d # pylint: disable=wrong-import-order +from tensorflow_mri.python.geometry import rotation_2d +from tensorflow_mri.python.geometry import rotation_3d from tensorflow_mri.python.ops import array_ops -from tensorflow_mri.python.ops import geom_ops from tensorflow_mri.python.ops import signal_ops from tensorflow_mri.python.util import api_util from tensorflow_mri.python.util import check_util @@ -67,8 +66,7 @@ def density_grid(shape, generate a boolean sampling mask. Args: - shape: A `tf.TensorShape` or a list of `ints`. The shape of the output - density grid. + shape: A 1D integer `tf.Tensor`. The shape of the output density grid. inner_density: A `float` between 0.0 and 1.0. The density of the inner region. outer_density: A `float` between 0.0 and 1.0. The density of the outer @@ -85,13 +83,17 @@ def density_grid(shape, A tensor containing the density grid. 
""" with tf.name_scope(name or 'density_grid'): - shape = tf.TensorShape(shape).as_list() + shape = tf.convert_to_tensor(shape, dtype=tf.int32) + inner_density = tf.convert_to_tensor(inner_density) + outer_density = tf.convert_to_tensor(outer_density) + inner_cutoff = tf.convert_to_tensor(inner_cutoff) + outer_cutoff = tf.convert_to_tensor(outer_cutoff) transition_type = check_util.validate_enum( transition_type, ['linear', 'quadratic', 'hann'], name='transition_type') - vecs = [tf.linspace(-1.0, 1.0 - 2.0 / n, n) for n in shape] - grid = array_ops.meshgrid(*vecs) + grid = frequency_grid( + shape, max_val=tf.constant(1.0, dtype=inner_density.dtype)) radius = tf.norm(grid, axis=-1) scaled_radius = (outer_cutoff - radius) / (outer_cutoff - inner_cutoff) @@ -109,6 +111,44 @@ def density_grid(shape, return density +@api_util.export("sampling.frequency_grid") +def frequency_grid(shape, max_val=1.0): + """Returns a frequency grid. + + Creates a grid of frequencies between `-max_val` and `max_val` of the + specified shape. For even shapes, the output grid is asymmetric + with the zero-frequency component at `n // 2 + 1`. + + Args: + shape: A 1D integer `tf.Tensor`. The shape of the output frequency grid. + max_val: A `tf.Tensor`. The maximum frequency. Must be of floating point + dtype. + + Returns: + A tensor of shape [*shape, tf.size(shape)] such that `tensor[..., i]` + contains the frequencies along axis `i`. Has the same dtype as `max_val`. 
+ """ + shape = tf.convert_to_tensor(shape, dtype=tf.int32) + max_val = tf.convert_to_tensor(max_val) + dtype = max_val.dtype + + vecs = tf.TensorArray(dtype=dtype, + size=tf.size(shape), + infer_shape=False, + clear_after_read=False) + + def _cond(i, vecs): # pylint: disable=unused-argument + return tf.less(i, tf.size(shape)) + def _body(i, vecs): + step = (2.0 * max_val) / tf.cast(shape[i], dtype) + low = -max_val + high = tf.cond(shape[i] % 2 == 0, lambda: max_val - step, lambda: max_val) + return i + 1, vecs.write(i, tf.linspace(low, high, shape[i])) + _, vecs = tf.while_loop(_cond, _body, [0, vecs]) + + return array_ops.dynamic_meshgrid(vecs) + + @api_util.export("sampling.random_mask") def random_sampling_mask(shape, density=1.0, seed=None, rng=None, name=None): """Returns a random sampling mask with the given density. @@ -137,15 +177,208 @@ def random_sampling_mask(shape, density=1.0, seed=None, rng=None, name=None): with tf.name_scope(name or 'sampling_mask'): if seed is not None and rng is not None: raise ValueError("Cannot provide both `seed` and `rng`.") + density = tf.convert_to_tensor(density) counts = tf.ones(shape, dtype=density.dtype) if seed is not None: # Use stateless RNG. mask = tf.random.stateless_binomial(shape, seed, counts, density) else: # Use stateful RNG. - rng = rng or tf.random.get_global_generator() - mask = rng.binomial(shape, counts, density) + with tf.init_scope(): + rng = rng or tf.random.get_global_generator().split(1)[0] + # As of TF 2.9, `binomial` does not have a GPU implementation. + # mask = rng.binomial(shape, counts, density) + # Therefore, we use a uniform distribution instead. If the generated + # value is less than the density, the point is sampled. + mask = tf.math.less(rng.uniform(shape, dtype=density.dtype), density) return tf.cast(mask, tf.bool) +@api_util.export("sampling.center_mask") +def center_mask(shape, center_size, name=None): + """Returns a central sampling mask. 
+
+  This function returns a boolean tensor of zeros with a central region of ones.
+
+  ```{tip}
+  Use this function to extract the calibration region from a Cartesian
+  *k*-space.
+  ```
+
+  ```{tip}
+  In MRI, one of the spatial frequency dimensions (readout dimension) is
+  typically fully sampled. In this case, you might want to create a mask that
+  has one less dimension than the corresponding *k*-space (e.g., 1D mask for
+  2D images or 2D mask for 3D images).
+  ```
+
+  ```{note}
+  The central region is always evenly shaped for even mask dimensions and
+  oddly shaped for odd mask dimensions. This avoids phase artefacts when
+  using the resulting mask to sample the frequency domain.
+  ```
+
+  Example:
+
+  >>> mask = tfmri.sampling.center_mask([8], [4])
+  >>> mask.numpy()
+  array([False, False,  True,  True,  True,  True, False, False])
+
+  Args:
+    shape: A 1D integer `tf.Tensor`. The shape of the output mask.
+    center_size: A 1D `tf.Tensor` of integer or floating point dtype. The size
+      of the center region. If `center_size` has integer dtype, its i-th value
+      must be in the range `[0, shape[i]]` and will be interpreted as the number
+      of samples in the center region along axis `i`. If `center_size` has
+      floating point dtype, its i-th value must be in the range `[0, 1]` and
+      will be interpreted as the fraction of samples in the center region along
+      axis `i`.
+    name: A `str`. A name for this op.
+
+  Returns:
+    A boolean `tf.Tensor` containing the sampling mask.
+
+  Raises:
+    TypeError: If `center_size` is not of integer or floating point dtype.
+  """
+  with tf.name_scope(name or 'center_mask'):
+    shape = tf.convert_to_tensor(shape, dtype=tf.int32)
+    center_size = tf.convert_to_tensor(center_size)
+
+    if not center_size.dtype.is_integer and not center_size.dtype.is_floating:
+      raise TypeError(
+          "`center_size` must be of integer or floating point dtype.")
+
+    if center_size.dtype.is_floating:
+      # Input is floating point, interpret as fraction and convert to integer.
+ center_size = center_size * tf.cast(shape, center_size.dtype) + center_size = tf.cast(center_size + 0.5, tf.int32) + + # Make sure that `center_size` is even for even shape and odd for odd shape. + center_size = (center_size // 2) * 2 + shape % 2 + # Make sure that `center_size` is not bigger than the shape. + center_size = tf.math.minimum(center_size, shape) + + # Create mask by first creating a central region of ones, and then padding + # with zeros to the specified shape. + mask = tf.ones(center_size, dtype=tf.bool) + paddings = tf.stack([(shape - center_size) // 2, + (shape - center_size) // 2], axis=-1) + mask = tf.pad(mask, paddings, constant_values=False) + return mask + + +@api_util.export("sampling.accel_mask") +def accel_mask(shape, + acceleration, + center_size=0, + mask_type='equispaced', + offset=0, + rng=None, + name=None): + """Returns a standard accelerated sampling mask. + + The returned sampling mask has two regions: a fully sampled central region + and a partially sampled peripheral region. The peripheral region may be + sampled uniformly or randomly. + + ```{tip} + This type of mask describes the most commonly used sampling patterns in + Cartesian MRI. + ``` + + ```{tip} + In MRI, one of the spatial frequency dimensions (readout dimension) is + typically fully sampled. In this case, you might want to create a mask that + has one less dimension than the corresponding *k*-space (e.g., 1D mask for + 2D images or 2D mask for 3D images). + ``` + + ```{note} + The central region is always evenly shaped for even mask dimensions and + oddly shaped for odd mask dimensions. This avoids phase artefacts when + using the resulting mask to sample the frequency domain. + ``` + + Example: + + >>> mask = tfmri.sampling.accel_mask([8], [2], [2]) + >>> mask.numpy() + array([ True, False, True, True, True, False, True, False]) + + Args: + shape: A 1D integer `tf.Tensor`. The shape of the output mask. + acceleration: A 1D integer `tf.Tensor`. 
The acceleration factor on the + peripheral region along each axis. + center_size: A 1D integer `tf.Tensor`. The size of the central region + along each axis. Defaults to 0. + mask_type: A `str`. The type of sampling to use on the peripheral region. + Must be one of `'equispaced'` or `'random'`. If `'equispaced'`, the + peripheral region is sampled uniformly. If `'random'`, the peripheral + region is sampled randomly with the expected acceleration value. Defaults + to `'equispaced'`. + offset: A 1D integer `tf.Tensor`. The offset of the first sample along + each axis. Only relevant when `mask_type` is `'equispaced'`. Can also + have the value `'random'`, in which case the offset is selected randomly. + Defaults to 0. + rng: A `tf.random.Generator`. The random number generator to use. If not + provided, the global random number generator will be used. + name: A `str`. A name for this op. + + Returns: + A boolean `tf.Tensor` containing the sampling mask. + + Raises: + ValueError: If `mask_type` is not one of `'equispaced'` or `'random'`. + """ + with tf.name_scope(name or 'accel_mask'): + shape = tf.convert_to_tensor(shape, dtype=tf.int32) + acceleration = tf.convert_to_tensor(acceleration) + rank = tf.size(shape) + + # If no RNG was passed, use the global RNG. + with tf.init_scope(): + rng = rng or tf.random.get_global_generator().split(1)[0] + + # Process `offset`. + if offset == 'random': + offset = tf.map_fn(lambda maxval: rng.uniform( + [], minval=0, maxval=maxval, dtype=tf.int32), + acceleration, dtype=tf.int32) + else: + offset = tf.convert_to_tensor(offset, dtype=tf.int32) + if offset.shape.rank == 0: + offset = tf.ones([rank], dtype=tf.int32) * offset + + # Initialize mask. 
+ mask = tf.ones(shape, dtype=tf.bool) + static_shape = mask.shape + + def fn(accum, elems): + axis, mask = accum + size, accel, off = elems + + if mask_type == 'equispaced': + mask_1d = tf.tile(tf.scatter_nd([[off]], [True], [accel]), + multiples=[(size + accel - 1) // accel])[:size] + + elif mask_type == 'random': + density = 1.0 / tf.cast(accel, tf.float32) + mask_1d = rng.uniform(shape=[size], dtype=tf.float32) < density + + else: + raise ValueError(f"Unknown mask type: {mask_type}") + + bcast_shape = tf.tensor_scatter_nd_update( + tf.ones([rank], dtype=tf.int32), [[axis]], [size]) + mask_1d = tf.reshape(mask_1d, bcast_shape) + mask &= mask_1d + return axis + 1, tf.ensure_shape(mask, static_shape) + + _, mask = tf.foldl(fn, (shape, acceleration, offset), + initializer=(0, mask)) + + return tf.math.logical_or(mask, center_mask(shape, center_size)) + + @api_util.export("sampling.radial_trajectory") def radial_trajectory(base_resolution, views=1, @@ -212,17 +445,17 @@ def radial_trajectory(base_resolution, radians/voxel, ie, values are in the range `[-pi, pi]`. References: - .. [1] Winkelmann, S., Schaeffter, T., Koehler, T., Eggers, H. and - Doessel, O. (2007), An optimal radial profile order based on the golden - ratio for time-resolved MRI. IEEE Transactions on Medical Imaging, - 26(1): 68-76, https://doi.org/10.1109/TMI.2006.885337 - .. [2] Wundrak, S., Paul, J., Ulrici, J., Hell, E., Geibel, M.-A., - Bernhardt, P., Rottbauer, W. and Rasche, V. (2016), Golden ratio sparse - MRI using tiny golden angles. Magn. Reson. Med., 75: 2372-2378. - https://doi.org/10.1002/mrm.25831 - .. [3] Wong, S.T.S. and Roos, M.S. (1994), A strategy for sampling on a - sphere applied to 3D selective RF pulse design. Magn. Reson. Med., - 32: 778-784. https://doi.org/10.1002/mrm.1910320614 + 1. Winkelmann, S., Schaeffter, T., Koehler, T., Eggers, H. and + Doessel, O. (2007), An optimal radial profile order based on the golden + ratio for time-resolved MRI. 
IEEE Transactions on Medical Imaging, + 26(1): 68-76, https://doi.org/10.1109/TMI.2006.885337 + 2. Wundrak, S., Paul, J., Ulrici, J., Hell, E., Geibel, M.-A., + Bernhardt, P., Rottbauer, W. and Rasche, V. (2016), Golden ratio sparse + MRI using tiny golden angles. Magn. Reson. Med., 75: 2372-2378. + https://doi.org/10.1002/mrm.25831 + 3. Wong, S.T.S. and Roos, M.S. (1994), A strategy for sampling on a + sphere applied to 3D selective RF pulse design. Magn. Reson. Med., + 32: 778-784. https://doi.org/10.1002/mrm.1910320614 """ return _kspace_trajectory('radial', {'base_resolution': base_resolution, @@ -310,7 +543,7 @@ def spiral_trajectory(base_resolution, radians/voxel, ie, values are in the range `[-pi, pi]`. References: - .. [1] Pipe, J.G. and Zwart, N.R. (2014), Spiral trajectory design: A + 1. Pipe, J.G. and Zwart, N.R. (2014), Spiral trajectory design: A flexible numerical algorithm and base analytical equations. Magn. Reson. Med, 71: 278-285. https://doi.org/10.1002/mrm.24675 """ @@ -466,8 +699,10 @@ def radial_density(base_resolution, if ordering not in orderings_2d: raise ValueError(f"Ordering `{ordering}` is not implemented.") + phases_ = phases if phases is not None else 1 + # Get angles. - angles = _trajectory_angles(views, phases or 1, ordering=ordering, + angles = _trajectory_angles(views, phases_, ordering=ordering, angle_range=angle_range, tiny_number=tiny_number) # Compute weights. @@ -579,10 +814,11 @@ def estimate_radial_density(points, readout_os=2.0): This function supports 2D and 3D ("koosh-ball") radial trajectories. - .. warning:: + ```{warning} This function assumes that `points` represents a radial trajectory, but - cannot verify that. If used with trajectories other than radial, it will + will not verify that. If used with trajectories other than radial, it will not fail but the result will be invalid. + ``` Args: points: A `Tensor`. 
Must be one of the following types: `float32`, @@ -638,11 +874,12 @@ def radial_waveform(base_resolution, readout_os=2.0, rank=2): # pylint: disable=unexpected-keyword-arg,no-value-for-parameter # Number of samples with oversampling. - samples = int(base_resolution * readout_os + 0.5) + samples = tf.cast(tf.cast(base_resolution, tf.float32) * + tf.cast(readout_os, tf.float32) + 0.5, dtype=tf.int32) # Compute 1D spoke. waveform = tf.range(-samples // 2, samples // 2, dtype=tf.float32) - waveform /= samples + waveform /= tf.cast(samples, waveform.dtype) # Add y/z dimensions. waveform = tf.expand_dims(waveform, axis=1) @@ -660,7 +897,13 @@ def radial_waveform(base_resolution, readout_os=2.0, rank=2): if sys_util.is_op_library_enabled(): - spiral_waveform = _mri_ops.spiral_waveform + spiral_waveform = api_util.export("sampling.spiral_waveform")( + _mri_ops.spiral_waveform) + # Set the object's module to current module for correct API import. + spiral_waveform.__module__ = __name__ +else: + # Stub to prevent import errors when the op is not available. + spiral_waveform = None def _trajectory_angles(views, @@ -683,6 +926,8 @@ def _trajectory_angles(views, raise ValueError( f"`tiny_number` must be an integer >= 2. Received: {tiny_number}") + phases_ = phases if phases is not None else 1 + # Constants. pi = math.pi pi2 = math.pi * 2.0 @@ -698,19 +943,19 @@ def _trajectory_angles(views, def _angles_2d(angle_delta, angle_max, interleave=False): # Compute azimuthal angles [0, 2 * pi] (full) or [0, pi] (half). - angles = tf.range(views * (phases or 1), dtype=tf.float32) + angles = tf.range(views * phases_, dtype=tf.float32) angles *= angle_delta angles %= angle_max if interleave: - angles = tf.transpose(tf.reshape(angles, (views, phases or 1))) + angles = tf.transpose(tf.reshape(angles, (views, phases_))) else: - angles = tf.reshape(angles, (phases or 1, views)) + angles = tf.reshape(angles, (phases_, views)) angles = tf.expand_dims(angles, -1) return angles # Get ordering. 
if ordering == 'linear': - angles = _angles_2d(default_max / (views * (phases or 1)), default_max, + angles = _angles_2d(default_max / (views * phases_), default_max, interleave=True) elif ordering == 'golden': angles = _angles_2d(phi * default_max, default_max) @@ -747,7 +992,7 @@ def _scan_fn(prev, curr): elif ordering == 'tiny_half': angles = _angles_2d(phi_n * pi, default_max) elif ordering == 'sphere_archimedean': - projections = views * (phases or 1) + projections = views * phases_ full_projections = 2 * projections if angle_range == 'half' else projections # Computation is sensitive to floating-point errors, so we use float64 to # ensure sufficient accuracy. @@ -759,7 +1004,7 @@ def _scan_fn(prev, curr): az = tf.math.floormod(tf.math.cumsum(az), 2.0 * math.pi) # pylint: disable=no-value-for-parameter # Interleave the readouts. def _interleave(arg): - return tf.transpose(tf.reshape(arg, (views, phases or 1))) + return tf.transpose(tf.reshape(arg, (views, phases_))) pol = _interleave(pol) az = _interleave(az) angles = tf.stack([pol, az], axis=-1) @@ -798,9 +1043,6 @@ def _rotate_waveform_2d(waveform, angles): # Prepare for broadcasting. angles = tf.expand_dims(angles, -2) - # Compute rotation matrix. - rot_matrix = rotation_matrix_2d.from_euler(angles) - # Add leading singleton dimensions to `waveform` to match the batch shape of # `angles`. This prevents a broadcasting error later. waveform = tf.reshape(waveform, @@ -808,7 +1050,7 @@ def _rotate_waveform_2d(waveform, angles): tf.shape(waveform)], 0)) # Apply rotation. - return rotation_matrix_2d.rotate(waveform, rot_matrix) + return rotation_2d.Rotation2D.from_euler(angles).rotate(waveform) def _rotate_waveform_3d(waveform, angles): @@ -829,10 +1071,10 @@ def _rotate_waveform_3d(waveform, angles): angles = tf.expand_dims(angles, -2) # Compute rotation matrix. 
- rot_matrix = geom_ops.euler_to_rotation_matrix_3d(angles, order='ZYX') + rot_matrix = _rotation_matrix_3d_from_euler(angles, order='ZYX') # Apply rotation to trajectory. - waveform = rotation_matrix_3d.rotate(waveform, rot_matrix) + waveform = rotation_3d.rotate(waveform, rot_matrix) return waveform @@ -886,13 +1128,13 @@ def estimate_density(points, grid_shape, method='jackson', max_iter=50): A `Tensor` of shape `[..., M]` containing the density of `points`. References: - .. [1] Jackson, J.I., Meyer, C.H., Nishimura, D.G. and Macovski, A. (1991), - Selection of a convolution function for Fourier inversion using gridding - (computerised tomography application). IEEE Transactions on Medical - Imaging, 10(3): 473-478. https://doi.org/10.1109/42.97598 - .. [2] Pipe, J.G. and Menon, P. (1999), Sampling density compensation in - MRI: Rationale and an iterative numerical solution. Magn. Reson. Med., - 41: 179-186. https://doi.org/10.1002/(SICI)1522-2594(199901)41:1<179::AID-MRM25>3.0.CO;2-V + 1. Jackson, J.I., Meyer, C.H., Nishimura, D.G. and Macovski, A. (1991), + Selection of a convolution function for Fourier inversion using gridding + (computerised tomography application). IEEE Transactions on Medical + Imaging, 10(3): 473-478. https://doi.org/10.1109/42.97598 + 2. Pipe, J.G. and Menon, P. (1999), Sampling density compensation in + MRI: Rationale and an iterative numerical solution. Magn. Reson. Med., + 41: 179-186. https://doi.org/10.1002/(SICI)1522-2594(199901)41:1<179::AID-MRM25>3.0.CO;2-V """ method = check_util.validate_enum( method, {'jackson', 'pipe'}, name='method') @@ -978,10 +1220,22 @@ def flatten_trajectory(trajectory): Returns: A reshaped `Tensor` with shape `[..., views * samples, ndim]`. """ + # Compute static output shape. 
batch_shape = trajectory.shape[:-3] views, samples, rank = trajectory.shape[-3:] - new_shape = batch_shape + [views*samples, rank] - return tf.reshape(trajectory, new_shape) + if views is None or samples is None: + views_times_samples = None + else: + views_times_samples = views * samples + static_flat_shape = batch_shape + [views_times_samples, rank] + + # Compute dynamic output shape. + shape = tf.shape(trajectory) + batch_shape = shape[:-3] + views, samples, rank = shape[-3], shape[-2], shape[-1] + flat_shape = tf.concat([batch_shape, [views * samples, rank]], 0) + + return tf.ensure_shape(tf.reshape(trajectory, flat_shape), static_flat_shape) @api_util.export("sampling.flatten_density") @@ -994,10 +1248,22 @@ def flatten_density(density): Returns: A reshaped `Tensor` with shape `[..., views * samples]`. """ + # Compute static output shape. batch_shape = density.shape[:-2] views, samples = density.shape[-2:] - new_shape = batch_shape + [views*samples] - return tf.reshape(density, new_shape) + if views is None or samples is None: + views_times_samples = None + else: + views_times_samples = views * samples + static_flat_shape = batch_shape + [views_times_samples] + + # Compute dynamic output shape. + shape = tf.shape(density) + batch_shape = shape[:-2] + views, samples = shape[-2], shape[-1] + flat_shape = tf.concat([batch_shape, [views * samples]], 0) + + return tf.ensure_shape(tf.reshape(density, flat_shape), static_flat_shape) @api_util.export("sampling.expand_trajectory") @@ -1038,3 +1304,132 @@ def _find_first_greater_than(x, y): x = x - y x = tf.where(x < 0, np.inf, x) return tf.math.argmin(x) + + +def _rotation_matrix_3d_from_euler(angles, order='XYZ', name='rotation_3d'): + r"""Convert an Euler angle representation to a rotation matrix. + + The resulting matrix is $$\mathbf{R} = \mathbf{R}_z\mathbf{R}_y\mathbf{R}_x$$. + + ```{note} + In the following, A1 to An are optional batch dimensions. 
+  ```
+
+  Args:
+    angles: A tensor of shape `[A1, ..., An, 3]`, where the last dimension
+      represents the three Euler angles. `[A1, ..., An, 0]` is the angle about
+      `x` in radians `[A1, ..., An, 1]` is the angle about `y` in radians and
+      `[A1, ..., An, 2]` is the angle about `z` in radians.
+    order: A `str`. The order in which the rotations are applied. Defaults to
+      `"XYZ"`.
+    name: A name for this op. Defaults to "rotation_3d".
+
+  Returns:
+    A tensor of shape `[A1, ..., An, 3, 3]`, where the last two dimensions
+    represent a 3d rotation matrix.
+
+  Raises:
+    ValueError: If the shape of `angles` is not supported.
+  """
+  with tf.name_scope(name):
+    angles = tf.convert_to_tensor(value=angles)
+
+    if angles.shape[-1] != 3:
+      raise ValueError(f"The last dimension of `angles` must have size 3, "
+                       f"but got shape: {angles.shape}")
+
+    sin_angles = tf.math.sin(angles)
+    cos_angles = tf.math.cos(angles)
+    return _build_matrix_from_sines_and_cosines(
+        sin_angles, cos_angles, order=order)
+
+
+def _build_matrix_from_sines_and_cosines(sin_angles, cos_angles, order='XYZ'):
+  """Builds a rotation matrix from sines and cosines of Euler angles.
+
+  ```{note}
+  In the following, A1 to An are optional batch dimensions.
+  ```
+
+  Args:
+    sin_angles: A tensor of shape `[A1, ..., An, 3]`, where the last dimension
+      represents the sine of the Euler angles.
+    cos_angles: A tensor of shape `[A1, ..., An, 3]`, where the last dimension
+      represents the cosine of the Euler angles.
+    order: A `str`. The order in which the rotations are applied. Defaults to
+      `"XYZ"`.
+
+  Returns:
+    A tensor of shape `[A1, ..., An, 3, 3]`, where the last two dimensions
+    represent a 3d rotation matrix.
+
+  Raises:
+    ValueError: If any of the input arguments has an invalid value.
+ """ + sin_angles.shape.assert_is_compatible_with(cos_angles.shape) + output_shape = tf.concat((tf.shape(sin_angles)[:-1], (3, 3)), -1) + + sx, sy, sz = tf.unstack(sin_angles, axis=-1) + cx, cy, cz = tf.unstack(cos_angles, axis=-1) + ones = tf.ones_like(sx) + zeros = tf.zeros_like(sx) + # rx + m00 = ones + m01 = zeros + m02 = zeros + m10 = zeros + m11 = cx + m12 = -sx + m20 = zeros + m21 = sx + m22 = cx + rx = tf.stack((m00, m01, m02, + m10, m11, m12, + m20, m21, m22), + axis=-1) + rx = tf.reshape(rx, output_shape) + # ry + m00 = cy + m01 = zeros + m02 = sy + m10 = zeros + m11 = ones + m12 = zeros + m20 = -sy + m21 = zeros + m22 = cy + ry = tf.stack((m00, m01, m02, + m10, m11, m12, + m20, m21, m22), + axis=-1) + ry = tf.reshape(ry, output_shape) + # rz + m00 = cz + m01 = -sz + m02 = zeros + m10 = sz + m11 = cz + m12 = zeros + m20 = zeros + m21 = zeros + m22 = ones + rz = tf.stack((m00, m01, m02, + m10, m11, m12, + m20, m21, m22), + axis=-1) + rz = tf.reshape(rz, output_shape) + + matrix = tf.eye(output_shape[-2], output_shape[-1], + batch_shape=output_shape[:-2]) + + for r in order.upper(): + if r == 'X': + matrix = rx @ matrix + elif r == 'Y': + matrix = ry @ matrix + elif r == 'Z': + matrix = rz @ matrix + else: + raise ValueError(f"Invalid value for `order`: {order}") + + return matrix diff --git a/tensorflow_mri/python/ops/traj_ops_test.py b/tensorflow_mri/python/ops/traj_ops_test.py index 7dbab0e9..64efc8bf 100755 --- a/tensorflow_mri/python/ops/traj_ops_test.py +++ b/tensorflow_mri/python/ops/traj_ops_test.py @@ -1,4 +1,4 @@ -# Copyright 2021 University College London. All Rights Reserved. +# Copyright 2021 The TensorFlow MRI Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -26,7 +26,7 @@ from tensorflow_mri.python.util import test_util -class DensityGridTest(): +class DensityGridTest(test_util.TestCase): """Tests for `density_grid`.""" @parameterized.product(transition_type=['linear', 'quadratic', 'hann']) def test_density(self, transition_type): # pylint: disable=missing-function-docstring @@ -48,6 +48,165 @@ def test_density(self, transition_type): # pylint: disable=missing-function-doc self.assertAllClose(expected[transition_type], density) +class FrequencyGridTest(test_util.TestCase): + """Tests for `frequency_grid`.""" + def test_frequency_grid_even(self): + """Tests `frequency_grid` with even number of points.""" + result = traj_ops.frequency_grid([4]) + expected = [[-1.0], [-0.5], [0], [0.5]] + self.assertDTypeEqual(result, np.float32) + self.assertAllClose(expected, result) + + def test_frequency_grid_odd(self): + """Tests `frequency_grid` with odd number of points.""" + result = traj_ops.frequency_grid([5]) + expected = [[-1.0], [-0.5], [0], [0.5], [1.0]] + self.assertAllClose(expected, result) + + def test_frequency_grid_max_val(self): + """Tests `frequency_grid` with a different max value.""" + result = traj_ops.frequency_grid([4], max_val=2.0) + expected = [[-2.0], [-1.0], [0], [1.0]] + self.assertAllClose(expected, result) + + def test_frequency_grid_2d(self): + """Tests 2-dimensional `frequency_grid`.""" + result = traj_ops.frequency_grid([4, 8]) + expected = [[[-1. , -1. ], + [-1. , -0.75], + [-1. , -0.5 ], + [-1. , -0.25], + [-1. , 0. ], + [-1. , 0.25], + [-1. , 0.5 ], + [-1. , 0.75]], + [[-0.5 , -1. ], + [-0.5 , -0.75], + [-0.5 , -0.5 ], + [-0.5 , -0.25], + [-0.5 , 0. ], + [-0.5 , 0.25], + [-0.5 , 0.5 ], + [-0.5 , 0.75]], + [[ 0. , -1. ], + [ 0. , -0.75], + [ 0. , -0.5 ], + [ 0. , -0.25], + [ 0. , 0. ], + [ 0. , 0.25], + [ 0. , 0.5 ], + [ 0. , 0.75]], + [[ 0.5 , -1. ], + [ 0.5 , -0.75], + [ 0.5 , -0.5 ], + [ 0.5 , -0.25], + [ 0.5 , 0. 
], + [ 0.5 , 0.25], + [ 0.5 , 0.5 ], + [ 0.5 , 0.75]]] + self.assertAllClose(expected, result) + + +class CenterMaskTest(test_util.TestCase): + """Tests for `center_mask`.""" + def test_center_mask(self): + """Tests `center_mask`.""" + result = traj_ops.center_mask([8], [4]) + expected = [0, 0, 1, 1, 1, 1, 0, 0] + self.assertAllClose(expected, result) + + result = traj_ops.center_mask([9], [5]) + expected = [0, 0, 1, 1, 1, 1, 1, 0, 0] + self.assertAllClose(expected, result) + + result = traj_ops.center_mask([8], [0.5]) + expected = [0, 0, 1, 1, 1, 1, 0, 0] + self.assertAllClose(expected, result) + + result = traj_ops.center_mask([9], [0.5]) + expected = [0, 0, 1, 1, 1, 1, 1, 0, 0] + self.assertAllClose(expected, result) + + result = traj_ops.center_mask([8], [5]) + expected = [0, 0, 1, 1, 1, 1, 0, 0] + self.assertAllClose(expected, result) + + result = traj_ops.center_mask([4, 8], [2, 4]) + expected = [[0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 1, 1, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 1, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0]] + self.assertAllClose(expected, result) + + result = traj_ops.center_mask([4, 8], [1.0, 0.5]) + expected = [[0, 0, 1, 1, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 1, 0, 0], + [0, 0, 1, 1, 1, 1, 0, 0]] + self.assertAllClose(expected, result) + + +class AccelMaskTest(test_util.TestCase): + """Tests for `accel_mask`.""" + def test_accel_mask(self): + """Tests `accel_mask`.""" + result = traj_ops.accel_mask([16], [4], [0]) + expected = [1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0] + self.assertAllClose(expected, result) + + result = traj_ops.accel_mask([16], [4], [4]) + expected = [1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0] + self.assertAllClose(expected, result) + + result = traj_ops.accel_mask([16], [2], [6]) + expected = [1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0] + self.assertAllClose(expected, result) + + result = traj_ops.accel_mask([16], [2], [6]) + expected = [1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0] + 
self.assertAllClose(expected, result) + + result = traj_ops.accel_mask([16], [4], [0], offset=1) + expected = [0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0] + self.assertAllClose(expected, result) + + result = traj_ops.accel_mask([4, 8], [2, 2], [0, 0]) + expected = [[1, 0, 1, 0, 1, 0, 1, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [1, 0, 1, 0, 1, 0, 1, 0], + [0, 0, 0, 0, 0, 0, 0, 0]] + self.assertAllClose(expected, result) + + result = traj_ops.accel_mask([4, 8], [2, 2], [0, 0], offset=[1, 0]) + expected = [[0, 0, 0, 0, 0, 0, 0, 0], + [1, 0, 1, 0, 1, 0, 1, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [1, 0, 1, 0, 1, 0, 1, 0]] + self.assertAllClose(expected, result) + + result = traj_ops.accel_mask([4, 8], [2, 3], [0, 0], offset=[1, 0]) + expected = [[0, 0, 0, 0, 0, 0, 0, 0], + [1, 0, 0, 1, 0, 0, 1, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [1, 0, 0, 1, 0, 0, 1, 0]] + self.assertAllClose(expected, result) + + result = traj_ops.accel_mask([4, 8], [2, 2], [2, 2]) + expected = [[1, 0, 1, 0, 1, 0, 1, 0], + [0, 0, 0, 1, 1, 0, 0, 0], + [1, 0, 1, 1, 1, 0, 1, 0], + [0, 0, 0, 0, 0, 0, 0, 0]] + self.assertAllClose(expected, result) + + result = traj_ops.accel_mask([16], [4], 0) + expected = [1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0] + self.assertAllClose(expected, result) + + result = traj_ops.accel_mask([16], [4]) + expected = [1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0] + self.assertAllClose(expected, result) + + class RadialTrajectoryTest(test_util.TestCase): """Radial trajectory tests.""" @classmethod diff --git a/tensorflow_mri/python/ops/wavelet_ops.py b/tensorflow_mri/python/ops/wavelet_ops.py index 157da17b..dd41d318 100644 --- a/tensorflow_mri/python/ops/wavelet_ops.py +++ b/tensorflow_mri/python/ops/wavelet_ops.py @@ -1,5 +1,5 @@ # ============================================================================== -# Copyright 2022 University College London. All Rights Reserved. +# Copyright 2022 The TensorFlow MRI Authors. All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -734,7 +734,7 @@ def dwt_max_level(shape, wavelet_or_length, axes=None): The level returned is the minimum along all axes. Examples: - >>> import tensorflow_mri as tfmri + >>> tfmri.signal.max_wavelet_level((64, 32), 'db2') 3 @@ -837,10 +837,12 @@ def coeffs_to_tensor(coeffs, padding=0, axes=None): into a single, contiguous array. Examples: + >>> import tensorflow_mri as tfmri >>> image = tfmri.image.phantom() >>> coeffs = tfmri.signal.wavedec(image, wavelet='db2', level=3) >>> tensor, slices = tfmri.signal.wavelet_coeffs_to_tensor(coeffs) + """ coeffs, axes, ndim, ndim_transform = _prepare_coeffs_axes(coeffs, axes) @@ -945,6 +947,7 @@ def tensor_to_coeffs(coeff_tensor, coeff_slices): >>> coeffs_from_arr = tfmri.signal.tensor_to_wavelet_coeffs(tensor, slices) >>> image_recon = tfmri.signal.waverec(coeffs_from_arr, wavelet='db2') >>> # image and image_recon are equal + """ coeff_tensor = tf.convert_to_tensor(coeff_tensor) coeffs = [] diff --git a/tensorflow_mri/python/ops/wavelet_ops_test.py b/tensorflow_mri/python/ops/wavelet_ops_test.py index 08d5eaf1..f222afd8 100644 --- a/tensorflow_mri/python/ops/wavelet_ops_test.py +++ b/tensorflow_mri/python/ops/wavelet_ops_test.py @@ -1,5 +1,5 @@ # ============================================================================== -# Copyright 2022 University College London. All Rights Reserved. +# Copyright 2022 The TensorFlow MRI Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tensorflow_mri/python/recon/__init__.py b/tensorflow_mri/python/recon/__init__.py new file mode 100644 index 00000000..e26ed684 --- /dev/null +++ b/tensorflow_mri/python/recon/__init__.py @@ -0,0 +1,18 @@ +# Copyright 2022 The TensorFlow MRI Authors. All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Image reconstruction.""" + +from tensorflow_mri.python.recon import recon_adjoint +from tensorflow_mri.python.recon import recon_least_squares diff --git a/tensorflow_mri/python/recon/recon_adjoint.py b/tensorflow_mri/python/recon/recon_adjoint.py new file mode 100644 index 00000000..a4e69626 --- /dev/null +++ b/tensorflow_mri/python/recon/recon_adjoint.py @@ -0,0 +1,152 @@ +# Copyright 2022 The TensorFlow MRI Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Signal reconstruction (adjoint).""" + +import tensorflow as tf + +from tensorflow_mri.python.linalg import linear_operator_mri +from tensorflow_mri.python.util import api_util + + +@api_util.export("recon.adjoint_universal") +def recon_adjoint(data, operator): + r"""Reconstructs a signal using the adjoint of the system operator. + + Given measurement data $b$ generated by a linear system $A$ such that + $Ax = b$, this function estimates the corresponding signal $x$ as + $x = A^H b$, where $A$ is the specified linear operator. + + ```{note} + This function is part of the family of + [universal operators](https://mrphys.github.io/tensorflow-mri/guide/universal/), + a set of functions and classes designed to work flexibly with any linear + system. + ``` + + ```{seealso} + `tfmri.recon.adjoint` is an MRI-specific version of this function and may be + used to perform zero-filled reconstructions. + ``` + + Args: + data: A `tf.Tensor` of real or complex dtype. The measurement data $b$. + Its shape must be compatible with `operator.range_shape`. + operator: A `tfmri.linalg.LinearOperator` representing the system operator + $A$. Its range shape must be compatible with `data.shape`. + ```{tip} + You can use any of the operators in `tfmri.linalg`, a composition of + multiple operators, or a subclassed operator. + ``` + + Returns: + A `tf.Tensor` containing the reconstructed signal. Has the same dtype as + `data` and shape `batch_shape + operator.domain_shape`. `batch_shape` is + the result of broadcasting the batch shapes of `data` and `operator`. 
+ """ + data = tf.convert_to_tensor(data) + data = operator.preprocess(data, adjoint=True) + signal = operator.transform(data, adjoint=True) + signal = operator.postprocess(signal, adjoint=True) + return signal + + +@api_util.export("recon.adjoint", "recon.adj") +def recon_adjoint_mri(kspace, + image_shape, + mask=None, + trajectory=None, + density=None, + sensitivities=None, + phase=None, + sens_norm=True): + r"""Reconstructs an MR image using the adjoint MRI operator. + + Given *k*-space data $b$, this function estimates the corresponding + image as $x = A^H b$, where $A$ is the MRI linear operator. + + This operator supports Cartesian and non-Cartesian *k*-space data. + + Additional density compensation and intensity correction steps are applied + depending on the input arguments. + + This operator supports batched inputs. All batch shapes should be + broadcastable with each other. + + This operator supports multicoil imaging. Coil combination is triggered + when `sensitivities` is not `None`. If you have multiple coils but wish to + reconstruct each coil separately, simply set `sensitivities` to `None`. The + coil dimension will then be treated as a standard batch dimension (i.e., it + becomes part of `...`). + + Args: + kspace: A `tf.Tensor`. The *k*-space samples. Must have type `complex64` or + `complex128`. `kspace` can be either Cartesian or non-Cartesian. A + Cartesian `kspace` must have shape + `[..., num_coils, *image_shape]`, where `...` are batch dimensions. A + non-Cartesian `kspace` must have shape `[..., num_coils, num_samples]`. + If not multicoil (`sensitivities` is `None`), then the `num_coils` axis + must be omitted. + image_shape: A 1D integer `tf.Tensor`. Must have length 2 or 3. + The shape of the reconstructed image[s]. + mask: An optional `tf.Tensor` of type `bool`. The sampling mask. Must have + shape `[..., *image_shape]`. `mask` should be passed for reconstruction + from undersampled Cartesian *k*-space. 
For each point, `mask` should be + `True` if the corresponding *k*-space sample was measured and `False` + otherwise. + trajectory: An optional `tf.Tensor` of type `float32` or `float64`. Must + have shape `[..., num_samples, rank]`. `trajectory` should be passed for + reconstruction from non-Cartesian *k*-space. + density: An optional `tf.Tensor` of type `float32` or `float64`. The + sampling densities. Must have shape `[..., num_samples]`. This input is + only relevant for non-Cartesian MRI reconstruction. If passed, the MRI + linear operator will include sampling density compensation. If `None`, + the MRI operator will not perform sampling density compensation. + sensitivities: An optional `tf.Tensor` of type `complex64` or `complex128`. + The coil sensitivity maps. Must have shape + `[..., num_coils, *image_shape]`. If provided, a multi-coil parallel + imaging reconstruction will be performed. + phase: An optional `tf.Tensor` of type `float32` or `float64`. Must have + shape `[..., *image_shape]`. A phase estimate for the reconstructed image. + If provided, a phase-constrained reconstruction will be performed. This + improves the conditioning of the reconstruction problem in applications + where there is no interest in the phase data. However, artefacts may + appear if an inaccurate phase estimate is passed. + sens_norm: A `boolean`. Whether to normalize coil sensitivities. + Defaults to `True`. + + Returns: + A `tf.Tensor`. The reconstructed image. Has the same type as `kspace` and + shape `[..., *image_shape]`, where `...` is the broadcasted batch shape of + all inputs. + + Notes: + Reconstructs an image by applying the adjoint MRI operator to the *k*-space + data. This typically involves an inverse FFT or a (density-compensated) + NUFFT, and coil combination for multicoil inputs. This type of + reconstruction is often called zero-filled reconstruction, because missing + *k*-space samples are assumed to be zero. 
Therefore, the resulting image is + likely to display aliasing artefacts if *k*-space is not sufficiently + sampled according to the Nyquist criterion. + """ + # Create the linear operator. + operator = linear_operator_mri.LinearOperatorMRI(image_shape, + mask=mask, + trajectory=trajectory, + density=density, + sensitivities=sensitivities, + phase=phase, + fft_norm='ortho', + sens_norm=sens_norm) + return recon_adjoint(kspace, operator) diff --git a/tensorflow_mri/python/recon/recon_adjoint_test.py b/tensorflow_mri/python/recon/recon_adjoint_test.py new file mode 100644 index 00000000..0bd8e1d1 --- /dev/null +++ b/tensorflow_mri/python/recon/recon_adjoint_test.py @@ -0,0 +1,94 @@ +# Copyright 2022 The TensorFlow MRI Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Signal reconstruction (adjoint).""" + +import tensorflow as tf +import tensorflow_nufft as tfft + +from tensorflow_mri.python.ops import fft_ops +from tensorflow_mri.python.recon import recon_adjoint +from tensorflow_mri.python.util import io_util +from tensorflow_mri.python.util import test_util + + +class ReconAdjointTest(test_util.TestCase): + """Tests for reconstruction functions.""" + @classmethod + def setUpClass(cls): + """Prepare tests.""" + super().setUpClass() + cls.data = io_util.read_hdf5('tests/data/recon_ops_data.h5') + cls.data.update(io_util.read_hdf5('tests/data/recon_ops_data_2.h5')) + cls.data.update(io_util.read_hdf5('tests/data/recon_ops_data_3.h5')) + + def test_adj_fft(self): + """Test simple FFT recon.""" + kspace = self.data['fft/kspace'] + sens = self.data['fft/sens'] + image_shape = kspace.shape[-2:] + + # Test single-coil. + image = recon_adjoint.recon_adjoint_mri(kspace[0, ...], image_shape) + expected = fft_ops.ifftn(kspace[0, ...], norm='ortho', shift=True) + + self.assertAllClose(expected, image) + + # Test multi-coil. + image = recon_adjoint.recon_adjoint_mri( + kspace, image_shape, sensitivities=sens) + expected = fft_ops.ifftn(kspace, axes=[-2, -1], norm='ortho', shift=True) + scale = tf.math.reduce_sum(sens * tf.math.conj(sens), axis=0) + expected = tf.math.divide_no_nan( + tf.math.reduce_sum(expected * tf.math.conj(sens), axis=0), scale) + + self.assertAllClose(expected, image) + + def test_adj_nufft(self): + """Test simple NUFFT recon.""" + kspace = self.data['nufft/kspace'] + sens = self.data['nufft/sens'] + traj = self.data['nufft/traj'] + dens = self.data['nufft/dens'] + image_shape = [144, 144] + fft_norm_factor = tf.cast(tf.math.sqrt(144. * 144.), tf.complex64) + + # Save us some typing. + inufft = lambda src, pts: tfft.nufft(src, pts, + grid_shape=[144, 144], + transform_type='type_1', + fft_direction='backward') + + # Test single-coil. 
+ image = recon_adjoint.recon_adjoint_mri(kspace[0, ...], image_shape, + trajectory=traj, + density=dens) + + expected = inufft(kspace[0, ...] / tf.cast(dens, tf.complex64), traj) + expected /= fft_norm_factor + + self.assertAllClose(expected, image) + + # Test multi-coil. + image = recon_adjoint.recon_adjoint_mri(kspace, image_shape, + trajectory=traj, + density=dens, + sensitivities=sens) + expected = inufft(kspace / dens, traj) + expected /= fft_norm_factor + scale = tf.math.reduce_sum(sens * tf.math.conj(sens), axis=0) + expected = tf.math.divide_no_nan( + tf.math.reduce_sum(expected * tf.math.conj(sens), axis=0), scale) + + self.assertAllClose(expected, image) diff --git a/tensorflow_mri/python/recon/recon_least_squares.py b/tensorflow_mri/python/recon/recon_least_squares.py new file mode 100644 index 00000000..c031d795 --- /dev/null +++ b/tensorflow_mri/python/recon/recon_least_squares.py @@ -0,0 +1,15 @@ +# Copyright 2022 The TensorFlow MRI Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Signal reconstruction (least squares).""" diff --git a/tensorflow_mri/python/summary/__init__.py b/tensorflow_mri/python/summary/__init__.py index d7030a38..5066ae9f 100644 --- a/tensorflow_mri/python/summary/__init__.py +++ b/tensorflow_mri/python/summary/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2021 University College London. All Rights Reserved. 
+# Copyright 2021 The TensorFlow MRI Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tensorflow_mri/python/summary/image_summary.py b/tensorflow_mri/python/summary/image_summary.py index faad713a..3f391209 100644 --- a/tensorflow_mri/python/summary/image_summary.py +++ b/tensorflow_mri/python/summary/image_summary.py @@ -1,4 +1,4 @@ -# Copyright 2021 University College London. All Rights Reserved. +# Copyright 2021 The TensorFlow MRI Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tensorflow_mri/python/util/__init__.py b/tensorflow_mri/python/util/__init__.py index 94afc4c7..4cd8d11b 100644 --- a/tensorflow_mri/python/util/__init__.py +++ b/tensorflow_mri/python/util/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2022 University College London. All Rights Reserved. +# Copyright 2022 The TensorFlow MRI Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -17,12 +17,12 @@ from tensorflow_mri.python.util import api_util from tensorflow_mri.python.util import check_util from tensorflow_mri.python.util import deprecation +from tensorflow_mri.python.util import doc_util from tensorflow_mri.python.util import import_util from tensorflow_mri.python.util import io_util from tensorflow_mri.python.util import keras_util from tensorflow_mri.python.util import layer_util from tensorflow_mri.python.util import linalg_ext -from tensorflow_mri.python.util import linalg_imaging from tensorflow_mri.python.util import math_util from tensorflow_mri.python.util import model_util from tensorflow_mri.python.util import nest_util @@ -31,3 +31,4 @@ from tensorflow_mri.python.util import sys_util from tensorflow_mri.python.util import tensor_util from tensorflow_mri.python.util import test_util +from tensorflow_mri.python.util import types_util diff --git a/tensorflow_mri/python/util/api_util.py b/tensorflow_mri/python/util/api_util.py index 3a34af1c..f382feb3 100644 --- a/tensorflow_mri/python/util/api_util.py +++ b/tensorflow_mri/python/util/api_util.py @@ -1,4 +1,4 @@ -# Copyright 2022 University College London. All Rights Reserved. +# Copyright 2022 The TensorFlow MRI Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -23,10 +23,12 @@ _API_ATTR = '_api_names' _SUBMODULE_NAMES = [ + 'activations', 'array', 'callbacks', 'coils', 'convex', + 'geometry', 'image', 'initializers', 'io', @@ -45,10 +47,12 @@ ] _SUBMODULE_DOCSTRINGS = { + 'activations': "Activation functions.", 'array': "Array processing operations.", 'callbacks': "Keras callbacks.", 'coils': "Parallel imaging operations.", 'convex': "Convex optimization operations.", + 'geometry': "Geometric operations.", 'image': "Image processing operations.", 'initializers': "Keras initializers.", 'io': "Input/output operations.", @@ -60,7 +64,7 @@ 'models': "Keras models.", 'optimize': "Optimization operations.", 'plot': "Plotting utilities.", - 'recon': "Image reconstruction.", + 'recon': "Signal reconstruction.", 'sampling': "k-space sampling operations.", 'signal': "Signal processing operations.", 'summary': "Tensorboard summaries." diff --git a/tensorflow_mri/python/util/check_util.py b/tensorflow_mri/python/util/check_util.py index 0885f3db..3c861dd7 100755 --- a/tensorflow_mri/python/util/check_util.py +++ b/tensorflow_mri/python/util/check_util.py @@ -1,4 +1,4 @@ -# Copyright 2021 University College London. All Rights Reserved. +# Copyright 2021 The TensorFlow MRI Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tensorflow_mri/python/util/check_util_test.py b/tensorflow_mri/python/util/check_util_test.py index 6b410005..3feda02d 100644 --- a/tensorflow_mri/python/util/check_util_test.py +++ b/tensorflow_mri/python/util/check_util_test.py @@ -1,4 +1,4 @@ -# Copyright 2021 University College London. All Rights Reserved. +# Copyright 2021 The TensorFlow MRI Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/tensorflow_mri/python/util/data_util.py b/tensorflow_mri/python/util/data_util.py index b639a372..d3ececeb 100644 --- a/tensorflow_mri/python/util/data_util.py +++ b/tensorflow_mri/python/util/data_util.py @@ -1,4 +1,4 @@ -# Copyright 2022 University College London. All Rights Reserved. +# Copyright 2022 The TensorFlow MRI Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tensorflow_mri/python/util/deprecation.py b/tensorflow_mri/python/util/deprecation.py index b8ba3101..2adb0573 100755 --- a/tensorflow_mri/python/util/deprecation.py +++ b/tensorflow_mri/python/util/deprecation.py @@ -1,4 +1,4 @@ -# Copyright 2021 University College London. All Rights Reserved. +# Copyright 2021 The TensorFlow MRI Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -19,7 +19,6 @@ # The following dictionary contains the removal date for deprecations # at a given release. REMOVAL_DATE = { - '0.19.0': '2022-09-01', '0.20.0': '2022-10-01' } diff --git a/tensorflow_mri/python/util/doc_util.py b/tensorflow_mri/python/util/doc_util.py new file mode 100644 index 00000000..9b5879ba --- /dev/null +++ b/tensorflow_mri/python/util/doc_util.py @@ -0,0 +1,25 @@ +# Copyright 2022 The TensorFlow MRI Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Utilities for documentation.""" + +import inspect + + +def get_nd_layer_signature(base): + signature = inspect.signature(base.__init__) + parameters = signature.parameters + parameters = [v for k, v in parameters.items() if k not in ('self', 'rank')] + signature = signature.replace(parameters=parameters) + return signature diff --git a/tensorflow_mri/python/util/import_util.py b/tensorflow_mri/python/util/import_util.py index 16b2d2a1..ef0fd82d 100644 --- a/tensorflow_mri/python/util/import_util.py +++ b/tensorflow_mri/python/util/import_util.py @@ -1,4 +1,4 @@ -# Copyright 2022 University College London. All Rights Reserved. +# Copyright 2022 The TensorFlow MRI Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tensorflow_mri/python/util/import_util_test.py b/tensorflow_mri/python/util/import_util_test.py index 53e5419a..30e9d3b0 100644 --- a/tensorflow_mri/python/util/import_util_test.py +++ b/tensorflow_mri/python/util/import_util_test.py @@ -1,4 +1,4 @@ -# Copyright 2022 University College London. All Rights Reserved. +# Copyright 2022 The TensorFlow MRI Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tensorflow_mri/python/util/io_util.py b/tensorflow_mri/python/util/io_util.py index 953f4365..4391014d 100755 --- a/tensorflow_mri/python/util/io_util.py +++ b/tensorflow_mri/python/util/io_util.py @@ -1,4 +1,4 @@ -# Copyright 2021 University College London. All Rights Reserved. +# Copyright 2021 The TensorFlow MRI Authors. All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tensorflow_mri/python/util/keras_util.py b/tensorflow_mri/python/util/keras_util.py index 5bce9c47..59a4f4ef 100644 --- a/tensorflow_mri/python/util/keras_util.py +++ b/tensorflow_mri/python/util/keras_util.py @@ -1,4 +1,4 @@ -# Copyright 2021 University College London. All Rights Reserved. +# Copyright 2021 The TensorFlow MRI Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -68,3 +68,22 @@ def get_config(self): def is_tensor_or_variable(x): return tf.is_tensor(x) or isinstance(x, tf.Variable) + + +def complexx(): + """Returns the default complex dtype, as a string. + + The default complex dtype is the complex equivalent of the default + float type, which can be obtained as `tf.keras.backend.floatx()`. + + To change the default complex dtype, change the default float type via + `tf.keras.backend.set_floatx()`. + + Returns: + The current default complex dtype, as a string. + """ + complex_dtypes = { + 'float32': 'complex64', + 'float64': 'complex128' + } + return tf.dtypes.as_dtype(complex_dtypes[tf.keras.backend.floatx()]).name diff --git a/tensorflow_mri/python/util/keras_util_test.py b/tensorflow_mri/python/util/keras_util_test.py index 5d0da724..209adb8c 100644 --- a/tensorflow_mri/python/util/keras_util_test.py +++ b/tensorflow_mri/python/util/keras_util_test.py @@ -1,4 +1,4 @@ -# Copyright 2022 University College London. All Rights Reserved. +# Copyright 2022 The TensorFlow MRI Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/tensorflow_mri/python/util/layer_util.py b/tensorflow_mri/python/util/layer_util.py index 880f7a40..cb323d81 100644 --- a/tensorflow_mri/python/util/layer_util.py +++ b/tensorflow_mri/python/util/layer_util.py @@ -1,4 +1,4 @@ -# Copyright 2021 University College London. All Rights Reserved. +# Copyright 2021 The TensorFlow MRI Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -16,7 +16,13 @@ import tensorflow as tf +from tensorflow_mri.python.layers import coil_sensitivities from tensorflow_mri.python.layers import convolutional +from tensorflow_mri.python.layers import data_consistency +from tensorflow_mri.python.layers import padding +from tensorflow_mri.python.layers import pooling +from tensorflow_mri.python.layers import reshaping +from tensorflow_mri.python.layers import recon_adjoint from tensorflow_mri.python.layers import signal_layers @@ -41,9 +47,13 @@ def get_nd_layer(name, rank): _ND_LAYERS = { - ('AveragePooling', 1): tf.keras.layers.AveragePooling1D, - ('AveragePooling', 2): tf.keras.layers.AveragePooling2D, - ('AveragePooling', 3): tf.keras.layers.AveragePooling3D, + ('AveragePooling', 1): pooling.AveragePooling1D, + ('AveragePooling', 2): pooling.AveragePooling2D, + ('AveragePooling', 3): pooling.AveragePooling3D, + ('CoilSensitivityEstimation', 2): + coil_sensitivities.CoilSensitivityEstimation2D, + ('CoilSensitivityEstimation', 3): + coil_sensitivities.CoilSensitivityEstimation3D, ('Conv', 1): convolutional.Conv1D, ('Conv', 2): convolutional.Conv2D, ('Conv', 3): convolutional.Conv3D, @@ -58,6 +68,9 @@ def get_nd_layer(name, rank): ('Cropping', 3): tf.keras.layers.Cropping3D, ('DepthwiseConv', 1): tf.keras.layers.DepthwiseConv1D, ('DepthwiseConv', 2): tf.keras.layers.DepthwiseConv2D, + ('DivisorPadding', 1): padding.DivisorPadding1D, + ('DivisorPadding', 2): padding.DivisorPadding2D, + ('DivisorPadding', 3): 
padding.DivisorPadding3D, ('DWT', 1): signal_layers.DWT1D, ('DWT', 2): signal_layers.DWT2D, ('DWT', 3): signal_layers.DWT3D, @@ -70,19 +83,25 @@ def get_nd_layer(name, rank): ('IDWT', 1): signal_layers.IDWT1D, ('IDWT', 2): signal_layers.IDWT2D, ('IDWT', 3): signal_layers.IDWT3D, + ('LeastSquaresGradientDescent', 2): + data_consistency.LeastSquaresGradientDescent2D, + ('LeastSquaresGradientDescent', 3): + data_consistency.LeastSquaresGradientDescent3D, ('LocallyConnected', 1): tf.keras.layers.LocallyConnected1D, ('LocallyConnected', 2): tf.keras.layers.LocallyConnected2D, - ('MaxPool', 1): tf.keras.layers.MaxPool1D, - ('MaxPool', 2): tf.keras.layers.MaxPool2D, - ('MaxPool', 3): tf.keras.layers.MaxPool3D, + ('MaxPool', 1): pooling.MaxPooling1D, + ('MaxPool', 2): pooling.MaxPooling2D, + ('MaxPool', 3): pooling.MaxPooling3D, + ('ReconAdjoint', 2): recon_adjoint.ReconAdjoint2D, + ('ReconAdjoint', 3): recon_adjoint.ReconAdjoint3D, ('SeparableConv', 1): tf.keras.layers.SeparableConv1D, ('SeparableConv', 2): tf.keras.layers.SeparableConv2D, ('SpatialDropout', 1): tf.keras.layers.SpatialDropout1D, ('SpatialDropout', 2): tf.keras.layers.SpatialDropout2D, ('SpatialDropout', 3): tf.keras.layers.SpatialDropout3D, - ('UpSampling', 1): tf.keras.layers.UpSampling1D, - ('UpSampling', 2): tf.keras.layers.UpSampling2D, - ('UpSampling', 3): tf.keras.layers.UpSampling3D, + ('UpSampling', 1): reshaping.UpSampling1D, + ('UpSampling', 2): reshaping.UpSampling2D, + ('UpSampling', 3): reshaping.UpSampling3D, ('ZeroPadding', 1): tf.keras.layers.ZeroPadding1D, ('ZeroPadding', 2): tf.keras.layers.ZeroPadding2D, ('ZeroPadding', 3): tf.keras.layers.ZeroPadding3D diff --git a/tensorflow_mri/python/util/linalg_ext.py b/tensorflow_mri/python/util/linalg_ext.py index a5aca2ad..9798c4bc 100644 --- a/tensorflow_mri/python/util/linalg_ext.py +++ b/tensorflow_mri/python/util/linalg_ext.py @@ -1,4 +1,4 @@ -# Copyright 2021 University College London. All Rights Reserved. 
+# Copyright 2021 The TensorFlow MRI Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tensorflow_mri/python/util/linalg_ext_test.py b/tensorflow_mri/python/util/linalg_ext_test.py index f8135f63..0732e5c9 100644 --- a/tensorflow_mri/python/util/linalg_ext_test.py +++ b/tensorflow_mri/python/util/linalg_ext_test.py @@ -1,4 +1,4 @@ -# Copyright 2021 University College London. All Rights Reserved. +# Copyright 2021 The TensorFlow MRI Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tensorflow_mri/python/util/linalg_imaging.py b/tensorflow_mri/python/util/linalg_imaging.py deleted file mode 100644 index 1bd7bd9e..00000000 --- a/tensorflow_mri/python/util/linalg_imaging.py +++ /dev/null @@ -1,815 +0,0 @@ -# Copyright 2021 University College London. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Linear algebra for images. - -Contains the imaging mixin and imaging extensions of basic linear operators. 
-""" - -import abc - -import tensorflow as tf - -from tensorflow_mri.python.ops import array_ops -from tensorflow_mri.python.util import api_util -from tensorflow_mri.python.util import check_util -from tensorflow_mri.python.util import linalg_ext -from tensorflow_mri.python.util import tensor_util - - -class LinalgImagingMixin(tf.linalg.LinearOperator): - """Mixin for linear operators meant to operate on images.""" - def transform(self, x, adjoint=False, name="transform"): - """Transform a batch of images. - - Applies this operator to a batch of non-vectorized images `x`. - - Args: - x: A `Tensor` with compatible shape and same dtype as `self`. - adjoint: A `boolean`. If `True`, transforms the input using the adjoint - of the operator, instead of the operator itself. - name: A name for this operation. - - Returns: - The transformed `Tensor` with the same `dtype` as `self`. - """ - with self._name_scope(name): # pylint: disable=not-callable - x = tf.convert_to_tensor(x, name="x") - self._check_input_dtype(x) - input_shape = self.range_shape if adjoint else self.domain_shape - input_shape.assert_is_compatible_with(x.shape[-input_shape.rank:]) # pylint: disable=invalid-unary-operand-type - return self._transform(x, adjoint=adjoint) - - @property - def domain_shape(self): - """Domain shape of this linear operator.""" - return self._domain_shape() - - @property - def range_shape(self): - """Range shape of this linear operator.""" - return self._range_shape() - - def domain_shape_tensor(self, name="domain_shape_tensor"): - """Domain shape of this linear operator, determined at runtime.""" - with self._name_scope(name): # pylint: disable=not-callable - # Prefer to use statically defined shape if available. 
- if self.domain_shape.is_fully_defined(): - return tensor_util.convert_shape_to_tensor(self.domain_shape.as_list()) - return self._domain_shape_tensor() - - def range_shape_tensor(self, name="range_shape_tensor"): - """Range shape of this linear operator, determined at runtime.""" - with self._name_scope(name): # pylint: disable=not-callable - # Prefer to use statically defined shape if available. - if self.range_shape.is_fully_defined(): - return tensor_util.convert_shape_to_tensor(self.range_shape.as_list()) - return self._range_shape_tensor() - - def batch_shape_tensor(self, name="batch_shape_tensor"): - """Batch shape of this linear operator, determined at runtime.""" - with self._name_scope(name): # pylint: disable=not-callable - if self.batch_shape.is_fully_defined(): - return tensor_util.convert_shape_to_tensor(self.batch_shape.as_list()) - return self._batch_shape_tensor() - - def adjoint(self, name="adjoint"): - """Returns the adjoint of this linear operator. - - The returned operator is a valid `LinalgImagingMixin` instance. - - Calling `self.adjoint()` and `self.H` are equivalent. - - Args: - name: A name for this operation. - - Returns: - A `LinearOperator` derived from `LinalgImagingMixin`, which - represents the adjoint of this linear operator. - """ - if self.is_self_adjoint: - return self - with self._name_scope(name): # pylint: disable=not-callable - return LinearOperatorAdjoint(self) - - H = property(adjoint, None) - - @abc.abstractmethod - def _transform(self, x, adjoint=False): - # Subclasses must override this method. - raise NotImplementedError("Method `_transform` is not implemented.") - - def _matvec(self, x, adjoint=False): - # Default implementation of `_matvec` for imaging operator. The vectorized - # input `x` is first expanded to the its full shape, then transformed, then - # vectorized again. Typically subclasses should not need to override this - # method. 
- x = self.expand_range_dimension(x) if adjoint else \ - self.expand_domain_dimension(x) - x = self._transform(x, adjoint=adjoint) - x = self.flatten_domain_shape(x) if adjoint else \ - self.flatten_range_shape(x) - return x - - def _matmul(self, x, adjoint=False, adjoint_arg=False): - # Default implementation of `matmul` for imaging operator. If outer - # dimension of argument is 1, call `matvec`. Otherwise raise an error. - # Typically subclasses should not need to override this method. - arg_outer_dim = -2 if adjoint_arg else -1 - - if x.shape[arg_outer_dim] != 1: - raise ValueError( - f"`{self.__class__.__name__}` does not support matrix multiplication.") - - x = tf.squeeze(x, axis=arg_outer_dim) - x = self.matvec(x, adjoint=adjoint) - x = tf.expand_dims(x, axis=arg_outer_dim) - return x - - @abc.abstractmethod - def _domain_shape(self): - # Users must override this method. - return tf.TensorShape(None) - - @abc.abstractmethod - def _range_shape(self): - # Users must override this method. - return tf.TensorShape(None) - - def _batch_shape(self): - # Users should override this method if this operator has a batch shape. - return tf.TensorShape([]) - - def _domain_shape_tensor(self): - # Users should override this method if they need to provide a dynamic domain - # shape. - raise NotImplementedError("_domain_shape_tensor is not implemented.") - - def _range_shape_tensor(self): - # Users should override this method if they need to provide a dynamic range - # shape. - raise NotImplementedError("_range_shape_tensor is not implemented.") - - def _batch_shape_tensor(self): # pylint: disable=arguments-differ - # Users should override this method if they need to provide a dynamic batch - # shape. - return tf.constant([], dtype=tf.dtypes.int32) - - def _shape(self): - # Default implementation of `_shape` for imaging operators. Typically - # subclasses should not need to override this method. 
- return self._batch_shape() + tf.TensorShape( - [self.range_shape.num_elements(), - self.domain_shape.num_elements()]) - - def _shape_tensor(self): - # Default implementation of `_shape_tensor` for imaging operators. Typically - # subclasses should not need to override this method. - return tf.concat([self.batch_shape_tensor(), - [tf.size(self.range_shape_tensor()), - tf.size(self.domain_shape_tensor())]], 0) - - def flatten_domain_shape(self, x): - """Flattens `x` to match the domain dimension of this operator. - - Args: - x: A `Tensor`. Must have shape `[...] + self.domain_shape`. - - Returns: - The flattened `Tensor`. Has shape `[..., self.domain_dimension]`. - """ - # pylint: disable=invalid-unary-operand-type - self.domain_shape.assert_is_compatible_with( - x.shape[-self.domain_shape.rank:]) - - batch_shape = x.shape[:-self.domain_shape.rank] - batch_shape_tensor = tf.shape(x)[:-self.domain_shape.rank] - - output_shape = batch_shape + self.domain_dimension - output_shape_tensor = tf.concat( - [batch_shape_tensor, [self.domain_dimension_tensor()]], 0) - - x = tf.reshape(x, output_shape_tensor) - return tf.ensure_shape(x, output_shape) - - def flatten_range_shape(self, x): - """Flattens `x` to match the range dimension of this operator. - - Args: - x: A `Tensor`. Must have shape `[...] + self.range_shape`. - - Returns: - The flattened `Tensor`. Has shape `[..., self.range_dimension]`. - """ - # pylint: disable=invalid-unary-operand-type - self.range_shape.assert_is_compatible_with( - x.shape[-self.range_shape.rank:]) - - batch_shape = x.shape[:-self.range_shape.rank] - batch_shape_tensor = tf.shape(x)[:-self.range_shape.rank] - - output_shape = batch_shape + self.range_dimension - output_shape_tensor = tf.concat( - [batch_shape_tensor, [self.range_dimension_tensor()]], 0) - - x = tf.reshape(x, output_shape_tensor) - return tf.ensure_shape(x, output_shape) - - def expand_domain_dimension(self, x): - """Expands `x` to match the domain shape of this operator. 
- - Args: - x: A `Tensor`. Must have shape `[..., self.domain_dimension]`. - - Returns: - The expanded `Tensor`. Has shape `[...] + self.domain_shape`. - """ - self.domain_dimension.assert_is_compatible_with(x.shape[-1]) - - batch_shape = x.shape[:-1] - batch_shape_tensor = tf.shape(x)[:-1] - - output_shape = batch_shape + self.domain_shape - output_shape_tensor = tf.concat([ - batch_shape_tensor, self.domain_shape_tensor()], 0) - - x = tf.reshape(x, output_shape_tensor) - return tf.ensure_shape(x, output_shape) - - def expand_range_dimension(self, x): - """Expands `x` to match the range shape of this operator. - - Args: - x: A `Tensor`. Must have shape `[..., self.range_dimension]`. - - Returns: - The expanded `Tensor`. Has shape `[...] + self.range_shape`. - """ - self.range_dimension.assert_is_compatible_with(x.shape[-1]) - - batch_shape = x.shape[:-1] - batch_shape_tensor = tf.shape(x)[:-1] - - output_shape = batch_shape + self.range_shape - output_shape_tensor = tf.concat([ - batch_shape_tensor, self.range_shape_tensor()], 0) - - x = tf.reshape(x, output_shape_tensor) - return tf.ensure_shape(x, output_shape) - - -@api_util.export("linalg.LinearOperator") -class LinearOperator(LinalgImagingMixin, tf.linalg.LinearOperator): # pylint: disable=abstract-method - r"""Base class defining a [batch of] linear operator[s]. - - Provides access to common matrix operations without the need to materialize - the matrix. - - This operator is similar to `tf.linalg.LinearOperator`_, but has additional - methods to simplify operations on images, while maintaining compatibility - with the TensorFlow linear algebra framework. - - Inputs and outputs to this linear operator or its subclasses may have - meaningful non-vectorized N-D shapes. Thus this class defines the additional - properties `domain_shape` and `range_shape` and the methods - `domain_shape_tensor` and `range_shape_tensor`. 
These enrich the information - provided by the built-in properties `shape`, `domain_dimension`, - `range_dimension` and methods `domain_dimension_tensor` and - `range_dimension_tensor`, which only have information about the vectorized 1D - shapes. - - Subclasses of this operator must define the methods `_domain_shape` and - `_range_shape`, which return the static domain and range shapes of the - operator. Optionally, subclasses may also define the methods - `_domain_shape_tensor` and `_range_shape_tensor`, which return the dynamic - domain and range shapes of the operator. These two methods will only be called - if `_domain_shape` and `_range_shape` do not return fully defined static - shapes. - - Subclasses must define the abstract method `_transform`, which - applies the operator (or its adjoint) to a [batch of] images. This internal - method is called by `transform`. In general, subclasses of this operator - should not define the methods `_matvec` or `_matmul`. These have default - implementations which call `_transform`. - - Operators derived from this class may be used in any of the following ways: - - 1. Using method `transform`, which expects a full-shaped input and returns - a full-shaped output, i.e. a tensor with shape `[...] + shape`, where - `shape` is either the `domain_shape` or the `range_shape`. This method is - unique to operators derived from this class. - 2. Using method `matvec`, which expects a vectorized input and returns a - vectorized output, i.e. a tensor with shape `[..., n]` where `n` is - either the `domain_dimension` or the `range_dimension`. This method is - part of the TensorFlow linear algebra framework. - 3. Using method `matmul`, which expects matrix inputs and returns matrix - outputs. Note that a matrix is just a column vector in this context, i.e. - a tensor with shape `[..., n, 1]`, where `n` is either the - `domain_dimension` or the `range_dimension`. Matrices which are not column - vectors (i.e. 
whose last dimension is not 1) are not supported. This - method is part of the TensorFlow linear algebra framework. - - Operators derived from this class may also be used with the functions - `tf.linalg.matvec`_ and `tf.linalg.matmul`_, which will call the - corresponding methods. - - This class also provides the convenience functions `flatten_domain_shape` and - `flatten_range_shape` to flatten full-shaped inputs/outputs to their - vectorized form. Conversely, `expand_domain_dimension` and - `expand_range_dimension` may be used to expand vectorized inputs/outputs to - their full-shaped form. - - **Subclassing** - - Subclasses must always define `_transform`, which implements this operator's - functionality (and its adjoint). In general, subclasses should not define the - methods `_matvec` or `_matmul`. These have default implementations which call - `_transform`. - - Subclasses must always define `_domain_shape` - and `_range_shape`, which return the static domain/range shapes of the - operator. If the subclassed operator needs to provide dynamic domain/range - shapes and the static shapes are not always fully-defined, it must also define - `_domain_shape_tensor` and `_range_shape_tensor`, which return the dynamic - domain/range shapes of the operator. In general, subclasses should not define - the methods `_shape` or `_shape_tensor`. These have default implementations. - - If the subclassed operator has a non-scalar batch shape, it must also define - `_batch_shape` which returns the static batch shape. If the static batch shape - is not always fully-defined, the subclass must also define - `_batch_shape_tensor`, which returns the dynamic batch shape. - - Args: - dtype: The `tf.dtypes.DType` of the matrix that this operator represents. - is_non_singular: Expect that this operator is non-singular. - is_self_adjoint: Expect that this operator is equal to its Hermitian - transpose. If `dtype` is real, this is equivalent to being symmetric. 
- is_positive_definite: Expect that this operator is positive definite, - meaning the quadratic form :math:`x^H A x` has positive real part for all - nonzero :math:`x`. Note that we do not require the operator to be - self-adjoint to be positive-definite. - is_square: Expect that this operator acts like square [batch] matrices. - name: A name for this `LinearOperator`. - - .. _tf.linalg.LinearOperator: https://www.tensorflow.org/api_docs/python/tf/linalg/LinearOperator - .. _tf.linalg.matvec: https://www.tensorflow.org/api_docs/python/tf/linalg/matvec - .. _tf.linalg.matmul: https://www.tensorflow.org/api_docs/python/tf/linalg/matmul - """ - - -@api_util.export("linalg.LinearOperatorAdjoint") -class LinearOperatorAdjoint(LinalgImagingMixin, # pylint: disable=abstract-method - tf.linalg.LinearOperatorAdjoint): - """Linear operator representing the adjoint of another operator. - - `LinearOperatorAdjoint` is initialized with an operator :math:`A` and - represents its adjoint :math:`A^H`. - - .. note: - Similar to `tf.linalg.LinearOperatorAdjoint`_, but with imaging extensions. - - Args: - operator: A `LinearOperator`. - is_non_singular: Expect that this operator is non-singular. - is_self_adjoint: Expect that this operator is equal to its Hermitian - transpose. - is_positive_definite: Expect that this operator is positive definite, - meaning the quadratic form :math:`x^H A x` has positive real part for all - nonzero :math:`x`. Note that we do not require the operator to be - self-adjoint to be positive-definite. - is_square: Expect that this operator acts like square [batch] matrices. - name: A name for this `LinearOperator`. Default is `operator.name + - "_adjoint"`. - - .. 
_tf.linalg.LinearOperatorAdjoint: https://www.tensorflow.org/api_docs/python/tf/linalg/LinearOperatorAdjoint - """ - def _transform(self, x, adjoint=False): - # pylint: disable=protected-access - return self.operator._transform(x, adjoint=(not adjoint)) - - def _domain_shape(self): - return self.operator.range_shape - - def _range_shape(self): - return self.operator.domain_shape - - def _batch_shape(self): - return self.operator.batch_shape - - def _domain_shape_tensor(self): - return self.operator.range_shape_tensor() - - def _range_shape_tensor(self): - return self.operator.domain_shape_tensor() - - def _batch_shape_tensor(self): - return self.operator.batch_shape_tensor() - - -@api_util.export("linalg.LinearOperatorComposition") -class LinearOperatorComposition(LinalgImagingMixin, # pylint: disable=abstract-method - tf.linalg.LinearOperatorComposition): - """Composes one or more linear operators. - - `LinearOperatorComposition` is initialized with a list of operators - :math:`A_1, A_2, ..., A_J` and represents their composition - :math:`A_1 A_2 ... A_J`. - - .. note: - Similar to `tf.linalg.LinearOperatorComposition`_, but with imaging - extensions. - - Args: - operators: A `list` of `LinearOperator` objects, each with the same `dtype` - and composable shape. - is_non_singular: Expect that this operator is non-singular. - is_self_adjoint: Expect that this operator is equal to its Hermitian - transpose. - is_positive_definite: Expect that this operator is positive definite, - meaning the quadratic form :math:`x^H A x` has positive real part for all - nonzero :math:`x`. Note that we do not require the operator to be - self-adjoint to be positive-definite. - is_square: Expect that this operator acts like square [batch] matrices. - name: A name for this `LinearOperator`. Default is the individual - operators names joined with `_o_`. - - .. 
_tf.linalg.LinearOperatorComposition: https://www.tensorflow.org/api_docs/python/tf/linalg/LinearOperatorComposition - """ - def _transform(self, x, adjoint=False): - # pylint: disable=protected-access - if adjoint: - transform_order_list = self.operators - else: - transform_order_list = list(reversed(self.operators)) - - result = transform_order_list[0]._transform(x, adjoint=adjoint) - for operator in transform_order_list[1:]: - result = operator._transform(result, adjoint=adjoint) - return result - - def _domain_shape(self): - return self.operators[-1].domain_shape - - def _range_shape(self): - return self.operators[0].range_shape - - def _batch_shape(self): - return array_ops.broadcast_static_shapes( - *[operator.batch_shape for operator in self.operators]) - - def _domain_shape_tensor(self): - return self.operators[-1].domain_shape_tensor() - - def _range_shape_tensor(self): - return self.operators[0].range_shape_tensor() - - def _batch_shape_tensor(self): - return array_ops.broadcast_dynamic_shapes( - *[operator.batch_shape_tensor() for operator in self.operators]) - - -@api_util.export("linalg.LinearOperatorAddition") -class LinearOperatorAddition(LinalgImagingMixin, # pylint: disable=abstract-method - linalg_ext.LinearOperatorAddition): - """Adds one or more linear operators. - - `LinearOperatorAddition` is initialized with a list of operators - :math:`A_1, A_2, ..., A_J` and represents their addition - :math:`A_1 + A_2 + ... + A_J`. - - Args: - operators: A `list` of `LinearOperator` objects, each with the same `dtype` - and shape. - is_non_singular: Expect that this operator is non-singular. - is_self_adjoint: Expect that this operator is equal to its Hermitian - transpose. - is_positive_definite: Expect that this operator is positive definite, - meaning the quadratic form :math:`x^H A x` has positive real part for all - nonzero :math:`x`. Note that we do not require the operator to be - self-adjoint to be positive-definite. 
- is_square: Expect that this operator acts like square [batch] matrices. - name: A name for this `LinearOperator`. Default is the individual - operators names joined with `_p_`. - """ - def _transform(self, x, adjoint=False): - # pylint: disable=protected-access - result = self.operators[0]._transform(x, adjoint=adjoint) - for operator in self.operators[1:]: - result += operator._transform(x, adjoint=adjoint) - return result - - def _domain_shape(self): - return self.operators[0].domain_shape - - def _range_shape(self): - return self.operators[0].range_shape - - def _batch_shape(self): - return array_ops.broadcast_static_shapes( - *[operator.batch_shape for operator in self.operators]) - - def _domain_shape_tensor(self): - return self.operators[0].domain_shape_tensor() - - def _range_shape_tensor(self): - return self.operators[0].range_shape_tensor() - - def _batch_shape_tensor(self): - return array_ops.broadcast_dynamic_shapes( - *[operator.batch_shape_tensor() for operator in self.operators]) - - -@api_util.export("linalg.LinearOperatorScaledIdentity") -class LinearOperatorScaledIdentity(LinalgImagingMixin, # pylint: disable=abstract-method - tf.linalg.LinearOperatorScaledIdentity): - """Linear operator representing a scaled identity matrix. - - .. note: - Similar to `tf.linalg.LinearOperatorScaledIdentity`_, but with imaging - extensions. - - Args: - shape: Non-negative integer `Tensor`. The shape of the operator. - multiplier: A `Tensor` of shape `[B1, ..., Bb]`, or `[]` (a scalar). - is_non_singular: Expect that this operator is non-singular. - is_self_adjoint: Expect that this operator is equal to its hermitian - transpose. - is_positive_definite: Expect that this operator is positive definite, - meaning the quadratic form `x^H A x` has positive real part for all - nonzero `x`. Note that we do not require the operator to be - self-adjoint to be positive-definite. 
See: - https://en.wikipedia.org/wiki/Positive-definite_matrix#Extension_for_non-symmetric_matrices - is_square: Expect that this operator acts like square [batch] matrices. - assert_proper_shapes: Python `bool`. If `False`, only perform static - checks that initialization and method arguments have proper shape. - If `True`, and static checks are inconclusive, add asserts to the graph. - name: A name for this `LinearOperator`. - - .. _tf.linalg.LinearOperatorScaledIdentity: https://www.tensorflow.org/api_docs/python/tf/linalg/LinearOperatorScaledIdentity - """ - def __init__(self, - shape, - multiplier, - is_non_singular=None, - is_self_adjoint=None, - is_positive_definite=None, - is_square=True, - assert_proper_shapes=False, - name="LinearOperatorScaledIdentity"): - - self._domain_shape_tensor_value = tensor_util.convert_shape_to_tensor( - shape, name="shape") - self._domain_shape_value = tf.TensorShape(tf.get_static_value( - self._domain_shape_tensor_value)) - - super().__init__( - num_rows=tf.math.reduce_prod(shape), - multiplier=multiplier, - is_non_singular=is_non_singular, - is_self_adjoint=is_self_adjoint, - is_positive_definite=is_positive_definite, - is_square=is_square, - assert_proper_shapes=assert_proper_shapes, - name=name) - - def _transform(self, x, adjoint=False): - domain_rank = tf.size(self.domain_shape_tensor()) - multiplier_shape = tf.concat([ - tf.shape(self.multiplier), - tf.ones((domain_rank,), dtype=tf.int32)], 0) - multiplier_matrix = tf.reshape(self.multiplier, multiplier_shape) - if adjoint: - multiplier_matrix = tf.math.conj(multiplier_matrix) - return x * multiplier_matrix - - def _domain_shape(self): - return self._domain_shape_value - - def _range_shape(self): - return self._domain_shape_value - - def _batch_shape(self): - return self.multiplier.shape - - def _domain_shape_tensor(self): - return self._domain_shape_tensor_value - - def _range_shape_tensor(self): - return self._domain_shape_tensor_value - - def _batch_shape_tensor(self): 
- return tf.shape(self.multiplier) - - -@api_util.export("linalg.LinearOperatorDiag") -class LinearOperatorDiag(LinalgImagingMixin, tf.linalg.LinearOperatorDiag): # pylint: disable=abstract-method - """Linear operator representing a square diagonal matrix. - - This operator acts like a [batch] diagonal matrix `A` with shape - `[B1, ..., Bb, N, N]` for some `b >= 0`. The first `b` indices index a - batch member. For every batch index `(i1, ..., ib)`, `A[i1, ..., ib, : :]` is - an `N x N` matrix. This matrix `A` is not materialized, but for - purposes of broadcasting this shape will be relevant. - - .. note: - Similar to `tf.linalg.LinearOperatorDiag`_, but with imaging extensions. - - Args: - diag: A `tf.Tensor` of shape `[B1, ..., Bb, *S]`. - rank: An `int`. The rank of `S`. Must be <= `diag.shape.rank`. - is_non_singular: Expect that this operator is non-singular. - is_self_adjoint: Expect that this operator is equal to its Hermitian - transpose. If `diag` is real, this is auto-set to `True`. - is_positive_definite: Expect that this operator is positive definite, - meaning the quadratic form :math:`x^H A x` has positive real part for all - nonzero :math:`x`. Note that we do not require the operator to be - self-adjoint to be positive-definite. - is_square: Expect that this operator acts like square [batch] matrices. - name: A name for this `LinearOperator`. - - .. 
_tf.linalg.LinearOperatorDiag: https://www.tensorflow.org/api_docs/python/tf/linalg/LinearOperatorDiag - """ - # pylint: disable=invalid-unary-operand-type - def __init__(self, - diag, - rank, - is_non_singular=None, - is_self_adjoint=None, - is_positive_definite=None, - is_square=True, - name='LinearOperatorDiag'): - # pylint: disable=invalid-unary-operand-type - diag = tf.convert_to_tensor(diag, name='diag') - self._rank = check_util.validate_rank(rank, name='rank', accept_none=False) - if self._rank > diag.shape.rank: - raise ValueError( - f"Argument `rank` must be <= `diag.shape.rank`, but got: {rank}") - - self._shape_tensor_value = tf.shape(diag) - self._shape_value = diag.shape - batch_shape = self._shape_tensor_value[:-self._rank] - - super().__init__( - diag=tf.reshape(diag, tf.concat([batch_shape, [-1]], 0)), - is_non_singular=is_non_singular, - is_self_adjoint=is_self_adjoint, - is_positive_definite=is_positive_definite, - is_square=is_square, - name=name) - - def _transform(self, x, adjoint=False): - diag = tf.math.conj(self.diag) if adjoint else self.diag - return tf.reshape(diag, self.domain_shape_tensor()) * x - - def _domain_shape(self): - return self._shape_value[-self._rank:] - - def _range_shape(self): - return self._shape_value[-self._rank:] - - def _batch_shape(self): - return self._shape_value[:-self._rank] - - def _domain_shape_tensor(self): - return self._shape_tensor_value[-self._rank:] - - def _range_shape_tensor(self): - return self._shape_tensor_value[-self._rank:] - - def _batch_shape_tensor(self): - return self._shape_tensor_value[:-self._rank] - - -@api_util.export("linalg.LinearOperatorGramMatrix") -class LinearOperatorGramMatrix(LinearOperator): # pylint: disable=abstract-method - r"""Linear operator representing the Gram matrix of an operator. - - If :math:`A` is a `LinearOperator`, this operator is equivalent to - :math:`A^H A`. 
- - The Gram matrix of :math:`A` appears in the normal equation - :math:`A^H A x = A^H b` associated with the least squares problem - :math:`{\mathop{\mathrm{argmin}}_x} {\left \| Ax-b \right \|_2^2}`. - - This operator is self-adjoint and positive definite. Therefore, linear systems - defined by this linear operator can be solved using the conjugate gradient - method. - - This operator supports the optional addition of a regularization parameter - :math:`\lambda` and a transform matrix :math:`T`. If these are provided, - this operator becomes :math:`A^H A + \lambda T^H T`. This appears - in the regularized normal equation - :math:`\left ( A^H A + \lambda T^H T \right ) x = A^H b + \lambda T^H T x_0`, - associated with the regularized least squares problem - :math:`{\mathop{\mathrm{argmin}}_x} {\left \| Ax-b \right \|_2^2 + \lambda \left \| T(x-x_0) \right \|_2^2}`. - - Args: - operator: A `tfmri.linalg.LinearOperator`. The operator :math:`A` whose Gram - matrix is represented by this linear operator. - reg_parameter: A `Tensor` of shape `[B1, ..., Bb]` and real dtype. - The regularization parameter :math:`\lambda`. Defaults to 0. - reg_operator: A `tfmri.linalg.LinearOperator`. The regularization transform - :math:`T`. Defaults to the identity. - gram_operator: A `tfmri.linalg.LinearOperator`. The Gram matrix - :math:`A^H A`. This may be optionally provided to use a specialized - Gram matrix implementation. Defaults to `None`. - is_non_singular: Expect that this operator is non-singular. - is_self_adjoint: Expect that this operator is equal to its Hermitian - transpose. - is_positive_definite: Expect that this operator is positive definite, - meaning the quadratic form :math:`x^H A x` has positive real part for all - nonzero :math:`x`. Note that we do not require the operator to be - self-adjoint to be positive-definite. - is_square: Expect that this operator acts like square [batch] matrices. - name: A name for this `LinearOperator`. 
- """ - def __init__(self, - operator, - reg_parameter=None, - reg_operator=None, - gram_operator=None, - is_non_singular=None, - is_self_adjoint=True, - is_positive_definite=True, - is_square=True, - name=None): - parameters = dict( - operator=operator, - reg_parameter=reg_parameter, - reg_operator=reg_operator, - is_non_singular=is_non_singular, - is_self_adjoint=is_self_adjoint, - is_positive_definite=is_positive_definite, - is_square=is_square, - name=name) - self._operator = operator - self._reg_parameter = reg_parameter - self._reg_operator = reg_operator - self._gram_operator = gram_operator - if gram_operator is not None: - self._composed = gram_operator - else: - self._composed = LinearOperatorComposition( - operators=[self._operator.H, self._operator]) - - if not is_self_adjoint: - raise ValueError("A Gram matrix is always self-adjoint.") - if not is_positive_definite: - raise ValueError("A Gram matrix is always positive-definite.") - if not is_square: - raise ValueError("A Gram matrix is always square.") - - if self._reg_parameter is not None: - reg_operator_gm = LinearOperatorScaledIdentity( - shape=self._operator.domain_shape, - multiplier=tf.cast(self._reg_parameter, self._operator.dtype)) - if self._reg_operator is not None: - reg_operator_gm = LinearOperatorComposition( - operators=[reg_operator_gm, - self._reg_operator.H, - self._reg_operator]) - self._composed = LinearOperatorAddition( - operators=[self._composed, reg_operator_gm]) - - super().__init__(operator.dtype, - is_non_singular=is_non_singular, - is_self_adjoint=is_self_adjoint, - is_positive_definite=is_positive_definite, - is_square=is_square, - parameters=parameters) - - def _transform(self, x, adjoint=False): - return self._composed.transform(x, adjoint=adjoint) - - def _domain_shape(self): - return self.operator.domain_shape - - def _range_shape(self): - return self.operator.domain_shape - - def _batch_shape(self): - return self.operator.batch_shape - - def _domain_shape_tensor(self): 
- return self.operator.domain_shape_tensor() - - def _range_shape_tensor(self): - return self.operator.domain_shape_tensor() - - def _batch_shape_tensor(self): - return self.operator.batch_shape_tensor() - - @property - def operator(self): - return self._operator diff --git a/tensorflow_mri/python/util/math_util.py b/tensorflow_mri/python/util/math_util.py index 367f9619..3dfc07e6 100644 --- a/tensorflow_mri/python/util/math_util.py +++ b/tensorflow_mri/python/util/math_util.py @@ -1,4 +1,4 @@ -# Copyright 2021 University College London. All Rights Reserved. +# Copyright 2021 The TensorFlow MRI Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tensorflow_mri/python/util/model_util.py b/tensorflow_mri/python/util/model_util.py index 4f8b2f3a..2ea8d80d 100644 --- a/tensorflow_mri/python/util/model_util.py +++ b/tensorflow_mri/python/util/model_util.py @@ -1,4 +1,4 @@ -# Copyright 2021 University College London. All Rights Reserved. +# Copyright 2021 The TensorFlow MRI Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -42,7 +42,13 @@ def get_nd_model(name, rank): ('ConvBlock', 1): conv_blocks.ConvBlock1D, ('ConvBlock', 2): conv_blocks.ConvBlock2D, ('ConvBlock', 3): conv_blocks.ConvBlock3D, + ('ConvBlockLSTM', 1): conv_blocks.ConvBlockLSTM1D, + ('ConvBlockLSTM', 2): conv_blocks.ConvBlockLSTM2D, + ('ConvBlockLSTM', 3): conv_blocks.ConvBlockLSTM3D, ('UNet', 1): conv_endec.UNet1D, ('UNet', 2): conv_endec.UNet2D, - ('UNet', 3): conv_endec.UNet3D + ('UNet', 3): conv_endec.UNet3D, + ('UNetLSTM', 1): conv_endec.UNetLSTM1D, + ('UNetLSTM', 2): conv_endec.UNetLSTM2D, + ('UNetLSTM', 3): conv_endec.UNetLSTM3D } diff --git a/tensorflow_mri/python/util/nest_util.py b/tensorflow_mri/python/util/nest_util.py index e4e86e36..cb56b9e4 100644 --- a/tensorflow_mri/python/util/nest_util.py +++ b/tensorflow_mri/python/util/nest_util.py @@ -1,4 +1,4 @@ -# Copyright 2021 University College London. All Rights Reserved. +# Copyright 2021 The TensorFlow MRI Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tensorflow_mri/python/util/plot_util.py b/tensorflow_mri/python/util/plot_util.py index 0273d24e..bae540cf 100644 --- a/tensorflow_mri/python/util/plot_util.py +++ b/tensorflow_mri/python/util/plot_util.py @@ -1,4 +1,4 @@ -# Copyright 2022 University College London. All Rights Reserved. +# Copyright 2022 The TensorFlow MRI Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -16,6 +16,7 @@ import matplotlib as mpl import matplotlib.animation as ani +import matplotlib.colors as mcol import matplotlib.pyplot as plt import matplotlib.tight_bbox as tight_bbox import numpy as np @@ -124,7 +125,7 @@ def plot_tiled_image_sequence(images, layout=None, bbox_inches=None, pad_inches=0.1, - aspect=1.77, # 16:9 + aspect=None, grid_shape=None, fig_title=None, subplot_titles=None): @@ -156,8 +157,9 @@ def plot_tiled_image_sequence(images, try to figure out the tight bbox of the figure. pad_inches: A `float`. Amount of padding around the figure when bbox_inches is `'tight'`. Defaults to 0.1. - aspect: A `float`. The desired aspect ratio of the overall figure. Ignored - if `grid_shape` is specified. + aspect: A `float`. The desired aspect ratio of the overall figure. If + `None`, defaults to the aspect ratio of `fig_size`. Ignored if + `grid_shape` is specified. grid_shape: A `tuple` of `float`s. The number of rows and columns in the grid. If `None`, the grid shape is computed from `aspect`. fig_title: A `str`. The title of the figure. @@ -176,6 +178,12 @@ def plot_tiled_image_sequence(images, images = _preprocess_image(images, part=part, expected_ndim=(4, 5)) num_tiles, num_frames, image_rows, image_cols = images.shape[:4] + if fig_size is None: + fig_size = mpl.rcParams['figure.figsize'] + + if aspect is None: + aspect = fig_size[0] / fig_size[1] + # Compute the number of rows and cols for tile. if grid_shape is not None: grid_rows, grid_cols = grid_shape @@ -242,10 +250,11 @@ def plot_tiled_image(images, layout=None, bbox_inches=None, pad_inches=0.1, - aspect=1.77, # 16:9 + aspect=None, grid_shape=None, fig_title=None, - subplot_titles=None): + subplot_titles=None, + show_colorbar=False): r"""Plots one or more images in a grid. Args: @@ -261,7 +270,9 @@ def plot_tiled_image(images, norm: A `matplotlib.colors.Normalize`_. Used to scale scalar data to the [0, 1] range before mapping to colors using `cmap`. 
By default, a linear scaling mapping the lowest value to 0 and the highest to 1 is used. This - parameter is ignored for RGB(A) data. + parameter is ignored for RGB(A) data. Can be set to `'global'`, in which + case a global `Normalize` instance is used for all of the images in the + tile. fig_size: A `tuple` of `float`s. Width and height of the figure in inches. dpi: A `float`. The resolution of the figure in dots per inch. bg_color: A `color`_. The background color. @@ -272,12 +283,14 @@ def plot_tiled_image(images, try to figure out the tight bbox of the figure. pad_inches: A `float`. Amount of padding around the figure when bbox_inches is `'tight'`. Defaults to 0.1. - aspect: A `float`. The desired aspect ratio of the overall figure. Ignored - if `grid_shape` is specified. + aspect: A `float`. The desired aspect ratio of the overall figure. If + `None`, defaults to the aspect ratio of `fig_size`. Ignored if + `grid_shape` is specified. grid_shape: A `tuple` of `float`s. The number of rows and columns in the grid. If `None`, the grid shape is computed from `aspect`. fig_title: A `str`. The title of the figure. subplot_titles: A `list` of `str`s. The titles of the subplots. + show_colorbar: A `bool`. If `True`, a colorbar is displayed. Returns: A `list` of `matplotlib.image.AxesImage`_ objects. @@ -292,6 +305,12 @@ def plot_tiled_image(images, images = _preprocess_image(images, part=part, expected_ndim=(3, 4)) num_tiles, image_rows, image_cols = images.shape[:3] + if fig_size is None: + fig_size = mpl.rcParams['figure.figsize'] + + if aspect is None: + aspect = fig_size[0] / fig_size[1] + # Compute the number of rows and cols for tile. if grid_shape is not None: grid_rows, grid_cols = grid_shape @@ -303,6 +322,10 @@ def plot_tiled_image(images, figsize=fig_size, dpi=dpi, facecolor=bg_color, layout=layout) + # Global normalization mode. 
+ if norm == 'global': + norm = mcol.Normalize(vmin=images.min(), vmax=images.max()) + artists = [] for row, col in np.ndindex(grid_rows, grid_cols): # For each tile. tile_idx = row * grid_cols + col # Index of current tile. @@ -326,6 +349,9 @@ def plot_tiled_image(images, artists.append(artist) artists.append(artists) + if show_colorbar: + fig.colorbar(artists[0], ax=axs.ravel().tolist()) + if fig_title is not None: fig.suptitle(fig_title) diff --git a/tensorflow_mri/python/util/plot_util_test.py b/tensorflow_mri/python/util/plot_util_test.py index aa5ec18c..ed3ed695 100644 --- a/tensorflow_mri/python/util/plot_util_test.py +++ b/tensorflow_mri/python/util/plot_util_test.py @@ -1,4 +1,4 @@ -# Copyright 2022 University College London. All Rights Reserved. +# Copyright 2022 The TensorFlow MRI Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tensorflow_mri/python/util/prefer_static.py b/tensorflow_mri/python/util/prefer_static.py index b79a619d..48dc4be9 100644 --- a/tensorflow_mri/python/util/prefer_static.py +++ b/tensorflow_mri/python/util/prefer_static.py @@ -1,4 +1,4 @@ -# Copyright 2022 University College London. All Rights Reserved. +# Copyright 2022 The TensorFlow MRI Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tensorflow_mri/python/util/sys_util.py b/tensorflow_mri/python/util/sys_util.py index b2651d14..6a2750f8 100644 --- a/tensorflow_mri/python/util/sys_util.py +++ b/tensorflow_mri/python/util/sys_util.py @@ -1,4 +1,4 @@ -# Copyright 2021 University College London. All Rights Reserved. +# Copyright 2021 The TensorFlow MRI Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/tensorflow_mri/python/util/tensor_util.py b/tensorflow_mri/python/util/tensor_util.py index d765d82a..5f6529e1 100644 --- a/tensorflow_mri/python/util/tensor_util.py +++ b/tensorflow_mri/python/util/tensor_util.py @@ -1,4 +1,4 @@ -# Copyright 2021 University College London. All Rights Reserved. +# Copyright 2021 The TensorFlow MRI Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -17,6 +17,9 @@ import tensorflow as tf +from tensorflow.python.ops.control_flow_ops import with_dependencies + + def cast_to_complex(tensor): """Casts a floating-point tensor to the corresponding complex dtype. @@ -110,12 +113,18 @@ def maybe_get_static_value(tensor): return tensor -def static_and_dynamic_shapes_from_shape(shape): +def static_and_dynamic_shapes_from_shape(shape, + assert_proper_shape=False, + arg_name=None): """Returns static and dynamic shapes from tensor shape. Args: shape: This could be a 1D integer tensor, a tensor shape, a list, a tuple or any other valid representation of a tensor shape. + assert_proper_shape: If `True`, adds assertion op to the graph to verify + that the shape is proper at runtime. If `False`, only static checks are + performed. + arg_name: An optional `str`. The name of the argument. Returns: A tuple of two objects: @@ -129,9 +138,43 @@ def static_and_dynamic_shapes_from_shape(shape): Raises: ValueError: If `shape` is not 1D. + TypeError: If `shape` does not have integer dtype. 
""" - static = tf.TensorShape(tf.get_static_value(shape, partial=True)) - dynamic = tf.convert_to_tensor(shape, tf.int32) - if dynamic.shape.rank != 1: - raise ValueError(f"Expected shape to be 1D, got {dynamic}.") + if isinstance(shape, (tuple, list)) and not shape: + dtype = tf.int32 + else: + dtype = None + dynamic = tf.convert_to_tensor(shape, dtype=dtype, name=arg_name) + if not dynamic.dtype.is_integer: + raise TypeError( + f"{arg_name or 'shape'} must be integer type. Found: {shape}") + if dynamic.shape.rank not in (None, 1): + raise ValueError( + f"{arg_name or 'shape'} must be a 1-D Tensor. Found: {shape}") + if assert_proper_shape: + dynamic = with_dependencies([ + tf.debugging.assert_rank( + dynamic, + 1, + message=f"{arg_name or 'shape'} must be a 1-D Tensor"), + tf.debugging.assert_non_negative( + dynamic, + message=f"{arg_name or 'shape'} must be non-negative"), + ], dynamic) + + static = tf.get_static_value(shape, partial=True) + if (static is None and + isinstance(shape, tf.Tensor) and + shape.shape.is_fully_defined()): + # This is a special case in which `shape` is a `tf.Tensor` with unknown + # values but known shape. In this case `tf.get_static_value` will simply + # return None, but we can still infer the rank if we're a bit smarter. + static = [None] * shape.shape[0] + # Check value is non-negative. This will be done by `tf.TensorShape`, but + # do it here anyway so that we can provide a more informative error. + if static is not None and any(s is not None and s < 0 for s in static): + raise ValueError( + f"{arg_name or 'shape'} must be non-negative. Found: {shape}") + static = tf.TensorShape(static) + return static, dynamic diff --git a/tensorflow_mri/python/util/test_util.py b/tensorflow_mri/python/util/test_util.py index 88b982ed..60673fbd 100644 --- a/tensorflow_mri/python/util/test_util.py +++ b/tensorflow_mri/python/util/test_util.py @@ -1,4 +1,4 @@ -# Copyright 2021 University College London. All Rights Reserved. 
+# Copyright 2021 The TensorFlow MRI Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -117,8 +117,9 @@ def run_in_graph_and_eager_modes(func=None, config=None, use_gpu=True): execution enabled. This allows unittests to confirm the equivalence between eager and graph execution. - .. note:: + ```{note} This decorator can only be used when executing eagerly in the outer scope. + ``` Args: func: function to be annotated. If `func` is None, this method returns a diff --git a/tensorflow_mri/python/util/types_util.py b/tensorflow_mri/python/util/types_util.py index 113237a3..3bfe8c9c 100644 --- a/tensorflow_mri/python/util/types_util.py +++ b/tensorflow_mri/python/util/types_util.py @@ -1,4 +1,4 @@ -# Copyright 2021 University College London. All Rights Reserved. +# Copyright 2021 The TensorFlow MRI Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -22,3 +22,28 @@ FLOATING_TYPES = [tf.float16, tf.float32, tf.float64] COMPLEX_TYPES = [tf.complex64, tf.complex128] + + +def is_ref(x): + """Evaluates if the object has reference semantics. + + An object is deemed "reference" if it is a `tf.Variable` instance or is + derived from a `tf.Module` with `dtype` and `shape` properties. + + Args: + x: Any object. + + Returns: + is_ref: Python `bool` indicating input is has nonreference semantics, i.e., + is a `tf.Variable` or a `tf.Module` with `dtype` and `shape` properties. + """ + return ( + isinstance(x, tf.Variable) or + (isinstance(x, tf.Module) and hasattr(x, "dtype") and + hasattr(x, "shape"))) + + +def assert_not_ref_type(x, arg_name): + if is_ref(x): + raise TypeError( + f"Argument {arg_name} cannot be reference type. 
Found: {type(x)}.") diff --git a/tools/build/create_api.py b/tools/build/create_api.py index a8cebd76..e64cedb2 100644 --- a/tools/build/create_api.py +++ b/tools/build/create_api.py @@ -32,6 +32,7 @@ '''# This file was automatically generated by ${script_path}. # Do not edit. """TensorFlow MRI.""" +import glob as _glob import os as _os import sys as _sys @@ -39,12 +40,9 @@ # TODO(jmontalt): Remove these imports on release 1.0.0. from tensorflow_mri.python.ops.array_ops import * -from tensorflow_mri.python.ops.coil_ops import * from tensorflow_mri.python.ops.convex_ops import * from tensorflow_mri.python.ops.fft_ops import * -from tensorflow_mri.python.ops.geom_ops import * from tensorflow_mri.python.ops.image_ops import * -from tensorflow_mri.python.ops.linalg_ops import * from tensorflow_mri.python.ops.math_ops import * from tensorflow_mri.python.ops.optimizer_ops import * from tensorflow_mri.python.ops.recon_ops import * @@ -67,6 +65,47 @@ __path__ = [_tfmri_api_dir] elif _tfmri_api_dir not in __path__: __path__.append(_tfmri_api_dir) + +# Hook for loading tests by `unittest`. +def load_tests(loader, tests, pattern): + """Loads all TFMRI tests, including unit tests and doc tests. + + For the parameters, see the + [`load_tests` protocol](https://docs.python.org/3/library/unittest.html#load-tests-protocol). + """ + import doctest # pylint: disable=import-outside-toplevel + + # This loads all the regular unit tests. These three lines essentially + # replicate the standard behavior if there was no `load_tests` function. + root_dir = _os.path.dirname(__file__) + unit_tests = loader.discover(start_dir=root_dir, pattern=pattern) + tests.addTests(unit_tests) + + def set_up_doc_test(test): + """Sets up a doctest. + + Runs at the beginning of every doctest. We use it to import common + packages including NumPy, TensorFlow and TensorFlow MRI. Tests are kept + more concise by not repeating these imports each time. + + Args: + test: A `DocTest` object. 
+ """ + # pylint: disable=import-outside-toplevel,import-self + import numpy as _np + import tensorflow as _tf + import tensorflow_mri as _tfmri + # Add these packages to globals. + test.globs['np'] = _np + test.globs['tf'] = _tf + test.globs['tfmri'] = _tfmri + + # Now load all the doctests. + py_files = _glob.glob(_os.path.join(root_dir, '**/*.py'), recursive=True) + tests.addTests(doctest.DocFileSuite( + *py_files, module_relative=False, setUp=set_up_doc_test)) + + return tests ''') diff --git a/tools/docs/api_docs.md b/tools/docs/api_docs.md new file mode 100644 index 00000000..d832d0fa --- /dev/null +++ b/tools/docs/api_docs.md @@ -0,0 +1,3 @@ +# API documentation + +TensorFlow MRI has a Python API. This section contains the API documentation for TensorFlow MRI. diff --git a/tools/docs/api_docs.rst b/tools/docs/api_docs.rst deleted file mode 100644 index b8ec4dc3..00000000 --- a/tools/docs/api_docs.rst +++ /dev/null @@ -1,2 +0,0 @@ -TensorFlow MRI API documentation -================================ diff --git a/tools/docs/conf.py b/tools/docs/conf.py index b7f201c6..1e2aa5f4 100644 --- a/tools/docs/conf.py +++ b/tools/docs/conf.py @@ -1,4 +1,4 @@ -# Copyright 2022 University College London. All Rights Reserved. +# Copyright 2022 The TensorFlow MRI Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -37,6 +37,7 @@ # documentation root, use os.path.abspath to make it absolute, like shown here. # sys.path.insert(0, path.abspath('../..')) +sys.path.insert(0, path.abspath('extensions')) # -- Project information ----------------------------------------------------- @@ -61,12 +62,11 @@ # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. 
extensions = [ - 'sphinx.ext.autodoc', - 'sphinx.ext.napoleon', - 'sphinx.ext.autosummary', - 'sphinx.ext.linkcode', - 'sphinx.ext.autosectionlabel', 'myst_nb', + 'myst_autodoc', + 'myst_autosummary', + 'myst_napoleon', + 'sphinx.ext.linkcode', 'sphinx_sitemap' ] @@ -76,14 +76,13 @@ # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This pattern also affects html_static_path and html_extra_path. -exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] +exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', 'templates'] # Do not add full qualification to objects' signatures. add_module_names = False -# For classes, list the documentation of both the class and the `__init__` -# method. -autoclass_content = 'both' +# For classes, list the class documentation but not `__init__`. +autoclass_content = 'class' # -- Options for HTML output ------------------------------------------------- @@ -124,6 +123,7 @@ sitemap_url_scheme = '{link}' # For autosummary generation. +autosummary_generate = True autosummary_filename_map = conf_helper.AutosummaryFilenameMap() # -- Options for MyST ---------------------------------------------------------- @@ -133,7 +133,9 @@ "colon_fence", "deflist", "dollarmath", + "fieldlist", "html_image", + "substitution" ] # https://myst-nb.readthedocs.io/en/latest/authoring/basics.html @@ -143,6 +145,11 @@ '.ipynb' ] +# https://myst-parser.readthedocs.io/en/latest/syntax/optional.html#substitutions-with-jinja2 +myst_substitutions = { + 'release': release +} + # Do not execute notebooks. # https://myst-nb.readthedocs.io/en/latest/computation/execute.html nb_execution_mode = "off" @@ -161,8 +168,9 @@ def linkcode_resolve(domain, info): Returns: The GitHub URL to the object, or `None` if not relevant. """ - if info['fullname'] == 'nufft': - # Can't provide link for nufft, since it lives in external package. 
+ custom_ops = {'nufft', 'spiral_waveform'} + if info['fullname'] in custom_ops: + # Can't provide link to source for custom ops. return None # Obtain fully-qualified name of object. @@ -243,86 +251,61 @@ def linkcode_resolve(domain, info): 'np.ndarray': 'https://numpy.org/doc/stable/reference/generated/numpy.ndarray.html', 'np.inf': 'https://numpy.org/doc/stable/reference/constants.html#numpy.inf', 'np.nan': 'https://numpy.org/doc/stable/reference/constants.html#numpy.nan', - # TensorFlow types. - 'tf.Tensor': 'https://www.tensorflow.org/api_docs/python/tf/Tensor', - 'tf.TensorShape': 'https://www.tensorflow.org/api_docs/python/tf/TensorShape', - 'tf.dtypes.DType': 'https://www.tensorflow.org/api_docs/python/tf/dtypes/DType' } -TFMRI_OBJECTS_PATTERN = re.compile(r"``(?Ptfmri.[a-zA-Z0-9_.]+)``") - -COMMON_TYPES_PATTERNS = { - k: re.compile(rf"``{k}``")for k in COMMON_TYPES_LINKS} - -COMMON_TYPES_REPLACEMENTS = { - k: rf"`{k} <{v}>`_" for k, v in COMMON_TYPES_LINKS.items()} - -CODE_LETTER_PATTERN = re.compile(r"``(?P\w+)``(?P[a-zA-Z])") -CODE_LETTER_REPL = r"``\g``\ \g" - -LINK_PATTERN = re.compile(r"``(?P[\w\.]+)``_") -LINK_REPL = r"`\g`_" - def process_docstring(app, what, name, obj, options, lines): # pylint: disable=missing-param-doc,unused-argument - """Process autodoc docstrings.""" - # Replace Note: and Warning: by RST equivalents. - rst_lines = [] - admonition_lines = None - for line in lines: - if admonition_lines is None: - # We are not in an admonition right now. Check if this line will start - # one. - if (line.strip().startswith('Warning:') or - line.strip().startswith('Note:')): - # This line starts an admonition. - label_position = line.index(':') - admonition_type = line[:label_position].strip().lower() - admonition_content = line[label_position + 1:].strip() - leading_whitespace = ' ' * (len(line) - len(line.lstrip())) - extra_indentation = ' ' - admonition_lines = [f"{leading_whitespace}.. 
{admonition_type}::"] - admonition_lines.append( - leading_whitespace + extra_indentation + admonition_content) - else: - # This line does not start an admonition. It's just a regular line. - # Add it to the new lines. - rst_lines.append(line) - else: - # Check if this is the end of the admonition. - if line.strip() == '': - # Line is empty, so the end of the admonition. Add admonition and - # finish. - rst_lines.extend(admonition_lines) - admonition_lines = None - else: - # This is an admonition line. Add to list of admonition lines. - admonition_lines.append(extra_indentation + line) - # If we reached the end and we are still in an admonition, add it. - if admonition_lines is not None: - rst_lines.extend(admonition_lines) - - # Replace markdown literal markers (`) by ReST literal markers (``). - myst = '\n'.join(rst_lines) - text = myst.replace('`', '``') - text = text.replace(':math:``', ':math:`') - - # Correct inline code followed by word characters. - text = CODE_LETTER_PATTERN.sub(CODE_LETTER_REPL, text) - # Add links to some common types. - for k in COMMON_TYPES_LINKS: - text = COMMON_TYPES_PATTERNS[k].sub(COMMON_TYPES_REPLACEMENTS[k], text) - # Add links to TFMRI objects. - for match in TFMRI_OBJECTS_PATTERN.finditer(text): - name = match.group('name') - url = get_doc_url(name) - pattern = rf"``{name}``" - repl = rf"`{name} <{url}>`_" - text = text.replace(pattern, repl) - - # Correct double quotes. - text = LINK_PATTERN.sub(LINK_REPL, text) - lines[:] = text.splitlines() + """Processes autodoc docstrings.""" + # Regular expressions. + blankline_re = re.compile(r"^\s*$") + prompt_re = re.compile(r"^\s*>>>") + tf_symbol_re = re.compile(r"`(?Ptf\.[a-zA-Z0-9_.]+)`") + tfmri_symbol_re = re.compile(r"`(?Ptfmri\.[a-zA-Z0-9_.]+)`") + + # Loop initialization. `insert_lines` keeps a list of lines to be inserted + # as well as their positions. + insert_lines = [] + in_prompt = False + + # Iterate line by line. 
+ for lineno, line in enumerate(lines): + + # Check if we're in a prompt block. + if in_prompt: + # Check if end of prompt block. + if blankline_re.match(line): + in_prompt = False + insert_lines.append((lineno, "```")) + continue + + # Check for >>> prompt, if found insert code block (unless already in + # prompt). + m = prompt_re.match(line) + if m and not in_prompt: + in_prompt = True + # We need to insert a new line. It's not safe to modify the list we're + # iterating over, so instead we store the line in `insert_lines` and we + # insert it after the loop. + insert_lines.append((lineno, "```python")) + continue + + # Add links to TF symbols. + m = tf_symbol_re.search(line) + if m: + symbol = m.group('symbol') + link = f"https://www.tensorflow.org/api_docs/python/{symbol.replace('.', '/')}" + lines[lineno] = line.replace(f"`{symbol}`", f"[`{symbol}`]({link})") + + # Add links to TFMRI symbols. + m = tfmri_symbol_re.search(line) + if m: + symbol = m.group('symbol') + link = f"https://mrphys.github.io/tensorflow-mri/api_docs/{symbol.replace('.', '/')}" + lines[lineno] = line.replace(f"`{symbol}`", f"[`{symbol}`]({link})") + + # Now insert the lines (in reversed order so that line numbers stay valid). + for lineno, line in reversed(insert_lines): + lines.insert(lineno, line) def get_doc_url(name): diff --git a/tools/docs/create_documents.py b/tools/docs/create_documents.py index c3c53ede..f04026cb 100644 --- a/tools/docs/create_documents.py +++ b/tools/docs/create_documents.py @@ -1,4 +1,4 @@ -# Copyright 2022 University College London. All Rights Reserved. +# Copyright 2022 The TensorFlow MRI Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -34,78 +34,79 @@ os.makedirs(os.path.join(API_DOCS_PATH, 'tfmri'), exist_ok=True) # Read the index template. 
-with open(os.path.join(TEMPLATES_PATH, 'index.rst'), 'r') as f: +with open(os.path.join(TEMPLATES_PATH, 'index.md'), 'r') as f: INDEX_TEMPLATE = string.Template(f.read()) TFMRI_DOC_TEMPLATE = string.Template( -"""tfmri -===== - -.. automodule:: tensorflow_mri - -Modules -------- - -.. autosummary:: - :nosignatures: - - ${namespaces} - - -Classes -------- - -.. autosummary:: - :toctree: tfmri - :template: ops/class.rst - :nosignatures: - - - -Functions ---------- - -.. autosummary:: - :toctree: tfmri - :template: ops/function.rst - :nosignatures: - - broadcast_dynamic_shapes - broadcast_static_shapes - cartesian_product - central_crop - meshgrid - ravel_multi_index - resize_with_crop_or_pad - scale_by_min_max - unravel_index +"""# tfmri + +```{automodule} tensorflow_mri +``` + +## Modules + +```{autosummary} +--- +nosignatures: +--- +${namespaces} +``` + +## Classes + +```{autosummary} +--- +toctree: tfmri +nosignatures: +--- +``` + +## Functions + +```{autosummary} +--- +toctree: tfmri +nosignatures: +--- +broadcast_dynamic_shapes +broadcast_static_shapes +cartesian_product +central_crop +meshgrid +ravel_multi_index +resize_with_crop_or_pad +scale_by_min_max +unravel_index +``` """) MODULE_DOC_TEMPLATE = string.Template( -"""tfmri.${module} -======${underline} - -.. automodule:: tensorflow_mri.${module} - -Classes -------- - -.. autosummary:: - :toctree: ${module} - :template: ${module}/class.rst - :nosignatures: - - ${classes} - -Functions ---------- - -.. autosummary:: - :toctree: ${module} - :template: ${module}/function.rst - :nosignatures: - - ${functions} +"""# tfmri.${module} + +```{automodule} tensorflow_mri.${module} +``` + +## Classes + +```{autosummary} +--- +toctree: ${module} +template: ${module}/class.md +nosignatures: +--- +${classes} +``` + +## Functions + +```{autosummary} +--- +toctree: ${module} +template: ${module}/function.md +nosignatures: +--- +${functions} +``` """) @@ -128,28 +129,27 @@ class Module: # Write namespace templates. 
for name, module in modules.items(): - classes = '\n '.join(sorted(set(module.classes))) - functions = '\n '.join(sorted(set(module.functions))) + classes = '\n'.join(sorted(set(module.classes))) + functions = '\n'.join(sorted(set(module.functions))) - filename = os.path.join(API_DOCS_PATH, f'tfmri/{name}.rst') + filename = os.path.join(API_DOCS_PATH, f'tfmri/{name}.md') with open(filename, 'w') as f: f.write(MODULE_DOC_TEMPLATE.substitute( module=name, - underline='=' * len(name), classes=classes, functions=functions)) -# Write top-level API doc tfmri.rst. -filename = os.path.join(API_DOCS_PATH, 'tfmri.rst') +# Write top-level API doc tfmri.md. +filename = os.path.join(API_DOCS_PATH, 'tfmri.md') with open(filename, 'w') as f: namespaces = api_util.get_submodule_names() f.write(TFMRI_DOC_TEMPLATE.substitute( - namespaces='\n '.join(sorted(namespaces)))) + namespaces='\n'.join(sorted(namespaces)))) -# Write index.rst. -filename = os.path.join(DOCS_PATH, 'index.rst') +# Write index.md. +filename = os.path.join(DOCS_PATH, 'index.md') with open(filename, 'w') as f: namespaces = api_util.get_submodule_names() namespaces = ['api_docs/tfmri/' + namespace for namespace in namespaces] f.write(INDEX_TEMPLATE.substitute( - namespaces='\n '.join(sorted(namespaces)))) + namespaces='\n'.join(sorted(namespaces)))) diff --git a/tools/docs/create_templates.py b/tools/docs/create_templates.py index 55e651a5..f8217928 100644 --- a/tools/docs/create_templates.py +++ b/tools/docs/create_templates.py @@ -1,4 +1,4 @@ -# Copyright 2022 University College London. All Rights Reserved. +# Copyright 2022 The TensorFlow MRI Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -27,21 +27,27 @@ CLASS_TEMPLATE = string.Template( -"""${module}.{{ objname | escape | underline }}${underline} +"""# ${module}.{{ objname }} -.. 
currentmodule:: {{ module }} +```{currentmodule} {{ module }} +``` -.. auto{{ objtype }}:: {{ objname }} - :members: - :show-inheritance: +```{auto{{ objtype }}} {{ objname }} +--- +members: +show-inheritance: +--- +``` """) FUNCTION_TEMPLATE = string.Template( -"""${module}.{{ objname | escape | underline }}${underline} +"""# ${module}.{{ objname }} -.. currentmodule:: {{ module }} +```{currentmodule} {{ module }} +``` -.. auto{{ objtype }}:: {{ objname }} +```{auto{{ objtype }}} {{ objname }} +``` """) NAMESPACES = api_util.get_submodule_names() @@ -61,13 +67,11 @@ module = f'tfmri.{namespace}' # Substitute the templates for this module. - class_template = CLASS_TEMPLATE.substitute( - module=module, underline='=' * (len(module) + 1)) - function_template = FUNCTION_TEMPLATE.substitute( - module=module, underline='=' * (len(module) + 1)) + class_template = CLASS_TEMPLATE.substitute(module=module) + function_template = FUNCTION_TEMPLATE.substitute(module=module) # Write template files. - with open(os.path.join(TEMPLATE_PATH, namespace, 'class.rst'), 'w') as f: + with open(os.path.join(TEMPLATE_PATH, namespace, 'class.md'), 'w') as f: f.write(class_template) - with open(os.path.join(TEMPLATE_PATH, namespace, 'function.rst'), 'w') as f: + with open(os.path.join(TEMPLATE_PATH, namespace, 'function.md'), 'w') as f: f.write(function_template) diff --git a/tools/docs/guide.md b/tools/docs/guide.md new file mode 100644 index 00000000..8c0d02fa --- /dev/null +++ b/tools/docs/guide.md @@ -0,0 +1 @@ +# Guide diff --git a/tools/docs/guide.rst b/tools/docs/guide.rst deleted file mode 100644 index 7a61aa6b..00000000 --- a/tools/docs/guide.rst +++ /dev/null @@ -1,2 +0,0 @@ -TensorFlow MRI guide -==================== diff --git a/tools/docs/guide/faq.rst b/tools/docs/guide/faq.md similarity index 73% rename from tools/docs/guide/faq.rst rename to tools/docs/guide/faq.md index a699674f..30695aef 100644 --- a/tools/docs/guide/faq.rst +++ b/tools/docs/guide/faq.md @@ -1,5 +1,4 @@ 
-Frequently Asked Questions -========================== +# Frequently asked questions **When trying to install TensorFlow MRI, I get an error about OpenEXR which includes: @@ -10,6 +9,8 @@ OpenEXR is needed by TensorFlow Graphics, which is a dependency of TensorFlow MRI. This issue can be fixed by installing the OpenEXR library. On Debian/Ubuntu: -.. code-block:: console +``` +apt install libopenexr-dev +``` - $ apt install libopenexr-dev +Depending on your environment, you might need sudo access. diff --git a/tools/docs/guide/fft.ipynb b/tools/docs/guide/fft.ipynb new file mode 100644 index 00000000..72099ca6 --- /dev/null +++ b/tools/docs/guide/fft.ipynb @@ -0,0 +1,101 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Fast Fourier transform (FFT)\n", + "\n", + "TensorFlow MRI uses the built-in FFT ops in core TensorFlow. These are [`tf.signal.fft`](https://www.tensorflow.org/api_docs/python/tf/signal/fft), [`tf.signal.fft2d`](https://www.tensorflow.org/api_docs/python/tf/signal/fft2d) and [`tf.signal.fft3d`](https://www.tensorflow.org/api_docs/python/tf/signal/fft3d).\n", + "\n", + "## N-dimensional FFT\n", + "\n", + "For convenience, TensorFlow MRI also provides [`tfmri.signal.fft`](https://mrphys.github.io/tensorflow-mri/api_docs/tfmri/signal/fft/), which can be used for N-dimensional FFT calculations and provides convenient access to commonly used functionality such as padding/cropping, normalization and shifting of the zero-frequency component within the same function call.\n", + "\n", + "## Custom FFT kernels for CPU\n", + "\n", + "Unfortunately, TensorFlow's FFT ops are [known to be slow](https://github.com/tensorflow/tensorflow/issues/6541) on CPU. 
As a result, the FFT can become a significant bottleneck on MRI processing pipelines, especially on iterative reconstructions where the FFT is called repeatedly.\n", + "\n", + "To address this issue, TensorFlow MRI provides a set of custom FFT kernels based on the FFTW library. These offer a significant boost in performance compared to the kernels in core TensorFlow.\n", + "\n", + "The custom FFT kernels are automatically registered to the TensorFlow framework when importing TensorFlow MRI. If you have imported TensorFlow MRI, then the standard FFT ops will use the optimized kernels automatically.\n", + "\n", + "```{tip}\n", + "You only need to `import tensorflow_mri` in order to use the custom FFT kernels. You can then access them as usual through `tf.signal.fft`, `tf.signal.fft2d` and `tf.signal.fft3d`.\n", + "```\n", + "\n", + "The only caveat is that the [FFTW license](https://www.fftw.org/doc/License-and-Copyright.html) is more restrictive than the [Apache 2.0 license](https://www.apache.org/licenses/LICENSE-2.0) used by TensorFlow MRI. In particular, GNU GPL requires you to distribute any derivative software under equivalent terms.\n", + "\n", + "```{warning}\n", + "If you intend to use custom FFT kernels for commercial purposes, you will need to purchase a commercial FFTW license.\n", + "```\n", + "\n", + "### Disable the use of custom FFT kernels\n", + "\n", + "You can control whether custom FFT kernels are used via the `TFMRI_USE_CUSTOM_FFT` environment variable. When set to false, TensorFlow MRI will not register its custom FFT kernels, falling back to the standard FFT kernels in core TensorFlow. 
If the variable is unset, its value defaults to true.\n", + "\n", + "````{tip}\n", + "Set `TFMRI_USE_CUSTOM_FFT=0` to disable the custom FFT kernels.\n", + "\n", + "```python\n", + "os.environ[\"TFMRI_USE_CUSTOM_FFT\"] = \"0\"\n", + "import tensorflow_mri as tfmri\n", + "```\n", + "\n", + "```{attention}\n", + "`TFMRI_USE_CUSTOM_FFT` must be set **before** importing TensorFlow MRI. Setting or changing its value after importing the package will have no effect.\n", + "```\n", + "````\n", + "\n", + "### Customize the behavior of custom FFT kernels\n", + "\n", + "FFTW allows you to control the rigor of the planning process. The more rigorously a plan is created, the more efficient the actual FFT execution is likely to be, at the expense of a longer planning time. TensorFlow MRI lets you control the FFTW planning rigor through the `TFMRI_FFTW_PLANNING_RIGOR` environment variable. Valid values for this variable are:\n", + "\n", + "- `\"estimate\"` specifies that, instead of actual measurements of different algorithms, a simple heuristic is used to pick a (probably sub-optimal) plan quickly.\n", + "- `\"measure\"` tells FFTW to find an optimized plan by actually computing several FFTs and measuring their execution time. Depending on your machine, this can take some time (often a few seconds). 
This is the default planning option.\n", + "- `\"patient\"` is like `\"measure\"`, but considers a wider range of algorithms and often produces a “more optimal” plan (especially for large transforms), but at the expense of several times longer planning time (especially for large transforms).\n", + "- `\"exhaustive\"` is like `\"patient\"`, but considers an even wider range of algorithms, including many that we think are unlikely to be fast, to produce the most optimal plan but with a substantially increased planning time.\n", + "\n", + "````{tip}\n", + "Set the environment variable `TFMRI_FFTW_PLANNING_RIGOR` to control the planning rigor.\n", + "\n", + "```python\n", + "os.environ[\"TFMRI_FFTW_PLANNING_RIGOR\"] = \"estimate\"\n", + "import tensorflow_mri as tfmri\n", + "```\n", + "\n", + "```{attention}\n", + "`TFMRI_FFTW_PLANNING_RIGOR` must be set **before** importing TensorFlow MRI. Setting or changing its value after importing the package will have no effect.\n", + "```\n", + "````\n", + "\n", + "```{note}\n", + "FFTW accumulates \"wisdom\" each time the planner is called, and this wisdom is persisted across invocations of the FFT kernels (during the same process). Therefore, more rigorous planning options will result in long planning times during the first FFT invocation, but may result in faster execution during subsequent invocations. 
When performing a large amount of similar FFT invocations (e.g., while training a model or performing iterative reconstructions), you are more likely to benefit from more rigorous planning.\n", + "```\n", + "\n", + "```{seealso}\n", + "The FFTW [planner flags](https://www.fftw.org/doc/Planner-Flags.html) documentation page.\n", + "```" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3.8.2 64-bit", + "language": "python", + "name": "python3" + }, + "language_info": { + "name": "python", + "version": "3.8.2" + }, + "orig_nbformat": 4, + "vscode": { + "interpreter": { + "hash": "0adcc2737ebf6a4a119f135174df96668767fca1ef1112612db5ecadf2b6d608" + } + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/tools/docs/guide/install.md b/tools/docs/guide/install.md new file mode 100644 index 00000000..73571a5b --- /dev/null +++ b/tools/docs/guide/install.md @@ -0,0 +1,77 @@ +# Install TensorFlow MRI + +## Requirements + +TensorFlow MRI should work in most Linux systems that meet the +[requirements for TensorFlow](https://www.tensorflow.org/install). + +```{warning} +TensorFlow MRI is not yet available for Windows or macOS. +[Help us support them!](https://github.com/mrphys/tensorflow-mri/issues/3). +``` + +### TensorFlow compatibility + +Each TensorFlow MRI release is compiled against a specific version of +TensorFlow. To ensure compatibility, it is recommended to install matching +versions of TensorFlow and TensorFlow MRI according to the table below. + +```{include} ../../../README.md +--- +start-after: <!-- start-compatibility-table --> +end-before: <!-- end-compatibility-table --> +--- +``` + +```{warning} +Each TensorFlow MRI version aims to target and support the latest TensorFlow +version only. A new version of TensorFlow MRI will be released shortly after +each TensorFlow release. TensorFlow MRI versions that target older versions +of TensorFlow will not generally receive any updates. +``` + +## Set up your system + +You will need a working TensorFlow installation. 
Follow the +[TensorFlow installation instructions](https://www.tensorflow.org/install) if +you do not have one already. + + +### Use a GPU + +If you need GPU support, we suggest that you use one of the +[TensorFlow Docker images](https://www.tensorflow.org/install/docker). +These come with a GPU-enabled TensorFlow installation and are the easiest way +to run TensorFlow and TensorFlow MRI on your system. + +``` +docker pull tensorflow/tensorflow:latest-gpu +``` + +Alternatively, make sure you follow +[these instructions](https://www.tensorflow.org/install/gpu) when setting up +your system. + + +## Download from PyPI + +TensorFlow MRI is available on the Python package index (PyPI) and can be +installed using the ``pip`` package manager: + +``` +pip install tensorflow-mri +``` + + +## Run in Google Colab + +To get started without installing anything on your system, you can use +[Google Colab](https://colab.research.google.com/notebooks/welcome.ipynb). +Simply create a new notebook and use ``pip`` to install TensorFlow MRI. + +``` +!pip install tensorflow-mri +``` + +The Colab environment is already configured to run TensorFlow and has GPU +support. diff --git a/tools/docs/guide/install.rst b/tools/docs/guide/install.rst deleted file mode 100644 index 404c4a7b..00000000 --- a/tools/docs/guide/install.rst +++ /dev/null @@ -1,89 +0,0 @@ -Install TensorFlow MRI -====================== - -Requirements ------------- - -TensorFlow MRI should work in most Linux systems that meet the -`requirements for TensorFlow `_. - -.. warning:: - - TensorFlow MRI is not yet available for Windows or macOS. - `Help us support them! `_. - - -TensorFlow compatibility -~~~~~~~~~~~~~~~~~~~~~~~~ - -Each TensorFlow MRI release is compiled against a specific version of -TensorFlow. To ensure compatibility, it is recommended to install matching -versions of TensorFlow and TensorFlow MRI according to the -:ref:`TensorFlow compatibility table`. - -.. 
warning:: - - Each TensorFlow MRI version aims to target and support the latest TensorFlow - version only. A new version of TensorFlow MRI will be released shortly after - each TensorFlow release. TensorFlow MRI versions that target older versions - of TensorFlow will not generally receive any updates. - - -Set up your system ------------------- - -You will need a working TensorFlow installation. Follow the `TensorFlow -installation instructions `_ if you do not -have one already. - - -Use a GPU -~~~~~~~~~ - -If you need GPU support, we suggest that you use one of the -`TensorFlow Docker images `_. -These come with a GPU-enabled TensorFlow installation and are the easiest way -to run TensorFlow and TensorFlow MRI on your system. - -.. code-block:: console - - $ docker pull tensorflow/tensorflow:latest-gpu - -Alternatively, make sure you follow -`these instructions `_ when setting up -your system. - - -Download from PyPI ------------------- - -TensorFlow MRI is available on the Python package index (PyPI) and can be -installed using the ``pip`` package manager: - -.. code-block:: console - - $ pip install tensorflow-mri - - -Run in Google Colab -------------------- - -To get started without installing anything on your system, you can use -`Google Colab `_. -Simply create a new notebook and use ``pip`` to install TensorFlow MRI. - -.. code:: python - - !pip install tensorflow-mri - - -The Colab environment is already configured to run TensorFlow and has GPU -support. - - -TensorFlow compatibility table ------------------------------- - -.. 
include:: ../../../README.rst - :start-after: start-compatibility-table - :end-before: end-compatibility-table diff --git a/tools/docs/guide/universal.ipynb b/tools/docs/guide/universal.ipynb new file mode 100644 index 00000000..097c9c19 --- /dev/null +++ b/tools/docs/guide/universal.ipynb @@ -0,0 +1,32 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Universal operators\n", + "\n", + "Coming soon..." + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3.8.2 64-bit", + "language": "python", + "name": "python3" + }, + "language_info": { + "name": "python", + "version": "3.8.2" + }, + "orig_nbformat": 4, + "vscode": { + "interpreter": { + "hash": "0adcc2737ebf6a4a119f135174df96668767fca1ef1112612db5ecadf2b6d608" + } + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/tools/docs/templates/index.md b/tools/docs/templates/index.md new file mode 100644 index 00000000..ef928571 --- /dev/null +++ b/tools/docs/templates/index.md @@ -0,0 +1,43 @@ +# TensorFlow MRI {{ release }} + +```{include} ../../README.md +--- +start-after: +end-before: +--- +``` + +```{toctree} +--- +caption: Guide +hidden: +--- +Guide +Installation +Fast Fourier transform +Non-uniform FFT +Linear algebra +Optimization +MRI reconstruction +Contributing +FAQ +``` + +```{toctree} +--- +caption: Tutorials +hidden: +--- +Tutorials +Image reconstruction +``` + +```{toctree} +--- +caption: API Documentation +hidden: +--- +api_docs +api_docs/tfmri +${namespaces} +``` diff --git a/tools/docs/templates/index.rst b/tools/docs/templates/index.rst deleted file mode 100644 index f7099966..00000000 --- a/tools/docs/templates/index.rst +++ /dev/null @@ -1,45 +0,0 @@ -TensorFlow MRI |release| -======================== - -.. image:: https://img.shields.io/badge/-View%20on%20GitHub-128091?logo=github&labelColor=grey - :target: https://github.com/mrphys/tensorflow-mri - :alt: View on GitHub - -.. 
include:: ../../README.rst - :start-after: start-intro - :end-before: end-intro - - -.. toctree:: - :caption: Guide - :hidden: - - Guide - Installation - Non-uniform FFT - Linear algebra - Optimization - MRI reconstruction - Contributing - FAQ - - -.. toctree:: - :caption: Tutorials - :hidden: - - Tutorials - Image reconstruction - - -.. toctree:: - :caption: API Documentation - :hidden: - - API documentation - api_docs/tfmri - ${namespaces} - - -.. meta:: - :google-site-verification: 8PySedj6KJ0kc5qC1CbO6_9blFB9Nho3SgXvbRzyVOU diff --git a/tools/docs/test_docs.py b/tools/docs/test_docs.py deleted file mode 100644 index 404cd482..00000000 --- a/tools/docs/test_docs.py +++ /dev/null @@ -1,13 +0,0 @@ -import doctest -import pathlib -import sys -wdir = pathlib.Path().absolute() -sys.path.insert(0, str(wdir)) - -from tensorflow_mri.python.ops import array_ops -from tensorflow_mri.python.ops import wavelet_ops - -kwargs = dict(raise_on_error=True) - -doctest.testmod(array_ops, **kwargs) -doctest.testmod(wavelet_ops, **kwargs) diff --git a/tools/docs/tutorials.rst b/tools/docs/tutorials.md similarity index 87% rename from tools/docs/tutorials.rst rename to tools/docs/tutorials.md index 9c522205..a0c22b26 100644 --- a/tools/docs/tutorials.rst +++ b/tools/docs/tutorials.md @@ -1,5 +1,4 @@ -TensorFlow MRI tutorials -======================== +# Tutorials All TensorFlow MRI tutorials are written as Jupyter notebooks. diff --git a/tools/docs/tutorials/recon.md b/tools/docs/tutorials/recon.md new file mode 100644 index 00000000..be02baae --- /dev/null +++ b/tools/docs/tutorials/recon.md @@ -0,0 +1,8 @@ +# Image reconstruction + +```{toctree} +--- +hidden: +--- +CG-SENSE +``` diff --git a/tools/docs/tutorials/recon.rst b/tools/docs/tutorials/recon.rst deleted file mode 100644 index 6cbac95d..00000000 --- a/tools/docs/tutorials/recon.rst +++ /dev/null @@ -1,7 +0,0 @@ -Image reconstruction -==================== - -.. 
toctree:: - :hidden: - - CG-SENSE diff --git a/tools/docs/tutorials/recon/cg_sense.ipynb b/tools/docs/tutorials/recon/cg_sense.ipynb index d1ab7fa4..874c497c 100644 --- a/tools/docs/tutorials/recon/cg_sense.ipynb +++ b/tools/docs/tutorials/recon/cg_sense.ipynb @@ -7,16 +7,6 @@ "# Image reconstruction with CG-SENSE" ] }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "[![View on website](https://img.shields.io/badge/-View%20on%20website-128091?labelColor=grey&logo=data:image/svg+xml;base64,PHN2ZyB3aWR0aD0iMjYwIiBoZWlnaHQ9IjI1OSIgeG1sbnM9Imh0dHA6Ly93d3cudzMub3JnLzIwMDAvc3ZnIiB4bWxuczp4bGluaz0iaHR0cDovL3d3dy53My5vcmcvMTk5OS94bGluayIgb3ZlcmZsb3c9ImhpZGRlbiI+PGRlZnM+PGNsaXBQYXRoIGlkPSJjbGlwMCI+PHJlY3QgeD0iMTI2NCIgeT0iMTIzOCIgd2lkdGg9IjI2MCIgaGVpZ2h0PSIyNTkiLz48L2NsaXBQYXRoPjxsaW5lYXJHcmFkaWVudCB4MT0iMTI2NCIgeTE9IjEzNjcuNSIgeDI9IjE1MjQiIHkyPSIxMzY3LjUiIGdyYWRpZW50VW5pdHM9InVzZXJTcGFjZU9uVXNlIiBzcHJlYWRNZXRob2Q9InJlZmxlY3QiIGlkPSJmaWxsMSI+PHN0b3Agb2Zmc2V0PSIwIiBzdG9wLWNvbG9yPSIjMTI4MDkxIi8+PHN0b3Agb2Zmc2V0PSIwLjAyMjcyNzMiIHN0b3AtY29sb3I9IiMxMjgxOTIiLz48c3RvcCBvZmZzZXQ9IjAuMDQ1NDU0NSIgc3RvcC1jb2xvcj0iIzEyODM5NCIvPjxzdG9wIG9mZnNldD0iMC4wNjgxODE4IiBzdG9wLWNvbG9yPSIjMTI4NDk2Ii8+PHN0b3Agb2Zmc2V0PSIwLjA5MDkwOTEiIHN0b3AtY29sb3I9IiMxMjg2OTgiLz48c3RvcCBvZmZzZXQ9IjAuMTEzNjM2IiBzdG9wLWNvbG9yPSIjMTM4ODlBIi8+PHN0b3Agb2Zmc2V0PSIwLjEzNjM2NCIgc3RvcC1jb2xvcj0iIzEzODk5QiIvPjxzdG9wIG9mZnNldD0iMC4xNTkwOTEiIHN0b3AtY29sb3I9IiMxMzhCOUQiLz48c3RvcCBvZmZzZXQ9IjAuMTgxODE4IiBzdG9wLWNvbG9yPSIjMTM4QzlGIi8+PHN0b3Agb2Zmc2V0PSIwLjIwNDU0NSIgc3RvcC1jb2xvcj0iIzE0OERBMCIvPjxzdG9wIG9mZnNldD0iMC4yMjcyNzMiIHN0b3AtY29sb3I9IiMxNDhGQTIiLz48c3RvcCBvZmZzZXQ9IjAuMjUiIHN0b3AtY29sb3I9IiMxNDkwQTMiLz48c3RvcCBvZmZzZXQ9IjAuMjcyNzI3IiBzdG9wLWNvbG9yPSIjMTQ5MUE1Ii8+PHN0b3Agb2Zmc2V0PSIwLjI5NTQ1NSIgc3RvcC1jb2xvcj0iIzE0OTNBNiIvPjxzdG9wIG9mZnNldD0iMC4zMTgxODIiIHN0b3AtY29sb3I9IiMxNTk0QTgiLz48c3RvcCBvZmZzZXQ9IjAuMzQwOTA5IiBzdG9wLWNvbG9yPSIjMTU5NUE5Ii8+PHN0b3Agb2Zmc2V0PSIwLjM2MzYzNiIgc3RvcC1jb2xvcj0iIzE1OTZBQSIvPjxz
dG9wIG9mZnNldD0iMC4zODYzNjQiIHN0b3AtY29sb3I9IiMxNTk3QUIiLz48c3RvcCBvZmZzZXQ9IjAuNDA5MDkxIiBzdG9wLWNvbG9yPSIjMTU5OUFEIi8+PHN0b3Agb2Zmc2V0PSIwLjQzMTgxOCIgc3RvcC1jb2xvcj0iIzE1OUFBRSIvPjxzdG9wIG9mZnNldD0iMC40NTQ1NDUiIHN0b3AtY29sb3I9IiMxNjlCQUYiLz48c3RvcCBvZmZzZXQ9IjAuNDc3MjczIiBzdG9wLWNvbG9yPSIjMTY5Q0IwIi8+PHN0b3Agb2Zmc2V0PSIwLjUiIHN0b3AtY29sb3I9IiMxNjlEQjEiLz48c3RvcCBvZmZzZXQ9IjAuNTIyNzI3IiBzdG9wLWNvbG9yPSIjMTY5RUIyIi8+PHN0b3Agb2Zmc2V0PSIwLjU0NTQ1NSIgc3RvcC1jb2xvcj0iIzE2OUVCMyIvPjxzdG9wIG9mZnNldD0iMC41NjgxODIiIHN0b3AtY29sb3I9IiMxNjlGQjQiLz48c3RvcCBvZmZzZXQ9IjAuNTkwOTA5IiBzdG9wLWNvbG9yPSIjMTZBMEI1Ii8+PHN0b3Agb2Zmc2V0PSIwLjYxMzYzNiIgc3RvcC1jb2xvcj0iIzE2QTFCNiIvPjxzdG9wIG9mZnNldD0iMC42MzYzNjQiIHN0b3AtY29sb3I9IiMxN0ExQjciLz48c3RvcCBvZmZzZXQ9IjAuNjU5MDkxIiBzdG9wLWNvbG9yPSIjMTdBMkI4Ii8+PHN0b3Agb2Zmc2V0PSIwLjY4MTgxOCIgc3RvcC1jb2xvcj0iIzE3QTNCOCIvPjxzdG9wIG9mZnNldD0iMC43MDQ1NDUiIHN0b3AtY29sb3I9IiMxN0EzQjkiLz48c3RvcCBvZmZzZXQ9IjAuNzI3MjczIiBzdG9wLWNvbG9yPSIjMTdBNEJBIi8+PHN0b3Agb2Zmc2V0PSIwLjc1IiBzdG9wLWNvbG9yPSIjMTdBNUJBIi8+PHN0b3Agb2Zmc2V0PSIwLjc3MjcyNyIgc3RvcC1jb2xvcj0iIzE3QTVCQiIvPjxzdG9wIG9mZnNldD0iMC43OTU0NTUiIHN0b3AtY29sb3I9IiMxN0E1QkIiLz48c3RvcCBvZmZzZXQ9IjAuODE4MTgyIiBzdG9wLWNvbG9yPSIjMTdBNkJDIi8+PHN0b3Agb2Zmc2V0PSIwLjg0MDkwOSIgc3RvcC1jb2xvcj0iIzE3QTZCQyIvPjxzdG9wIG9mZnNldD0iMC44NjM2MzYiIHN0b3AtY29sb3I9IiMxN0E3QkMiLz48c3RvcCBvZmZzZXQ9IjAuODg2MzY0IiBzdG9wLWNvbG9yPSIjMTdBN0JEIi8+PHN0b3Agb2Zmc2V0PSIwLjkwOTA5MSIgc3RvcC1jb2xvcj0iIzE3QTdCRCIvPjxzdG9wIG9mZnNldD0iMC45MzE4MTgiIHN0b3AtY29sb3I9IiMxN0E3QkQiLz48c3RvcCBvZmZzZXQ9IjAuOTU0NTQ1IiBzdG9wLWNvbG9yPSIjMTdBN0JEIi8+PHN0b3Agb2Zmc2V0PSIwLjk3NzI3MyIgc3RvcC1jb2xvcj0iIzE3QTdCRCIvPjxzdG9wIG9mZnNldD0iMSIgc3RvcC1jb2xvcj0iIzE4QThCRSIvPjwvbGluZWFyR3JhZGllbnQ+PC9kZWZzPjxnIGNsaXAtcGF0aD0idXJsKCNjbGlwMCkiIHRyYW5zZm9ybT0idHJhbnNsYXRlKC0xMjY0IC0xMjM4KSI+PHBhdGggZD0iTTEzOTQgMTMwMi43NUMxMzU4LjEgMTMwMi43NSAxMzI5IDEzMzEuNzQgMTMyOSAxMzY3LjUgMTMyOSAxNDAzLjI2IDEzNTguMSAxNDMyLjI1IDEzOTQgMTQzMi4yNSAxNDI5LjkgMTQzMi4yNSAxNDU5IDE0MDMuMjYgMTQ1OSAxMzY3LjUg
MTQ1OSAxMzMxLjc0IDE0MjkuOSAxMzAyLjc1IDEzOTQgMTMwMi43NVpNMTM5NCAxMjg5LjhDMTQzNy4wOCAxMjg5LjggMTQ3MiAxMzI0LjU5IDE0NzIgMTM2Ny41IDE0NzIgMTQxMC40MSAxNDM3LjA4IDE0NDUuMiAxMzk0IDE0NDUuMiAxMzUwLjkyIDE0NDUuMiAxMzE2IDE0MTAuNDEgMTMxNiAxMzY3LjUgMTMxNiAxMzI0LjU5IDEzNTAuOTIgMTI4OS44IDEzOTQgMTI4OS44Wk0xMzk0IDEyNzYuODVDMTM0My43NCAxMjc2Ljg1IDEzMDMgMTMxNy40NCAxMzAzIDEzNjcuNSAxMzAzIDE0MTcuNTYgMTM0My43NCAxNDU4LjE1IDEzOTQgMTQ1OC4xNSAxNDQ0LjI2IDE0NTguMTUgMTQ4NSAxNDE3LjU2IDE0ODUgMTM2Ny41IDE0ODUgMTMxNy40NCAxNDQ0LjI2IDEyNzYuODUgMTM5NCAxMjc2Ljg1Wk0xMzk0IDEyMzhDMTQ2NS44IDEyMzggMTUyNCAxMjk1Ljk4IDE1MjQgMTM2Ny41IDE1MjQgMTQzOS4wMiAxNDY1LjggMTQ5NyAxMzk0IDE0OTcgMTMyMi4yIDE0OTcgMTI2NCAxNDM5LjAyIDEyNjQgMTM2Ny41IDEyNjQgMTI5NS45OCAxMzIyLjIgMTIzOCAxMzk0IDEyMzhaIiBmaWxsPSJ1cmwoI2ZpbGwxKSIgZmlsbC1ydWxlPSJldmVub2RkIi8+PC9nPjwvc3ZnPg==)](https://mrphys.github.io/tensorflow-mri/tutorials/recon/cg_sense)\n", - "[![Run in Colab](https://img.shields.io/badge/-Run%20in%20Colab-128091?labelColor=grey&logo=googlecolab)](https://colab.research.google.com/github/mrphys/tensorflow-mri/blob/master/tools/docs/tutorials/recon/cg_sense.ipynb)\n", - "[![View on GitHub](https://img.shields.io/badge/-View%20on%20GitHub-128091?labelColor=grey&logo=github)](https://github.com/mrphys/tensorflow-mri/blob/master/tools/docs/tutorials/recon/cg_sense.ipynb)\n", - "[![Download notebook](https://img.shields.io/badge/-Download%20notebook-128091?labelColor=grey&logo=data:image/svg+xml;base64,<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<!-- Created with Inkscape (http://www.inkscape.org/) -->

<svg
   version="1.1"
   id="svg55"
   width="24"
   height="24"
   viewBox="0 0 24 24"
   sodipodi:docname="icons8-download-96.png"
   inkscape:version="1.1.2 (1:1.1+202202050942+0a00cf5339)"
   xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
   xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
   xmlns="http://www.w3.org/2000/svg"
   xmlns:svg="http://www.w3.org/2000/svg">
  <defs
     id="defs59">
    <inkscape:path-effect
       effect="fillet_chamfer"
       id="path-effect3740"
       is_visible="true"
       lpeversion="1"
       satellites_param="F,0,0,1,0,4,0,1 @ F,0,0,1,0,4,0,1 @ F,0,0,1,0,4,0,1 @ F,0,0,1,0,4,0,1"
       unit="px"
       method="auto"
       mode="F"
       radius="4"
       chamfer_steps="1"
       flexible="false"
       use_knot_distance="true"
       apply_no_radius="true"
       apply_with_radius="true"
       only_selected="false"
       hide_knots="false" />
    <inkscape:path-effect
       effect="fillet_chamfer"
       id="path-effect3720"
       is_visible="true"
       lpeversion="1"
       satellites_param="F,0,0,1,0,1,0,1 @ F,0,0,1,0,1,0,1 @ F,0,0,1,0,1,0,1 @ F,0,0,1,0,1,0,1"
       unit="px"
       method="auto"
       mode="F"
       radius="1"
       chamfer_steps="1"
       flexible="false"
       use_knot_distance="true"
       apply_no_radius="true"
       apply_with_radius="true"
       only_selected="false"
       hide_knots="false" />
    <inkscape:path-effect
       effect="fillet_chamfer"
       id="path-effect3517"
       is_visible="true"
       lpeversion="1"
       satellites_param="F,0,0,1,0,0,0,1 @ F,0,0,1,0,0,0,1 @ F,0,0,1,0,0,0,1 @ F,0,0,1,0,0,0,1"
       unit="px"
       method="auto"
       mode="F"
       radius="4"
       chamfer_steps="1"
       flexible="false"
       use_knot_distance="true"
       apply_no_radius="true"
       apply_with_radius="true"
       only_selected="false"
       hide_knots="false" />
    <inkscape:path-effect
       effect="fillet_chamfer"
       id="path-effect3360"
       is_visible="true"
       lpeversion="1"
       satellites_param="F,0,0,1,0,0,0,1 @ F,0,0,1,0,0,0,1 @ F,0,0,1,0,0,0,1 @ F,0,0,1,0,0,0,1"
       unit="px"
       method="auto"
       mode="F"
       radius="4"
       chamfer_steps="1"
       flexible="false"
       use_knot_distance="true"
       apply_no_radius="true"
       apply_with_radius="true"
       only_selected="false"
       hide_knots="false" />
    <inkscape:path-effect
       effect="fillet_chamfer"
       id="path-effect3312"
       is_visible="true"
       lpeversion="1"
       satellites_param="F,0,0,1,0,3,0,1 @ F,0,0,1,0,3,0,1 @ F,0,0,1,0,3,0,1"
       unit="px"
       method="auto"
       mode="F"
       radius="4"
       chamfer_steps="1"
       flexible="false"
       use_knot_distance="true"
       apply_no_radius="true"
       apply_with_radius="true"
       only_selected="false"
       hide_knots="false" />
    <inkscape:path-effect
       effect="fillet_chamfer"
       id="path-effect1987"
       is_visible="true"
       lpeversion="1"
       satellites_param="F,0,0,1,0,4,0,1 @ F,0,0,1,0,4,0,1 @ F,0,0,1,0,4,0,1 @ F,0,0,1,0,4,0,1"
       unit="px"
       method="auto"
       mode="F"
       radius="4"
       chamfer_steps="1"
       flexible="false"
       use_knot_distance="true"
       apply_no_radius="true"
       apply_with_radius="true"
       only_selected="false"
       hide_knots="false" />
    <inkscape:path-effect
       effect="fillet_chamfer"
       id="path-effect929"
       is_visible="true"
       lpeversion="1"
       satellites_param="F,0,0,1,0,4,0,1 @ F,0,0,1,0,4,0,1 @ F,0,0,1,0,4,0,1 @ F,0,0,1,0,4,0,1"
       unit="px"
       method="auto"
       mode="F"
       radius="4"
       chamfer_steps="1"
       flexible="false"
       use_knot_distance="true"
       apply_no_radius="true"
       apply_with_radius="true"
       only_selected="false"
       hide_knots="false" />
    <inkscape:path-effect
       effect="fillet_chamfer"
       id="path-effect560"
       is_visible="true"
       lpeversion="1"
       satellites_param="F,0,0,1,0,4,0,1 @ F,0,0,1,0,4.1323252,0,1 @ F,0,0,1,0,4,0,1"
       unit="px"
       method="auto"
       mode="F"
       radius="4"
       chamfer_steps="1"
       flexible="false"
       use_knot_distance="true"
       apply_no_radius="true"
       apply_with_radius="true"
       only_selected="false"
       hide_knots="false" />
    <inkscape:path-effect
       effect="fillet_chamfer"
       id="path-effect249"
       is_visible="true"
       lpeversion="1"
       satellites_param="F,0,0,1,0,3.4641016,0,1 @ F,0,0,1,0,3.4641016,0,1 @ F,0,0,1,0,3.4641016,0,1"
       unit="px"
       method="auto"
       mode="F"
       radius="2"
       chamfer_steps="1"
       flexible="false"
       use_knot_distance="false"
       apply_no_radius="true"
       apply_with_radius="true"
       only_selected="false"
       hide_knots="false" />
  </defs>
  <sodipodi:namedview
     id="namedview57"
     pagecolor="#ffffff"
     bordercolor="#666666"
     borderopacity="1.0"
     inkscape:pageshadow="2"
     inkscape:pageopacity="0.0"
     inkscape:pagecheckerboard="0"
     showgrid="true"
     inkscape:snap-global="false"
     inkscape:zoom="17.375"
     inkscape:cx="-4.4028777"
     inkscape:cy="10.244604"
     inkscape:window-width="1920"
     inkscape:window-height="1043"
     inkscape:window-x="0"
     inkscape:window-y="0"
     inkscape:window-maximized="1"
     inkscape:current-layer="layer1"
     width="2400000px">
    <inkscape:grid
       type="xygrid"
       id="grid804" />
  </sodipodi:namedview>
  <g
     inkscape:groupmode="layer"
     id="layer1"
     inkscape:label="Vectors">
    <path
       id="path2779"
       style="fill:#ffffff;stroke-width:0.290655"
       inkscape:transform-center-x="0.031879892"
       inkscape:transform-center-y="1.0318494"
       d="m 11,2 a 1,1 0 0 0 -1,1 v 8.010254 l -4,-0.0088 a 0.29449253,0.29449252 0 0 0 -0.2016603,0.510254 l 5.6054693,5.226562 a 0.81189245,0.81189245 0 0 0 1.101562,0.0054 l 5.691406,-5.208006 A 0.2923067,0.2923067 0 0 0 18,11.027832 l -4,-0.0088 V 3 A 1,1 0 0 0 13,2 Z" />
    <path
       id="rect1887"
       style="fill:#ffffff;stroke-width:0.251577"
       d="m 3,20 h 18 a 1,1 45 0 1 1,1 1,1 135 0 1 -1,1 H 3 A 1,1 45 0 1 2,21 1,1 135 0 1 3,20 Z" />
  </g>
</svg>
)](https://raw.githubusercontent.com/mrphys/tensorflow-mri/master/tools/docs/tutorials/recon/cg_sense.ipynb)" - ] - }, { "cell_type": "markdown", "metadata": {}, @@ -1011,7 +1001,7 @@ "metadata": {}, "outputs": [], "source": [ - "# Copyright 2022 University College London. All rights reserved.\n", + "# Copyright 2022 The TensorFlow MRI Authors. All rights reserved.\n", "#\n", "# Licensed under the Apache License, Version 2.0 (the \"License\");\n", "# you may not use this file except in compliance with the License.\n", diff --git a/tools/docs/tutorials/recon/unet_fastmri.ipynb b/tools/docs/tutorials/recon/unet_fastmri.ipynb new file mode 100644 index 00000000..52f817cb --- /dev/null +++ b/tools/docs/tutorials/recon/unet_fastmri.ipynb @@ -0,0 +1,642 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Train a baseline U-Net on the fastMRI dataset" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "import functools\n", + "import itertools\n", + "import pathlib\n", + "\n", + "import numpy as np\n", + "import matplotlib.pyplot as plt\n", + "import tensorflow as tf\n", + "import tensorflow_io as tfio\n", + "import tensorflow_mri as tfmri" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [], + "source": [ + "# Proportion of k-space lines in fully-sampled central region.\n", + "fully_sampled_region = 0.08" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [], + "source": [ + "# If necessary, change the path names here.\n", + "fastmri_path = pathlib.Path(\"/media/storage/fastmri\")\n", + "\n", + "data_path_train = fastmri_path / \"knee_multicoil_train\"\n", + "data_path_val = fastmri_path / \"knee_multicoil_val\"\n", + "data_path_test = fastmri_path / \"knee_multicoil_test\"" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [], + "source": [ + "files_train = 
data_path_train.glob(\"*.h5\")\n", + "files_val = data_path_val.glob(\"*.h5\")\n", + "files_test = data_path_test.glob(\"*.h5\")" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [], + "source": [ + "# Spec for an element of the fastMRI dataset (the contents of one file).\n", + "element_spec = {\n", + " # kspace shape is `[slices, coils, height, width]` as described in\n", + " # https://fastmri.org/dataset/.\n", + " '/kspace': tf.TensorSpec(shape=[None, None, None, None], dtype=tf.complex64),\n", + " # the dataset also contains the root sum-of-squares reconstruction of the\n", + " # multicoil k-space data, with shape `[slices, height, width]` and where\n", + " # `height` and `width` are cropped to 320.\n", + " '/reconstruction_rss': tf.TensorSpec(shape=[None, 320, 320], dtype=tf.float32)\n", + "}\n", + "\n", + "def read_hdf5(filename, spec=None):\n", + " \"\"\"Reads an HDF file into a `dict` of `tf.Tensor`s.\n", + "\n", + " Args:\n", + " filename: A string, the filename of an HDF5 file.\n", + " spec: A dict of `dataset:tf.TensorSpec` or `dataset:dtype`\n", + " pairs that specify the HDF5 dataset selected and the `tf.TensorSpec`\n", + " or dtype of the dataset. In eager mode the spec is probed\n", + " automatically. In graph mode `spec` has to be specified.\n", + " \"\"\"\n", + " io_tensor = tfio.IOTensor.from_hdf5(filename, spec=spec)\n", + " tensors = {k: io_tensor(k).to_tensor() for k in io_tensor.keys}\n", + " return {k: tf.ensure_shape(v, spec[k].shape) for k, v in tensors.items()}\n", + "\n", + "def initialize_fastmri_dataset(files):\n", + " \"\"\"Creates a `tf.data.Dataset` from a list of fastMRI HDF5 files.\n", + " \n", + " Args:\n", + " files: A list of strings, the filenames of the HDF5 files.\n", + " element_spec: The spec of an element of the dataset. 
See `read_hdf5` for\n", + " more details.\n", + " batch_size: An int, the batch size.\n", + " shuffle: A boolean, whether to shuffle the dataset.\n", + " \"\"\"\n", + " # Canonicalize `files` as a list of strings.\n", + " files = list(map(str, files))\n", + " if len(files) == 0:\n", + " raise ValueError(\"no files found\")\n", + " # Make a `tf.data.Dataset` from the list of files.\n", + " ds = tf.data.Dataset.from_tensor_slices(files)\n", + " # Read the data in the file.\n", + " ds = ds.map(functools.partial(read_hdf5, spec=element_spec))\n", + " # The first dimension of the inputs is the slice dimension. Split each\n", + " # multi-slice element into multiple single-slice elements, as the\n", + " # reconstruction is performed on a slice-by-slice basis.\n", + " split_slices = lambda x: tf.data.Dataset.from_tensor_slices(x)\n", + " ds = ds.flat_map(split_slices)\n", + " # Remove slashes.\n", + " ds = ds.map(lambda x: {k[1:]: v for k, v in x.items()})\n", + " return ds" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2022-08-05 10:46:04.414626: I tensorflow/core/platform/cpu_feature_guard.cc:193] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations: AVX2 AVX512F AVX512_VNNI FMA\n", + "To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags.\n", + "2022-08-05 10:46:05.491923: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1532] Created device /job:localhost/replica:0/task:0/device:GPU:0 with 22290 MB memory: -> device: 0, name: NVIDIA GeForce RTX 3090, pci bus id: 0000:65:00.0, compute capability: 8.6\n", + "2022-08-05 10:46:05.493531: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1532] Created device /job:localhost/replica:0/task:0/device:GPU:1 with 22304 MB memory: -> device: 1, name: NVIDIA GeForce RTX 3090, 
pci bus id: 0000:b3:00.0, compute capability: 8.6\n", + "2022-08-05 10:46:05.767432: I tensorflow_io/core/kernels/cpu_check.cc:128] Your CPU supports instructions that this TensorFlow IO binary was not compiled to use: AVX2 AVX512F FMA\n" + ] + } + ], + "source": [ + "ds_train = initialize_fastmri_dataset(files_train)\n", + "ds_val = initialize_fastmri_dataset(files_val)\n", + "# ds_test = initialize_fastmri_dataset(files_test)" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [], + "source": [ + "ds_train = ds_train.take(100)\n", + "ds_val = ds_val.take(100)" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAqgAAAKaCAYAAADyCqv6AAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjUuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8qNh9FAAAACXBIWXMAAAsTAAALEwEAmpwYAAEAAElEQVR4nOz92XIjSZIlgKoDIEiQ2AmuseTaWdXd0tLdDzPz//M4IiMy3T3TU1VZlVkZGREM7sRKECTg94FylAcn1Bysufcm8RAmQiHgcDe3RU316GJqWZ7n9qV8KV/Kl/KlfClfypfypXwp61JKL92AL+VL+VK+lC/lS/lSvpQv5Uvh8gWgfilfypfypXwpX8qX8qV8KWtVvgDUL+VL+VK+lC/lS/lSvpQvZa3KF4D6pXwpX8qX8qV8KV/Kl/KlrFX5AlC/lC/lS/lSvpQv5Uv5Ur6UtSqVoh+r1WqeZZl/X7XjP8syy7LMFouF38/Pm5mVSiX/Hc+gXjyP7/o+1I068R/X8GypVFqqQ98RvZt/j/qZek7HBu/Xe6M+al+0DTp2uLZYLLyPqfnRsdQ+8jxFJTUO3I6ozWZms9ksCx/8DcvW1lbO45gaKx1jXIv6lWWZzefzpWcwDxGd8Tuj34ragDnmNqHdKdrQgudT9Jdqh37X+7GGQb+p9VY0trwGyuXyZ2MY0W+qn6l5Vf4QjQ/zKjOz6XT64rS7ubmZ89zxf7OneeV+6drX73xdeVTEj7neUqn0Ge2n5lfbkqqT54fbwL89p2+6xrgufR/TmPYvegfTktJ40dpdtT5RHz4vFgubz+dLv2H9Y451ffH8rQvtbm9v52hLRJs65zwXKUygtFrEq7QO5qGRLNC6mAbBk/h3FKavqE7MW7lctvl8buVyeek5Lqt4XNROpgHQIu7ha/gc8ecIp0V14Dvu4/mI5phpV9eJ8nX8L6LbQoCaGkBeONxBbmjEFDEo0b1RZ7gebo8WZTDKoLRETCnVV2U26LtOOL9PGaJONLeP69ax0rpwjRcs7uF7y+XyUhujNkX95YXKgkmFpL4fz6TG+yVKim50gelv+pmVH154eI7HU2kF86CCuFKpfHYv2qIMdVX7Uv1FnRHIjGhTaVGf0Wv8TPSeaP1G9IjxZEGs4Iw/8z0qlKKxUAEXjZ32ax2KChkzW+pzCqRx0fFR+tF3KDDkOUnxjIg/reLZWieK8uTUfET1aT8VYHDbdC3wGud+a1sjutf+R/wyarf2X9+nz0cgqUhOvkThNk
W8KTWfER/m8cS1FF0p79TvrIhGxp0Uv9K5j4xCKVrkuYnwCNNNCsxFfda+8HVci2RYqu5oTvR3XSPR+o9kIY9JpGw9BzOsdPHrC1UgaWGhkxJsEXNaRSwRs9H6wMD5mVQ7ozbooEYTUQR+td3M8KM2F42h9jV6XtvBRAqiLBIsKYAStblIeGPO17EUzVO5XP6s3SwkI2WKgRV/XyWM8D9ScCBQlb64jiJrt75HGQu3KQKDkWDgvkX0VCqV/Flta0QPuEetIUrf+j5+TudG+8b1clu57Tz28/k8CSbWoUTAS/ugghz/WdHB/wgERb+bxWBD6VyfWcVr+H1ah3rWUuCMaYPnlekwMj5EAjpah2wljsBilmWfKZ4RkOLfUgp9xH95fenYo65ova0S9L9FieadCyyJfD2ac5WNKRCFd6LeFL3yXOm6iGgzBdzQVn6f1hu1T2lM8YryOqUhNhQVyZ9obXJ9yrNxH48JflcsFa0FHjPu9yp6V15VVP5mVKGNjP5HQi4SZPycN0i0Cb7O9fPzqHc+n3+mDes71KWHEi14nchoMaWYQ/S7jkVEuAqIuL6o7SnGz8RY1Ffcy+3A7+zG5XcpIykSQC9dFBjpfHAfeI6VCUSMAfelxje6rm57pbHoHqYbBapFc2Fmn7U3dX9KoEd9gCBXhsv3p5RYdY8p3SgDY8bG90YCIbUmI2asQoDve+miNMElz5+UGeUj+J3/p/hTBHjAY1KhR1pv9J6UcSKiNV2HUUgL04COB9Mw18O/mS0rM/i9XC4vAaYU/Ufj//Dw4O8t8mbpNW1Disdo+/laynIaAarfunDIT5ZlVqlUQp6GEtF4EV9i2c70ivAItfJFa1xpJOJTKbmJZ/h9qXtAt1n2BKC536VSyfEKnsH/lLGHFS8OCdF71FClRoGidYh6uS3ReK2iU7QFvIr7Hs1LqjwboEaClBvFJQJceq8yRlyLnkkxqIhBR2BKgaECA74fn1OWLL5XtQx+PzPHqD3Re7W9aiHSxRn1I+oDvkdzqAw+Yn74HFnFtH3rVjCe6o7TEtESnsfv/B2fmSmrsCkSUhHtoQ1qXeH7IuAARhKtNa5b10nquwpKMwuBdQpcgikW0QTfq/0v4i+RsFGBr+9lgZVyM6Xo4iVK0Ro0s8+EBPfP7HMLh679SEhHc1AUbsL8ScFg0RrT90RrhPusQp7fpdYwLbzmdVwYtKKvKnu4Xdom7ZfWz+3T+1IxlTzuKaEfrf91KkxPAGBs9TZbplf9zHRaxEP4GYwnwFCKz6tM5ftSADrVJm0H0z08c5VKxe/hfvPzEQ7S90fYoQh06j0RBuHx4LHXkCL1RnDbVcHDOLJlWHGUrvdVmKEwBlUZSooBasOje5VRFgEzvj8CTSmhpe1iQaltWyUEzJYFQVEsYsSgtW+pRcZBxUX3RoCEx43HRutItScSDso0eCy5Pr6mC2Hdis45xo+ZSxSHFwnbqB58j6wb0VxF9Kq/mX2+oVDbraBD+6q/8TylQAxfw39mYmpdi0oUQ6r1cptWjQP/zrFMqfdHvCUCIKhnPp+vZYgK05ryIPwe8UXlAwo0I6HHder6j0B8NH/KQ1AnxpfrLlLUovmPaDB6NiUfIpooug/tjdauCnPti85b1J8inqzhFcyjtJ3PEfK/VYnmJDVH6imNQq34Gb0WybAo5IPbofgjhSMiUBV5dlN8U7EC2o5+qxeJ26h1pjBTau1z+6M50HerrONnGNiqfEFdvB5wL9a74gZ+PsJeUSnkytGg6SKNmI42JrVIdbK1KMONmBZ/17p1MPi+FFCIhHQRSNB6QYQpARotisilq3XD9B69k+tN/Y46U2Oh71eAEjF5Ju7nEtxvWZRGtU96nYsyKfyxyyJy96sAw7siIRY9p23XdvNz+JxadxGjjZhN9LuCglVrSulYx5ZpKKI7bnsUQ8jjrfdzX1Kur6jPGm+1LiA1WmcR39J5wzWuJ4rZ5N+1DqU3HpdIGEcCVt+f4n
nKK1PWb/6s90XySMePQVBKGHO9vHZSnjQez5QA1zWl7YvCjnCd5YyZLbmJHx4elu7lPq5T4X4WxWjyd3Z7q+VPrXupOrgwzUdrCM+nlHZdO9H64DYojXBbHx4ePuN/LEsUL2k/IprVtaVGOX2G+8Z95zbzM1HcalSYz+jzKgdSGCoqK3fx40X6PWI6/Hu0YPX5qHFadxGz1jqU+RQ9w//5XUXPM7PQxaN9iBaPEolZ8U62SBBFizEah1WCAwWMg60cUf18L7c7RfjrUFJAKsXQdDFHi3gVzfE7IysRvzeyKvEYY8NGat613XoNf8o4IjcV1xP1NWLG0Vghtoo9AxGtR3WhPOe+1BinrNUp8PJcRvlblgg4mpmnreE1GgE0riM1lvg9sgKmeBqXIiGqNKP0ngK6KTrmNkQApcijxfWrx0fHW2kpBRgioc/jGIHGSMBrvF8R/+S+RRbxIvDwWxWdE9CXet90fphGI08WCtM+6sLzfC2SV0W0wSWl7OM35mcqW1A4iw7TArv8eb51vXM7+Z7n8KgocwyPRQqkRu9MWXv5ewoDRH3h31fRu7e76EclEhWUPAHcUQxI0cIp6gS+o57ouSj+k5kDvz+aFH1vEcDja8zIUguJ369txm8Rkeq4p8atCKQUCfZoQUXvjn6P7tf6UkLmJQr3XxdNqj9cIq2yaGyjfqfGkGmB79M24zel5aJ4U+1byjKQ+s3MwsB2/dN28vtWjbdaNfR3baOOpc6LWTr2kX/X+qK2rFvhMVf+q/OhrviU4qSCSAUh34PPbEnhtqnw0TbzfVGbecyLwEHqeiQYFaxGNKPrMVqfSnuRvIrmhNvFn6NxT/UtJRfUyhut45cqvKZSIWjRetNYxwjkgJYR38k0pPwKJQJXykth+VMAx33CdVU8uG8ae8lzju+LxSL08PBcow4GuUwz0VgxXXA7lXdE6zsyNPF4R/yVlQhdI2r9XqW0rsILK138KeCB7xwIHQ14VF+qkdx5fbcytajuiLDRRm23Th5PqvbFByux6LQN+Mx16GLjceLxilLfpJiPvjsaD3ZDaRvBhCMmGykA3Be+V4HCOhQFT3xdaYrnVd3a+izuw39dqMoAlXmwcOHCtBExJmUw0bP6Xp7nlFWHizIlneuiUA4VzBHz1jnQ53XO9P3aV2b++qz2VxVX1KH0vw4lGh+zzxWZ1LpmWtS5T3lBcD/Xye5WPA/+BDpl2o8EorYt4mnReuK55bZrPVhf2v9onavQjOgNdUchKVGIRIrOeDzwOQrz4fHX9R2tT/49AiUvXbTP/B/jhRy1KmsZaKbAVEq5jeQtv1/XAls4dQ6KAB4XfkblrI4FX4vCR7jw2tJxVH6F71irKczF46DrJJLZakRTq/1z+xrROq/LVWWlBZUbyx2MhLyCSjRM61MGzHVFQjLVaW0jt00JTgdH644EIL6nGIheiyYvYtLROyNGw4xS3SC6qCLGrQKJS+RK0TbrXGiJ5mZdGKXSotKMzkfUx2g+NSZH6SEV88TvYCbJv4N5Fy1cblNK6zeLc3ymGI4ydBXOEcBOCe5onFR4P4dOlLExwFSwmQJpERjj+heLxdLO33Ur3F6mFV2Xuv4VyEVgC2PDcXGRINP5S9ECt1llAp7VNmsfFewpz2YQoMYCtYxz3dG11DgrfXM/I3pWpY1BFLc3RV8qV1Gv9inVTv79pcsqOaF8mEGLrkFNzYRrfE/krYlwB39mpYvbE6WC4jZyXfjjdG96b4R7+LvyKv7T8VGPLdMGt1/Xo/K8CERHeIb7iTHTMeG+MJ9Vvs7YJeXRKSrPAqi6UPSeyGWNknpWmR2/7zkAkjupkxb1IfU816MMCc9HBJaqK7VIo/FJ1c0MMbLspggsIi5laikgGwG5VPiGPsv3rAuz5DHQecXnlGuM50nHVy2sCjrN0m46PM/xWbg/9T7cVxQyE82HKjo6v7oedA0wA0xZe6P+RuOvQpzvVSAV0W3E7CN6jfoePcffVS
CsW+E+MfCL1i3zQeUzZssCHvHNPA8KbJWPF3mWeK75GdBuZHlN0Yv2XwFOJLRVWdP5Rlt0bFTY8zgo71Ra4boUPKscidqktI9rCnQieYNx1+dfskRtUMVKc3+qTMT3KEeo2eebiVJ0GL074oEMnpT3mS3LR72mdBndo/w/hRN0PXO9KKk1gvHielDYe1xk3eff9LAK9Tpp21NtKpKRKi9SZaWLX0vE0CPhg/9szsc9qlniTxF/NMH67tRgKWEp4+Kik8XPKJPid+iCSBGQvjeK5Yrq1nuK3LQqqLgvqT5EzDxaZNw/bm/U9+cQ3W9RdB647coglcFHoEwZjjKdiGlGDBh14X9ElyzozB4ZRsqFkwJyOpfRGkoxQBUW0TUd04iGU20ws88ABuhbrVQR0EiNgebKTK111JOal3UpKRcg05q65VGK1mKULzKay79FKEVjyeBN6TwF1iJ6TlnodZx0vfFnBY1FgEKLXkspQLruI37B7Unx4KhOnhMdF5VDL1Uw1/gf8VD+zyWyEqoMTLnRzT7fcPocGRXxMtTJ9BaFsPBGaX6e/6vSpN+Zz0fGujxfDvmL+CIK6ogUtxQN6thH84H7i+ZV+8TP6DjzOD6HZgt38bOmm2J4ej1FAKgvAlmpTqaKEpq681JgCYPG/3mgeIK5zVE7I2amBKSLgyct1dcISPD9qwBDSrCrkNG+KrPkMYkWRQTGona/dImsJmafW1X5PrPPmRSu8X++L89z32Udxeswk9IYJLyDmUr0Xi26niIrbmo+oro59KBSqRQywxT9pcaJ28TCJFqHqdCSiL65bgWv2kadawUq6wJQFWQpCFGXfURnqMfs83Wb2j1eRCspQJei1SikIsVnI9AYga5V62+VfErxQh3r1DvY9R55P6J2pmRCZIyJPkd9SPHely5oU1HfeKw1dDA1NkoL/JueIqU8PGpP5MFS+af0yXOpfEXnN1IaNZwo4vMpA5TySR5XxVopfKWbrlQuRh6VIhwVyUctuI9TZEbrpag8K2CwyH3AAx4t/KKGc9GJT92PwVWzczSpRSAymuxo4eN7FLPH7dE+p5gh3xONgdat31cVXYjav4gJAFzxuOF/pLVy/1jgrQujNIuFg/4WLUb9Q9F6IrqKmKQWVaZSdetaUEaBz9Cwo9+UyfN/rjeiOxXWOt/R+KQEEcYkAlDRePKaKorjYwuS9p2LjrcC2XUT9Cjqco94CUpKePHzEbDC76p04H7cm/qfsgrxnJvFO/QjIRjNhcZsr5o/5Z86JvxOpf8iL5UqNZFg12e4HWqJW+UR4WuR16fomZcqOj7RNf7OfCyyuKmbX8Gfzj2wQbSWUzglajvXqesBJVozKEqDkdVdx0j7pWtfn9N+pELWuC/wMhV5HaL2cL80pDMKr2GLa6rv/D9VnpVmqig2SQV6BHAi4aiLNao7ErqR8NbJi35LTSi3ITUpKqgj037UVn1PCngoUUYLmu9NgVZd9FyKgFUU7B/dx22J4o7XiVFyYSFslgZ9UYA3/kcCWNeB1qnMMGJ0KhxTtKx/2kazdDJnrouf0/nS+Y+YuVoBuEQuLV37DDhT7+d6FFDp7ylBz+MUxbJF87RuJaJb7ZMKVAWcSisRD9P51P+qpGo9Kb4RxXJG/BXXU4YQpZUUuFi1RvAOXSORDFN+FwEGnhelJX1/anyf88d1RHIsNSa/ddG2ch90HHU8wSuYZ6BwXZHyr3Oh/FllqtkyTfNairwwin+iPkf9x/261rhPPCaRnI/WWLT+IhrTdRfJdQ0viMZWnwGm4zbw3Oh6jdZlajy1PMuCmiL+6GWY0FQHMVnRpEfCTxkFv0fbGDHqiHkyIGDiLAoijhi99jvF+NFvXZx5/mi5hFYTEYmOJ4+3jkcRIy8SDCkBw+OXYozrWlKARRdzNGYp5o9FqPXx+DFdRwJX4yxVeGLx83hH96MubXOR61qZo1q4eK7xXy20TMdF61zfH4GVohKteZ4D7Zv2U/8i6w
cKu7zXoajwSa1dLjynallWwRc9y+9WHhUpTqk6NbYyAglMB5FwjEAMF3ZXRjSm60Tfw+0uEuo8BszbVTnn+82WU8WxANf1F/U5Eu7c3qIQuXUoOq8p+tWxUX6KcYh4HOqP6os+R/dzexUX8DuU35h9vunTzDw/a1FoShFAi/CGYoZUnVw4RjSS2Yqnov+RvIjGjOeaFYcoLBBzGcngorLSgprqTLTYo2v6nDI7b0jp85ycZp8H6+J/RPwRc40YkrZZ28l95O/chojx8ncuef7kQo8YOLcj6pO2j0EDMy2dLy4p5pwqqX5EBBzN+ToUFvIMUlLjH803f47i6hgARYxF648YkjJltfKssixFDCjqJ19LPcv/dX1E4xG1hYVPJNxTjC/y1BQxTAUC+ItiLKNx4N9YGK5DUcH8nHg1fFeglgJvKR6tvE4/Y9yjuWKaieIBo/Hn9xeBctzLZ33jGfxe5ALmMdOcm7p++d0KutRiFAEbta5Gsof/ImChfUit81VC/rcqHIJhtrz2ioB1StYqDUXyWXnVqraplTaiQQXX0e9QklAvhz1F/F7by0qk8vhoDUfrlN+RokUdE/UaR8/o+uE1Fa3LaOwjmuf3PhcrPGsXv1YWxTGlwBwzR1yLBiJ6d8RkeEJYK1bBm1rkKNouFYQMaLSkBj9FRNEEc78j61URwaSILsXUI5Ch/Y7mT90tfF80nusk5Nm1zG1OafYR80NRutDruF/drhEDxX1Kb9pufof+6Tyn5jZ6L/8eWfuV1vj5iPYi5qlAiO/VvkfrAMw2pcyirqJ4Sq27SFhE6+Mli/KeIqGgY6RWnxSN4z34H4GfFP/X+xUUqmA3izempKzqKZri/1Fb1APBbVHwobuj2XCgfE/pdZUsY4Cha+g5wl0Bhz4f8Zd1KNF60vbi92judVyVX0Z14R34rzxI61blSuvQz6lwIj3EQt+fkuORhytaW7qGiuY6khmoU5VEHQ/llSzHlQ9o21L8ISVLdX2syvlt9jfEoHKn1L2inefvPCn8bHRfEbPi5/W6TnhUnxKuEouWVJ1KOLpo+H/EjJQoI0IuGo+U4I+e4fanmCoTIn6PFr+2QUF11N+XLiyoojhEFkz6h5Lqa8Q0uX4VfquEGtZYdNZ6NIcKylCHzr/ObRETxztT1p8o9lqLCiYtUbt1XJjP8Hem7xSDV4WqqL0Ro12Hov2O+AnmQ+/Fb+o657lka6ECMfzp80V1RW2IaDNaZyrki+g1JRD53bgeJXXnVG2pNaTtisY9al+qjbwOdD6Z/3BReavjou+LYrBfojAvU8VJsURklIm8IdHYK+9I8VeV/0xrurH3uUqtWjbZEIJ1E/FPBbq4lnp/xMe1X9pW/E8ZHtSjpbSDZyOaisK5UvTJz6Q8xs/luYVpplJMIBoU7gR3lBdhKnaHGWJEICnQmvpdByTFCLTtRcSJ/8z4ePK1nkgDTwnv5wjVIoLEf2XSWqL55L6seq/2W99b9O7fuvDu2qJ510UYua31uWjeuS4VXEWCm/PppeaYiwqpov5FIQl6TzSPbO1axbC1bdHajMZafysSstrPiJ8UgRY8w+MWzV003i9V1CppFgs37UuK1/B4K1Di+iNQELVN28FrB+9gIBzVx+1OzYW2iV2jLIyjuNuUAI3WcLQOonbrb2qt5XfrvQocIqCSmosi2l4XutU51zWHe1b1iedE4xmjsdYQt4h2orhI/sw0q9cjOo3WZ54/ATvgBOXXXJTPKt3w+5iX6xhz2yOZoO2PZAaPTRQrqvXoHGi7+Xf9nLonKiv9sdzAVUJEY8LwbGS90mB+Jo7oHRGRaoC1/q4DlKqXJ1frwn9lanyfWq6K3NwgAB3fVfFH0Vjo75FAi96vxBRZMVLv0THh+1YR229ZIlAYCXv+4zilCMSYxRYVPF+kBGkblM5UodPxZuanY880pHRZJDT5Gr4Xxbtqf3SstU38nkjYRvQarZ0UA1RAlGp3tK6Vdovq+K2LCl
OmraKUbko7+lskMPmP+RfPqYZC4b/ykhRd8vfIspaSKxFf4nfAnc9F+8IAIxW3qrJDx17HU9ekti96Vv/zWLLViscq6ndqjtaB9+pa5rGJYi1TMiXiyak+6niuAmD4rHQCC3t0jxqbIo9BFAcd9Yvd2rpW+KAR5ZURb428bjoOfF9E2yorlA5TckH5A18rso6n5iRVnnXUaUrY8wtUi+G8mipAuZ6osRqbEHVYCQfXlJHic4pgUH+KeSqxR0xH38fjEDHLCPgoM+f6o/GOwiy0Hm0jnuP4JrO0S0mZjLYtYgjrIuR54USAia+rMOXno/E1i9OUsMDU+UwJsoge+JrSD37nuiKBym3DcxzIr+9nRhXRdZGg1e8qWLhE9KN9i9aijqUy4dQ8RbTJ7+YsGilw/lsXbYcq8xGd4r4Uf8G9Zpaca7VG4pnnjO1z4+DVRYprUUgCPx/xf64n9TzLCa03BQS17RGdRjTKz+iZ70XgKorHTMky0Gy0bl+6KM9M8Qp9xixt5FBLpc4znmHaitzlWiKlK1WKZCzaEfGrCODykaTa1ujP7PP4bR0vpffnAD/Uq2PEioTiJX2fvjPCOxgXxoP8zCq6ffYmqSLwlgJi2nEtEeFEAIIHLgXaGDjw90hARs8xiIj6FYEY7Ss/EwG7yOWaiiHjeyJGFsWO4XOK6PlZbQ8Lfl183CYlVH5PxOBfsnCbFZSrtV/HIpr/1OJTmogAgtJQJHD5Mwv8VKA7vgNgcX3cxsjSyLSkc6c0qmBF6UTXFJcINKlQTbmmFFiokIpoNRL0+pk9L2w5WRfaVcGQAm4poW62vGtZ+Wk093hfSrjxHDI9KrjS8dc517nm9ah0FH1nMFvEy1PCnu/TMUmNbQQM9LMaUSK+Ga2rSFYwf9e2cszvuhZtu/JRFOU3OpfKN1Ci+cb/LMs+W88RcNQS8dgI4xTJuShUT8PGdC1HmEBlPOoALfLzDMpxncMQmV/zukEIHO7Bd70vNc4cX6pjwnyiaMx1bKJSGIPKJapIX8qN0utKaNFCj4ROyu2RitFKMYmICafaywIsEqpKVFp/1Bdusz7HxBxNpPapSDBF11ctcP69KG4xRWhFbX+pwoKPi1o5zYqBkFmaVnnuotjVFHjVNupnvLvIAsPXNNYvJRT5t9S64e/aPqWNooL2RzF6WlcKfCndq9DStRzRX9R/XdOpe1+yFPFHFO1/0fdozCKgyEoDGwUglJTmUE+0ZlL8WYUgr6FIMOpnrjulJBZZZvTeiN/zvRFd6NhFNKXyB+1llz4DCw7fSMmBSDlbp5LiLyjKU1NrUA0KymeL6BzXFUsgPRlfVwORrhOdtwgzRFijiO+n2qj8kYEe/lcqlc/WHrcpksPcZ9zP4JeP5y6i3/l8vmQM4bGJ+hWNh66XVTx3pV8gYhbRb1hwKaEIxM2NxLPaYAVtTJzc4dREf9bJwNrIk4r3qQakTEeFI98btYP7EgngVCkChZFQ4f8KvvR/tLCjuKDUAksxDLP1A6mR8FDa5DnTPkbzGIGoCPREVsvU4tc6tH26Nrj+qOjcav1/C7BTgaG/m8UuSlyPXHL83ufGvOKarg2tJ9WfqG/8PeWufami9AarSDSWZsVKCfO3lIdJeVektKX4l9I4WxPV7YrClkAWmmoR4nfwf5UnTBt8XWVGZN3j8dHx0LXC96bmI7J+6rhxWzjsSu+LhLnK2XXhudzmaP6UZlL8WJVa/KW8XEXGs+esaabPyAoa0Y7OufYlos9ITnB/lYaj0BSEabGhhcdKc6RHwFTxUIp/a2H+EcmPiNdH60djrgvfufIOiy2NKrSLBLEKkSL3RcQMuR6UIrCqJQIgugDwmdurDJsJqShoWBnKc9sWLdgIGKYsZikgG7U5YhYoGp8TjRGeY43+b7Gw/f+7RPNrFtNRSqjjdy6pWKNo/nUhKm1F74vmEu/V+6N5S4XS8Hu1/1p/FPrA67xSqXzWZl1bqXWshYVRRDs6Xqk4y8
gCEH3nOiIhsw5FrctsddPCAi21pgGANMY04odm8dqJAC3fw8InWl/8nddQChDwNQaDGtupbUn1h8dC61eaTQGQ1NjruOOabl7h/quxRtdMNPZ/i1X4pUo0F7zecC0au8iAxXXy/ZHlXL/rtej9qFMBYCT3uE5dn2ZxHLbSUsQnOcYUdMPPRmA/MgZlWfaZhVNxGuqPcEZqjtC3CICydTfi+SmPZcrzoeVZLn6eBHQm9TnP0+klogZhUNR1zgPGv/E7lNFye7ltOqDRQoiEXmoAU8AzJfyZCXG90UTz9YjhKhFpGxhQKCMvirNSgRb1OWL8EahYhxK1U4FWpGSowIjoIZo/s8/DP9T6nxLyEbPA85Gg0hL1BfVonE/Ki6F0l3IZMQPlfkUCh9/DzEutT9FzqTVVBBp0flJ94+upul6yKB1xyrRUYb4bWTn0f5Eg5+86nlF9KmSLxjr6HCm2yvPQLwUELBx57Lge3KOyRPuVUgSKaC4qEc0yHf4tMc9qjODn1o3/RmtZ6Yf/R3J9FQ2m5HVK5qXGKHLTAzgxHURrhtdaFPKnfde2c0m533kstB06xto+HreoLXDrp/qW4svKV1K8oUgGME/j/qTKswBqiki4UWYxc+DnuQNaR4qp6m88AKp9M/BQhs7XixhU0YJQwuHx0f7q9SLgU8RoigBQdC8Te8RMo/GNQJh+jxYIfousIC9duN1RHGQE5Pk5vZ+fSdGHvrdISKkHIKLrFJPRosw+1U9lsHmef7YjOFqr+KxjhLZG9J4ax8jSWcRbdN3pulGeo2Of4kHcF7320iVSKiIeYhYDGBSdh0gIcT38nD6v3yNeGc1pUdhPRBPR/KcEaVTHc9ZyysuglqWiMY14gT6n7dZxj/hqqs7UeKxjQdsVLPJvoItICU8BIJU9OpYKdrk+foaVb36GP2uYiPK5FFjjfha9GyUCylxH0W/ROzmelHl7xAdVbvCYpTzEALcRFuJ3qackmk/tU1SedZJUUUO0A9FLwaiiRRcxC32f/sb3RMwgdT/3Sa9pW/l6aqHwIkn1O2JAZsUpYXBdgV/K0sDP8fcork5jW6Jx4jHAM9wuPBsJi1UE91sWZjypNkZCA8/if0pA8jyl1olaZKIFqp6AVeMYMUV9dpUQ4/CD6NkoDozbr33iceL28G8qCPg+s2Wwy8KNBYrWZ7Z82EHEFLn+FM9KudBfokQ0lqKP1JjjexSbyzzIbDnkSteM0hFcoHwqk/IddXUqLTCNrVpfUd+YN0b8FyltIoGI9um1IuEf3as8NOL1Cmi4T5G1W+dI5U/UvlQbX6JEvEFlWJY9uqGjs+vNPt98hxIBQv3PIEr/R/MTuZ+Z/qGwROEzqFdltYa8oW0IsdH28pjxM3w/j5HiB/6sNAQr5XNCJ/BdQwT098gry7+rB0XbxqFG0Tu0FAJUdZ1wpcoYI0HMvyuz1N94AiJCx71clCkqU4qEnbZ/FVOM6lNCiKxU+B6BRBBCJHi4nbpAI2arpWiMontTQAaLLhIyq+Lh1qFEC1bnIwJaXKJ55/+oK9I2o7lnoc9/Gj7D46tgDPOiTLaIURe1veia0iP/V0uogg4dQy4pYKJWwxRI4YKxSlmdo75zX6L7XrpE7UjxiBR/iZ7XeVGgGoEjXeupOMhoreF6iicr31VgqDTCn9VqrmDA7Amoak7ulLEkar+uVb0vWqe6DlK8gGWG1h3RPephEKPA5SVLtM6jNcfAENdTyqmOQ0QPOhc6PhEwZEONtq9IzvIzCrai8VB+jzqY5zFIxm8cp8n9jOZc6Yh/5/UQeZuYDlPyTn/j9afrBeOiNBnJjufQbaGLPwJdOgER0+DPkfs3qi8iZr5PJ4UHMiJu7YcyUO5D0X3KQLT9/FsKzD6n/zpW0bMKFIoYGV+L2q4lRWg6NlFMoo7BOpSUsEThOMgUbaDoGHL9KXCQEn7MIDn3XGqxsqtEBWO06IvoMWIiGAvcXzQGkbBI9TfqvyqRPC46Ti
k3LNrBLkIwbd7pbmafWUD0WS0p8LUuZRWoiuYnxRPwv0iJSdF2RFMpYWkWh2JxG/X9KSsazzfXpXIEBWtHcz5qW6J26PfnrqvoWlSn8p3UOk/VXdTelyq8tiJQg4LfiuI8ozoVYHF9/Dy7rbH+Hx4ezOyJ7ytIZuCFZyIAGq0xszhFmq4N/qyKBgAp8yi1rkdKO37nNcV/KrfRriIsBAs3Cq/HVXTNbS+VSlYul33seS543IrKs3fxM7FFg6HIHM/pM5F7OcXklCFERK6AkEtqAoqAIhOp9p2fUSJIAQytvwgMRZPPbdR4GR0HJuJUW1iTT5n+dRFyO9RtoJroOgl5jAf/1/GPhDs/nxJsqd+LxpEXZaS5ovCYaiyUMqA8X3a3Mn2kaDQFVrTfqwSMXld3TrSu9V70UetUDV0/MwMtl8tLQBX183jru7Q9Wv9Ll2jutKSsKVE9/B33KX9WvqC8UMc24h9ch64zBXmoC89FwK9IFvD9KXrTsKjUOtc6onWRGkesN12DXEdKdmVZ5vTL46FjGdWjvOGli46zrmEGfEozKTpnGlJaTLn9uS24D/Ww/GJFVemWZSm3I8IUTI8RDtJnmE4qlcpna4XHJbL06tiojFNFHetAeavOl2K5SM7pd+UjqAtjh/GGFyOynheVlZuk8GK1OOEa3xc9u0oYpBpYBLJSQFOf1Qko+p56fySguUSxRGhbCsAoKFHAgRK9c9W4oU2rGDmIRxm/uhz4WW5X6vq6FPSFQZO6pXEfj6lqwsqgdKHiHmZoem/R56jwPCmQjTwT2n5tn4YiYCw42F37g+8p8MNrgy0A/LwK6IgJR+MQjbG+k5+N5jNlGXzOvK5DUQFp9rlbWMc7GjfUE1mCInpaxYPY6sO0odZxzTwAGimyoit98DXEGUcgITVOqCf1Th27yGOUotnUfcoTU/1CuxkwRWud/+v88Visk2FA17vyn2g+ULj/PDY8X6mQpqL2oB5O/cUHIyiv5esRiEU/uH5+DjSrsgR0jPfwUctoE/qoFmaWY1EYBNNdiq9znyL+EfFIVkj1d15PugZ07iPZuKo8O81UVKkKc2VyKrC5PpToeiREoqKMiiegaCD0nqL4ziJmEzHT1Bjhfh0jZdJF/VVmFS3uosKEExFWxGhTc8jjoWOxDiVquzI3vtcspsvUszqOei363cw+Yy5Kp0pv2k6dB3Z/KaPVelK0zG4vbUuK3vl3ZtjaHx4z3mEaCW1dlzyPkbVQLUusXBXFQWkfot9fsqigSIEeuM9SoIvnNaqDCwMeHtdU6FUk9LQP0f0pIKdFf2OLudKI0g+EogKQqP6oHgaL0f1FPC+iX20jvkfWaf49MhSse0nJUAY4Cgbxne+HVZnHLHqH8gJNXxSFHfLvvFb0nlT/lHfiuoJC5s1cAEg3NjY+Wwu8TiKrPCuUvLa5pJRH7m/U12id87t4bUQhCPpfFVId72hutRQC1MjipI3QRawEow1X4Yp7dCD1vfjO72VCV4JP1aH1pRgrM2wVAMqQnrsouY/67oixRW3j/kVghJ+NmGBEGEyoq0IEtN9a5yqC+y1LxGxWgaJIQHB9KmhRFCClmB3TRMTslLGuGs9I2Kv7SoGnjgP+6+8RoGFaimhf35OySmGsIiCmYDS1VnR8VNilgJWuiQiIrVNRC4ZeN4uFdsQbI0GqY8slmh/+jZ+JeHOK90c8LGqHtlMFnY4D7xDn5yKLVKrPSncKWCM6UXpWWZFaf9H7+feU1wfvxPd1oN0iIMq/mcUAK5rfaO3rPbzmI2wQ8WbluxyvroXBGNznytv5vUzLDFI3NjZsNpst8Xn1duoaL1rvumZwnZV0DlnQ8dPPeJ7nRXktezF4biIAqhZzNVZE7ddSCFCf647RASoqCtbUtREB4NR7lMiKiDqKu1CQkhJoEbPU/jJhLxYL15K0TVHfdNGpMInmIQKZKWtn9JkBkc5JNIYKhFX46Di/dEkJIu5/dI9aJFPzs2p8uC
5mkClhzExPAabep7QevVNzOirjToHkaJx47Suw4PfqGETMM2KUUYkEe0RfCoBSvITXUBQCE7X3pYpaHSKhxPSS4pMKklL1Y+wifpCaI25LZFhYBdS4XrXG4H8q7KDIssZWU/RV05Dhuq4Btb5H45WSTSlBjXem5CYD52gNKd3iXbzBMuJjL1EUABbxqhSGiHg218/jzbTHcjhaz9EaicZN6Up5nwIw5YmMA+C+f3h4sM3NTW9buVy2arXqz85mM383hwAsFgt7eHj4bI8BvCYR/SvvQ+FxStEsrjGtscUUm5107Bg7MG9S/KRt0XdH5VkxqApeFIxFWh4/GxEK359iXqnFvkqw6aLVhcLv5DqjiVVBnXJ5KYOJGKhqvLpYtB+pMUgt2tT4RcwxxYwjDS7qG9cR9XUdSmoR47cIpOlzKSGvrvoU/aRoisdUGUsEGCKGHn2PLJaa0owZO6/lonehKNDmPkUWJJ2Ponep9RljUgQiI96hfKeID6wj3So/KqLNaK1zUYGEP43vAx1qXRHAiPhjVFhuRK5BriNaZ1GfUR/Pb2R84DqYVlWB0b5GQDka56guBU3a9mguFFg9h0dHCsw60HEK/Ci/iOga9+pnvicC5cxzotSNqfHnd2g90RrStpg9bfxh4AkAubW15UdCVyoVq1artlgsrFar2cbGhm1ubvrYwLKK++ABGI/HtrGxYaVSyWaz2Wd8kdsfGUh0LcFSjBJZZPU57XuEJXTO2UIcraOUIS0qKwFqasGkhMlzra4gBjb1Rgwm1SZt26oBjspzGG8k5FIEob9Fk891M8CL+p8STLrIIsHL7+bfGRRp/1ZZP1Nt5BK5SF6icH9UgSqiN11Y/J1BndJ8qqTmBP9580fqvpRgjzTRCIRy4H3E1KL1yW3RaxE41ft1XBRQRDSvwkWv87uKmLGOA99TpL2vg5A3+3w80e4oJCLiscqDo/nV8Cp8Zv7A9M7zqfwdbVahyfXi9/l87kACzyldMt1zYXACYcsuzAggcfsiHsn95T4pHep1BiYMannMI76gbVUZodfgTcF4RLwM961DiWQeg6IInET95jGOZC3uSxmFuG6EffCcRfVH9Kx0zH/wkiJeFlbGWq1mtVrNr7FytrW1ZZubmzafz+3+/t7MzDqdjpmZ3d3d2dbWlj08PNj19fVn+XsfHh7cqhrx6SLAl6ItHsNoHiM8F41RJHd4DiIjYFSnlkKAGqFfZXhoQJEWGwkwHaxocFLCalX7ov+peiJBh2cit3nEmPX5aBFGgDC6nnpOGbq2N9JQo3amhDbfp22LrNZKC5HAe8nC7VDajAAU7uPQDF3wqlBonyOhxuPJMUEsFLUO7QP3pchirr9zidxvbHHg+rhE9JKiRb2m9Kjjpv1nWtJ1rvxH5zQSeFq4DREvSzH337pomxgwriqRhfC5/DTihXxPClhwXUW0UiSoovZoPdF9KgyjtmA89PxxPB+1IwVYojZFY50KldJxTM2pzn1qXM1WexV/y6K0puCPDVKqcOmcaZ34rzwnZYDQ8U3Rv9K0gjh+B/O2Uqm0ZPUsl8u2tbVl1WrVrad5ntvGxobt7OxYlmW2tbXlgHkwGFir1bKtrS2bTqcuGwaDgW1ubtru7q5lWWb9ft/u7u7cxT6ZTHxcOcco96mobxHGiOS+jlEkU3mOIvkX8Qt+3yqe9qxd/AqIosYz4+FO8zMqWCIghWejd3Fhwo8IlIsOEr+HB0qJM3q/En0EJotKakIiRr8qfgeAShm+9j16R4oYI2tGSoCrRv8cgvutirZVQUlUIjqJBBfHCEWCQxdrNAdqyU71IVU/6tDCShW+K03rGo1oR5k0zyuvdR4LthRov7lwfco3ImEf8YpIcYwEnPYrUip43NahROA7EiwRgIqsF3yP3q+gSmkgRad5ntvDw8OzXKNmcS5PlQuRhVBpLyU3Up+VNjQ8jd+v/eW1ovyD2xddT/FXbR9b97R/3IcojA73piyMv3VhF7
KuUxRd35G80fApnn+OJzb7fO9FipbRNuWdkcyKPJuwkqLNlUrFFouFbW5u2sbGhs3nc2s0GlYqlazZbNpisbDRaGRmZltbW9btdr1uuOz39vasVqvZfD53gDufz61ardrbt2/NzGw6nVqWZXZ7e+sg9eHhwe7v7y3P00YGpg2UiN9jPDCuHFbG16N5ieSIjn9KxkTKQlRWniSlnYsWSoS+mZAisJB6nzJZXrBMyJEQjhh4UR1KwOgfuyS0rIphKwKJEUBiwcLMMwVwUgAremeqLUpQPGYRcFBmHPX3OcT2W5aUFY7byv3SouOkCpEuvJTgi8ZGBTTXHQE1rRdMToUT94vXoYICXpdM99wWBS58rwISHgfO6Zfn8S5dZaj8n8dzFY/hErXnOfXpeKxDYdex9oNLNA/8H5+VVop4FgspfYcqE/ysrhetl/lLCtQxaIjAHo8P158SgMo3U2Nk9iSvUm75CADotZRywHOAMeA6GDRFykCR7FKF9CULp5Hj9kRuddzDCq7ZsrJglu5fyhKfks16L+pVQI02cXwpNjVtbGyY2WNMablc9ljSu7s7MzNrtVrWaDRsc3PTZrOZPTw82N3dnVUqFQeo/X7fZrOZ3d7e2sHBgQPdnZ0d3wyF9wOEbmxsWKPRsKurK5vP51apVGwymdjt7a3d39+HPDvCGuhntIZ5DPi6KgNaj37X9aGYLnpPUXmWBVUrLepgigHwcyrwU0SYEkhFizFFyEVMmX/T3HmRsIvaiN+U+Wi/o7aqthe1LeUOUYH/t7Q19TsWSTTHen8KhL10USCVivVMASQUjXHiOvheFuzMcFF3SkAqw8a1qD88b5GLRYWjtiMCa1F7UmMYMRmtK9Uu/B6tRxUy3J5oHKLvRaAoGs8UkFmHEs1hkcDXedfrbB1MgbhVfDgl+Pkav5OFf0Tr0XqIABv+pwSd9o3rjEJ7ovmOrLRF9fCYRCApmoeoLTqWaqmK5E8kV9aF9ypdpXiLzgH4s/JSVsjRd1hB1eocvVN5TTT2qhThc6n0uHkJO/E3NjZse3vb29Zutz3O9Pr62kqlknU6Hdve3rYsy6xardrDw4Odnp5aqVTyMICdnR0zM6vX69br9by/i8XCKpWKzedzm81mDk6bzaaVy2W7vb21ra0tu729taurK5tOpw6iYYG9v78Px11pCCXCJ0yDaE+E25QfsKEDY64eNVV+tT1RWRmDqh1TBrJKEHNDIgahglffhfdoPfg9IvZo4aaEPv8WMRJ9LlVPJDiVwWh7ImGCEgFd7T9/LgKwOi5FdeGdfwsRrYtbPypKdynrhdnnwoL/p+g6YoSpMY+ELN/H33VuUm2IwBj3la1xbN1MAYRo/LRNRVablLBgEKEAjAVF5OaN+psS+qm5VRDDbVU+9tKFxyeyduM7+hdZnyIhgsL14ntULwuaaM742YjXKYDQdysoMDO3WHF9lUrlszby7uY8z5eEKWi0Uqn4hhJ18XJ96oUoGgNdv5GnMBoHXvvRfPD/IrlV9NxLl6gfDABT8l83UTEtqGyN6C3iQ9FaiXg331sul61SqbiFtFar2cPDg1tGq9WqbW5u+j2DwcBub2/t9evXVqvV3JpaqVSs2WxalmV2dnbmcaI7Ozt2fHxsZo8bogDKYWkFeL25ubGNjQ3b3d2129tbWywW1mg0bDQaWbVatXK5bLPZzLa3t+3u7s5ub299HSgfU56Y4tlm9hmY1PjWSEakxpTnEKVSqSzRANNHqjw7zVQERNU8rg2OiCJaVCmgpgMXCSxlhGafW71SQpyfiSYuGgdllKn6tG06BkXgg5+J4osi8BhZriL3L35LCfFUv6O2F/XxpUvKtcwlYpoYR411igQy6CwVA8YLMSVYwFQYQKVAGgt0rieK19J1ETH75wpJvAMAN9KCo/allCZ+Rt+REkpcivgK6kmtVX4ef5oz9qULtzslaBXwRHMQAVXUo/WmxjNaHyl+quPKfBhFwR33EcAUv8FyVa
1WXQBvb2+73KlUKu7iRN1mT14wtiiBfhUgqmzj/jAoVYseg9uIrln4RrTHYRdsMeT5i4BVtE5Xya7fqhQp3imZyePF86N8lvvJlnW2+KH+iHb1P9+bZU9HjlYqFQebOzs7VqvVrNPp2ObmptVqNatWq1YqldyaOZ1OrdPpeGzomzdvrN/vO83d3d3Z8fGxHR4eWp7ntrOzY+Vy2abTqU2n06VxQ5xrpVLxjVVQ2BaLx3jX6XRqi8XCra+TycQGg4HHu2Kd8BykQoX4mlpGizza+oyCzYjfsFzjz6sMA8+OQVVhnhK+qWe500XCOgKC6GQRONLfFaQW9S8FXPR3Zr7KSLQvZp/nPVVmqO+JxoD7puBDf0dRRsq/8fhHxBoBImXmWt86CXeUiDbYipLqB58Uoq5Us89PyIjAl9JiEXPkOpU5RDQWfVZQEoHC5/xetP7UdcPPRyEKulb0/tQYcdHxZ+CO90TvjfgE1x+5c9ep6HzgGoqCGR1nCHsF3imeFhUFokw/PBcRL8cz+hy7AQHQAAyq1ar/Vi6XbXNz08zMP8Pi1Gg0lkIHEP+HPt/d3dl8PrfNzU0bjUZ2e3vr44LNJVhryp8V5KT4dzQuGAdWNnUcVOHltaZ0GAFTrgf95fpeujA98lpL9U/lJc8BX8dv/DkaT62LxzZSJECDsIyaPR4/WqvVbH9/35Whzc1N29zctHq9btVq1arVqu/Kv7+/t+vraweyWfa4U//09NTG47G9efPGvvvuO6vX65ZlmU0mkyXlC4rTfD63h4cH29jYsL29vSWDwHw+9/RT9/f31ul0rNVq+buxniqVivX7fSuVSu45wBqLDBcp3KPfdazR3mheUljk/5XPPhugojABqWCItP0U2ON3RIOi79A2FQFIfWdRf6J38rv5mYiR8fUioaJ9id4ZCRrcGwlV7r8uPn1fqq3RvdEYp+pIjdVLl2iutN9RHxgk4hrTdWRti2g2YpRceB1FYFnr4rFPgQymEwWBOr9K1ylBwvcrHeI9kYtUx0afV8CpfeR+RfFNfJ8CT+VR0dgXKYrrUCLGH+3YRVG6UKtiysKEZ5Xui8Yk4vUp3hsBAliqIKiRngcn7sBShRNzarWaTadT29ra8hg/M7Pb21vb2NiwVqvlJ+7c3t6amS3tjN7Y2LCHhwdPfI44Pw0J4PbquovWNI+h0mck+3QtRPPIZRWfXaewFLP44JaUfMF4scfqOXI/4kG4v0ge8WcOIQH4bLVafk+v17ODgwPb2dmxZrNp/X7fzMz29/dta2vL+v2+jcdj29/ftzzPlzZQLRYLu7m5sevra2u329ZoNMzs0dKJ5P0PDw82m828z/P53La3t21zc9OyLHNaRuzqYrGwfr9v1WrVWq2WzWYzu7u7s8ViYfP53CaTiVtdF4uFTSYTp2n8LwKfqXnifRvMXzQOGHXxe9TIEq0jjFdR+ZtiUHnRRm7NVLyeLqiUENTPKQHL90VMmp+JAIr2h/ur92mbUkIs1caof6nnU/37W4GwvlOZrV7TsU+BAXxWbVTrXIfyt1jruTCQSi0urlNjaCImwLSoQp0XPq8t9QBoHGDR+BcxZ24/W4sVnEa0oq4i/k37FJXIlcrv0PGNeA3Pawqo6vzo2orAR9Ha/a1LtMZTIU7M81LrkscuUnR1rletbaYV5enRPVn2GEcK62itVlvaCf3w8ODuVPw2Ho/t4eHB9vb2PA6wWq3aZDKxUqlk29vbVqvV7Pb2dkmQ46zzVqvlCdFLpZJNJhNrNBqW57mNRiMX4tPp1IFwygig66pI4Ko3ROchmkOeF7RDw2i4Tdoure8lC8vclLUZ33ksuP+R3NJxR9Gx43nUseF6YLVHmqhGo+GbnG5vb61Wq9mbN29sa2vLOp2ODYdD63a7lue5zWYzP9kJbdrY2LDJZGK//vqrTadT293dtb29PatUKp4iCnHTGCPs2h8MBjafz63X69n9/b2VSo+pqvI8t+vra1fMoJwhNR
WS+W9sbFi9Xvc1dXNzY5PJxMMBIt5aRC+RnONnMK4ckhNhLS6KyZ5Lr8/exc8NwAtTQgq/R3GgWqcSXrRAU4CXO86/R7/pc1EboknRe1EiK2rUBn6HtilaUFE7onal2qrPK6hQMMFtXhXvpv3Te1KM+SVKNF7cRrhF+P7Ipa918P/oPfq7CnOeH2WuShdcly7uImEZPcvrib/rM5FGm1pjkZCO+h6F2+h3pqHUOOq7IiAOYafWRm1zJPzXhXaZd6Z4ga7HSMnQPkYuUb0vxT+UNzCPjngCfoPrEX/b29sOFAFWAQJKpZKNx2ObTqfWarXsq6++ss3NTbu5ufHdzJubm3ZxcWEbGxvWbDYdXPZ6Pcvz3Lrdrk2nU7eMYZ3f3t66ZapWq5mZ2cXFhZnZUm5JBfn6Wa1KbCnik610PrUunuco1IAFusoJVYrXpYAuUv3ldWn2eX8ZTOF5vu+5bcAz+p3BIayR9XrdNjY27OjoyHZ3d63RaDgtsOs9yzJXfnZ2dizPc5tMJra5uWnVatWm06mdn5+bmVmz2bTj42P3CmRZtpS3FMAOO/G73a6767e3t61arfopU5ubm7a9ve2J/9GXh4cH63Q6trW1ZW/evLGbmxsbj8d2e3tr5+fndnp6ajc3N+4tYJd/kUzHOOl1Hl+d02iMIxxh9nn4WiRnuax08acAiTY2ci1G4QBad8QoiwaRf4sEujJhvU/BoNabWghRn6P+6G/8HIN8bhvHRiqYTC3SiFB0/CJGG7mTWePVdmvdqP+54/JSZRXITLmJip7lOlJ0whYUpV+lR62LAWgKKOpnXfyq2JXLZd+NyaebIL6JBSu308z8OaXh5xTud2ThiCwfWjePQeSxiRgfX08x4BTgXif6TY0Jiq5b5bU6Lsyfo/HROcZvEdBn4aJ8E1ZSPod8e3vbXfFZljlAbDQa1m633Z1/cXFhWZbZ/v6+NZtNB6DT6dSBw+bmprs2t7a2fHd1u922h4cHBx79ft9TASEFz3w+t9Fo5Lknp9OpVSoVd/cjtyQDR+XXRRvqImBWJCtQdC1gTpUf/a0y67cuCoCiGOUoV6qG+vBv3Gem4YgfcOHneC6gKG1ublqj0bDXr1876KzX69ZsNq1er9t4PHbwWS6XPe7z7u7OxuOx7e7uOoicTqd2cXFhk8nEd+8D2IJeKpWKW+vhNUC4CSyr4NXT6dRubm7M7HHnf7/f95RXULyQ4grZK2AFHo1G3vfNzU27vr52WkJbihSwIpzANBiFemGOdN5TIWzPodtnxaAy4aWQsYI/1WR0UUbuKB6EKG6C2xUJoCKAGLU5ugffU67bSJBzuyOBqeOpAjLqY5EAjQS1glWtKwKlSkwReFKwm5qP6PNLloh2+Tp+02eK+hfRdxTbx2OGsdY5iuqP2rTqfrOn9B3s6kTbqtWqpz4BM8Ocg+mB0T48PCwF5oOx5nnuLi3E7UXu+sgirIImEhpchwqrVWFBuk4i/hTRMCsDAOzrEtO3KsZOeROvZfU+oZ/R5kAdB7Niz5ha7xWs8eYPuBur1apbenZ3d12w5nluvV7P4+tOT0+t0WjY/v6+uyux6cPMnEa3t7ft66+/tvPzc5tMJvbq1SuPPd3a2rJ2u23z+dzu7u6s0Wg4fTcaDbu7u7NqtWrdbtfG47EtFgurVqvW7/dtNBrZxsaGu2pB72bLcedMUxHQ1DnjZ3QuWc5E/JvXUyQPmU7Wwaoayb8U7+X+M5+I1n+0ZjWMgMERCvMOXK9Wq9ZoNKzVatnR0ZF9/fXXZma+OandbttsNrOdnR3b3t7252CBLJfL1mq1nA9Op1O7vr624XBojUbDarWa3d/fe4wp9xd/w+HQk/yjlEolB64IeQFoXSweU6bhmFSMwd3dndVqNbdMo76rqyvb3d31tFVZlnnGAYyLercjGa88h2Uaz01ElxFG4Wd1jlKlEKDi4Qgk6ffIxaQNToE4Jqwo/oYZwqoStVWFFwZXT9lRt1WRMN
X6te5o8rQ9XI8uWH0H9y8C5/zuFGDUNqb6lwLwCtwjhWDdSqr/+M99U3C1ajxSwioCq9oWrTcF3qJFbbacvBrAoFR6TH+C2CbsSIXWDXAKwQdLFjabsOICCwLAzcbGhru9FounHHnsolSLp/bBbPm0GR1fBrnqCsR/bHzhcVOQoHxLx1DXrbbxpUsEhlQ4KG3wmLIAMSvOqYzfo/eneKDyeqZDCF4kNu92u9btdm1vb89arZaVy2Wr1+s2m81sOp3azz//bMPh0Fqtlv393/+953bE+2u1mj93f3/v1tbd3V13a+7t7Vm1WvX2IN4P9GJmDnSr1aqZPVq9AG45ldVsNrPLy0sbDAYOFiKwpeOmv+lY6rhqfVhn0fg/p+51KBwOwoow/870qnLILAa5zJeieSiSh/ifZZkfOdrtdq3X6znNdrtdazab1uv1rFar2WAw8I174HVw18P9j7jSwWBgZmbffPON0xuOJ51Opw48zcwzS9zc3PiufihJZua0xqmsYERg2ri7u7P7+3vPMpDnuW1tbXkaquPjY3t4eLDz83Pv08nJiYPs6KRMDcPi9a28JxrzyDihmE3l43Pww7NiUFeBJxBcJOSjog0sitvRulIdUiGVencEPiPAkao76ge3i8cpAjZFoFUXZ2Rl5rqi96watxTgYUYQCcSIMegYrKOQT4FSFuCpsdbvWiePVwQCIkHEdTOdqhKnsT24H398FF+tVlv6DLdllj26ebrdrufeg3Xr9vbWdnZ2bDab+Q7Qzc1Ny/NHaylSqSAJdLlc9hQm8/ncLVBg4Fj7KfrF2EXxTQo81YUXjXtElykwlSoq5NbBCsVFASlfN7Ownym652tFvJzvY6GD68rncB8EKVz6ZuYn78xmM/v06ZONx2OrVqvuyn///r1Np1P76quv7F/+5V9cCapWq3Z1dWU7Ozu2tbXl7nkoR6i3XC7b2dmZDYdD63Q6Dmy3trac9mFNrVar1mw2nX43Nze9vkaj4a7U0WjkfZ5MJg4GUnJP1y/fEyk/ERDj9R2BL/XaKfDidfeShcNB0A81OKXWJq7zkbMR7aWAaEqegl9ub29br9ezV69eWbPZdJ4I0LlYLOzy8tKq1apb0+v1uu+sBw3m+aPlcjKZ2NXVlVUqFWs0Gm7hBw0i9hNpzzgpPwwAALsApGaPnoKtrS0PLQCdox+ckP/h4cHXGt7d7XZ9p7/ZI+itVCoe2jKZTGyxWHh7dXz5swLK6N7IEot7dM5S4LeorASoEXDkSjGA2gC+P/oedV4XX4oIUZTRKmBjrZSfSdUXCcJVbYwYto6bAj5ui47Fqrbxs2wpUgZQVM+q/um92kcFD0XtfqkSjbPZ8oJSN5FugEiBLbPP55Rpj4Ev3h/NkV5HiVwkOE0HYBJAM8sya7fbfh+nMsF/uKTg6kRcHoL78R8xfAAAtVrNxuOxn1wCiyssCvf39/47gEgqzimlNOJetoryGEV8gOcjeialFKy6Z51oV+cf1yMFMlV4rFIC5rl9Ts0haBDxes1m02M/YaV6eHiwg4MDOzg48ATnJycnNp1O7Xe/+5398z//s7sn8zy3s7Mzm81m1m63HRg8PDzYaDSynZ0dtzRl2ePmKGya2trasru7O3t4eHBazfNH6xLAKdq/ubnpAh6Alq2wlUrFrq6uXLFDWh8e24h/pgBmKuwqArJ6LfIaRJbHdSjcHsYG+jvksoJygECUSO7hOuQ7jzHzY9RfLpet2WzawcGBHR8fewzq1taWg8bZbGYbGxueQL/ZbNr29rYr32jbbDaz0Whk5+fnrpBhExL6gt32oEnQL0BjqVRyus6yzC2ubE01e/RsMXiFZwx1zOdzNxpASYTXC0rV1taWffr0yUqlkg2HQ/v48aPd3t56HHY0tvisxgSm1+gazyeHD0Y0ytb2ovI3bZLSxqtgTj3PzFDjeRTssEbIi1TBhHYUwIDrwA46LeqyiYADt40HUUEo7o2u6zsjq1hUF19PTa
AyNxStPxLIKaCVAqm8KJQpK/GtC6PkojRkFruCtJ/qFdC5UAuoAlouqWTJqlCgsOUBDKharS4xVzCjXq/nLjWkOAHoXCwWtru7a2Zme3t77rpCW7FzeXt724bDoeX5405ovBcbVcbjsZnZUkzrw8OD1et1GwwGdnd3Z6VSya0C0Xiqtq1xTUUMkecxCk9JWQQBfjnnJdMAt2NdSoomUuPD9+p6Vp7D/LFIuYzWtoIDKEmVSsXBKXJFHh4eeqqn2Wzm8aVYG9gR/fr1axfiNzc3dnp6atVq1ba2tuzDhw8OJNH/fr9vWZZZo9GwUulxk0ir1XJXq9mTJY/nFhuseJMK1lKe555FgK1iiP3jpP8a96nyLGUUSY11NObMUyJlVnlwBARfoqgs1rXGa1FpM7WWU3ILWAL0xICIw102Nzet3W5br9eznZ0dt4zu7+/bfD63t2/fOvAbjUbWarUsz3O/F4c/3N/fOx3e39/b4eGh01GWPW2KQt/RHigXaDdnBphMJk7PyCYAoIyxBNAEDXOoCgPdXq/n+w/A67rdrsdzI+QGYQfD4XApDRXGC2WV8qMyU5UMlYHM65kfr8ILz3Lxq3AvAq5RJ1hwRwJC60D9EOr67gh0KUiLmDwmnXfMR8/hjwmL+82ujFQcKzOqSADrYtNFyROa6i//zotbhY+Og86VCrZUiZikvmedBL1Z7NLHdZSUYMZvKQUgEjTKZHUh414FqRFDgPbPrlMcxbe7u2ulUslz5KF+7IDGs2CgCJDHiShgpgAPpdLjTtbhcGilUsk6nY4fFTmfz63RaFi323WwOp1OPd6qVHpK4wOQC4bKY6M0rgwsWsvRfJotb1yJeBPTckSzPA9g0uvk4o+ACD6jREBFeZqCqOg9yuNYkWL3LM8jwCGOhNze3ra3b99at9u1TqdjnU7HXZNQkuAuhSK1s7PjOU+z7DG2rt1uO08F+F0sFh4nPZ/P7eTkxBOfM40jXIV5L4T2w8ODDQYDjwdEO8bjsb+n2Wza119/baVSyS4uLqzRaNh4PLbr62vPSsAHAWB8eLyiOdB5ihSJVXwzihPGc5FC/BIF487Wy0hhYr4XjQmeVXnE96AuzLNmm0A8dKPRsKOjI3v9+rX1ej178+aNtVotz6F7f3/vm4mgrMCSCUUK8aboH2T8dDq1PM+XAGWeP4YAAPzBVQ8Plpm5gQC0jfdgc9ZsNrPFYuG7+29vbz17RavVMrOn2H/QO1tNzcy9AaVSyXZ3d61SqVi73fbUVpVKxc7Pz5eeUfnINJwyDHJIRsSz+Dtfey6vfZaLPwI6kSBRItN7o8ar0MIzfF+qjhRIKtLE+PcUqGWmHDFw7Gw2e9q5ijZAMzdbNskzYSMORccC9a+yTqsA0vEzi3fHRa4+1BfFUOkcRC6uyKqzDuU5Cgs+p4Q7AxcFlfo7l5R1Klo3qfk3e9qFX61WXROu1WruRuWdnYhbAnAA00QM3nA4tEql4kABDByWozzP7ebmxra2tuzy8tLDARaLhcdRbW9vW57nS67XwWBg29vbNhgM3FWFkAAGimpVSs0Hjwt/Vq9HxOB0HbDw1vtZEKYUupcsup5Ao9oPpV0e8xS9mcXhT1G8oI4JfodLv1qtWq/Xs3a7bUdHRx4DV6vV7JdffrE8f7ROIqRkNpvZ1dWV0xO8AkjlA7qChRXvK5fLNhgMLMsyDxPA+gBwRfwfBGae554TNcsyXx+Yc47/y/PHE4GQPghpgZDbslKpuLchOi5V5yMab74WyTYog7CGpmQcz21U/0uVFBjh36I+R+s2JU+iEC1c5/qxq73Vatnr16/diooDHmBJvb+/9/CN29tbzyVqZr7ZDyEesDwCcN7d3TlvYw8X+G+/33clH2EkbHTDhsHRaGT1et0mk4nHoALQwjuBsCrQKwAo6HE+nzuPR85g9MHMPMzr66+/9v5PJhMbDodL46prXhUGnS89TYplKOZRjTU8Z6t47rM3SeGFRcKVO6T3aG
FCi5ix1sGd1Xt0waZANbcvInKuG8AT7hNoSmBwYE5ws5o9nZmLHaccYgCNGwyINRcWFAwUUyby1IQXjWdKCeBr0Tjy+PN7mEGn5u0lS4q56wLCf6XxooXJz+rCZFdTNB8pZYq/g8bAIGu1mm1vb1un07GdnR23WEETB4NDGqlms+kbmUCTb968cUAKSxI2V6FwkmrEYZmZM3NsYsEmAljIkEMQMVPYcILxgEUApWhMmcajNRoJd2auak3i9aJzHQG4dSkpPpqiQXwv+h2fdd3iPtAP7lO+gXnArvd6vW5v3ryxdrvtO6ORBxKCGZar8/Nz39A0HA7dovn+/Xt7+/atg90sy6zf7y/FMg8GA3evV6tV3wgI6xHCTWBF4phobAicz+dWr9dtZ2dnycsFYwNfg1DHJit4AgCyb25uPNZVDQ2gQbUkYhxVCdD5Y4UpklmoL1JMXrqofGaa5TbzmlR6U5rTOqJ3Mu/F51qtZp1Ox46Pjz0eGnIcfA8ZHEajkc3nc5fRl5eX1mq1nK4Wi4Un0S+Xy55+ChZQKONoD+pDFoD7+3s7OTnxjCq4z+zR0nlwcGB3d3e2vb1tOzs7S/lQp9OpTSYTa7VanmmFxxr0CqVxNBr5Dn/QGk5pg5sfdeM0tdFoZGafpzdTfsqfU7wzheVSNLqK9z77JKlUZSkQkGp8BIz0mei/2eduU31P0fui+/g/W0IxMdVq1fL8KWYCgfdYbFmW2c7Ojt3f33uSXuRBYyGOxQNi5xQcLEyZKeu4sPCJrJ28UHmx6twpo4wKntf3pxhm6tpLFmbwChT5Hv49Et74nccjepcqFmwFx7MRmFfAlWVPyaQBGLe2tqxer3tS8+3tbdvf37csy+z09NRms5l9+PDB3r59a+12e4l2OaYJcam4pm1AKpVms2nn5+dLzyK333A4dOvtfD73c9BxKlC5XLbhcGi1Ws1ubm486XSWZWHifxXGqTUcKRkRE9V54rmPlLx1LdxH8CAFn9yHiG/q9ecIiRR/RgE43drasv39fev1eq44tVotF4zb29tWr9f9LPLDw0MPU+l2u3Z0dGR//vOfrd/v29XVlTWbTbdmsfUUblHMM/I+sqseFk1YzZB1Au7V2WzmQhw8GWPCG3zxDgh7GCCQ5orPXOcE6PgP0MPy4TkgC2PNPCBV+D3Kj1+68PqK6BP38HhhPKLwqwhT6D1my6EoWfa4SQl5ThuNhlv46/W6u+7ZrQ6lB56m77//3t39nz59squrKxsOh+55An+GAUDbaWYeAoV3IdF/nucebgXAO5/P3VIKhQnfzcxGo9GSIYzHEX2GR6xUKtnNzY3lee4WYngooDziCFZ4NBCehbGMjAb4jP9Mp+ylihRrnj+eW5arqbJykxRriKsIhRvNwifS9COApd9TC08ZbhH4VMGE31jTgzbEeR/ZXA+QubW15cSS57kzajNz7RxMEPEhGAPsekZwNBgtNHEwZxQQjM4HzwX3TUGSgg+z5dAFnYdoTpSYUorDujBILtwfZoj6W/Q9KkrbfJ3XCFv++F59t64nMCUI0Far5RbSw8NDt1JCwO/t7bmF6OLiwi4vL+3f//3f7fDw0L766itrNBq2vb3tDAhuee4zLFXYEID2YC2wCxSMGWAJ68LM3I0KNxZ2/yMJOhKpR7FKKpB1bfMcquKgcxFdT807z4POyUsXXdcR41fgnQo3ifgAv2fVumYAB8Vna2vL4/lgqdrZ2bG9vT0//hGKExT2er3uQtLs8YScZrNpZ2dnHg/NZ5YjlhnHPs7nj2ePTyYT63a7SwdJwLKf508WOTxr9nj0JGLvmM45zRBvXoGFNs9zOz4+tqOjIz9MAID39vbW8jx3Ny8X9rrxWGvh8KiUYEdbI8WL5doqQf9bFF67zHN1jSlNal8ibKBjpXJd3ebdbteVolqtZo1Gw+r1ui0Wj9lJptOpe32QbL9er9vR0ZG/8927d3Z+fm6VSsX+/u//3mPu4WGCwgL6xR
GoCOP79OmTx+iXSiWbTCYOntFHTp+mBritrS0/7UzXIbKn8L1Yn0g9xXyUc2Njw1ipVLLLy0vr9/uW5/mSJZjnE7RcFCqln7kAU0W8ZpXcLQSoan7nRivgQ4lcR9qhFMPleyLQECF7fU4Lo3wUjm0yMwejSL/DhI7JBhhotVq2s7PjJ5WgPzC/476trS038U+nU7cscCwfgu/L5bLHv0Czi3LK6XhEQD4SNClBrIyAx5A1tNSYRnWtIrjfsmj/9Voq9pBpJkVz0XrQ97BlL7UgmUGXSiWPM4JAB6NF7BToEhbLnZ0d29/ft7u7O/vxxx/t5OTE5vO5/d3f/Z2Z2VKOVA6Qh5A1s6WwlPl87ifpwMVp9nReOWgc7iG4/1EajYbNZjMbDAbuTgUIvry8dHcrj/Vz+YiOHYAAj2sqxCKaI647qv+lShFY1j5FSroC+4iOub6Ij6AOtlJDIMM6BZckrPtbW1tOb+CbLLAB+tg13mg03O2PfI39ft9DTBADCFqdTqd2enpq9/f3trm5abu7u24MmEwmDqAZ0EDRqlarSxZOFtpmT3IB43J/f78UzsCufsQjnpycOBjj8WMLJ8aQM3noHEdglcFZao6YX7904fAFBqncN7O05y3qoz7LRZ/Nssy9TVDwzcwNT4grvrm58dylUMbBw5BWDJvw/u7v/s7DROByb7VajhFAQ7Vazb1GFxcXZva4YfXi4sJDtJCcP89zd7VnWeZpqtjjCqPVeDy2RqPh90EpQt8wlugLDAS8S55pHPIF4Q1v3ryxLMvs3bt3dn19bff390sKr9Ijy0ylaZ1r3K9esr+lFALUIkbPhBEJAG6sEhN3pogRR99Vk+SB0EWuLgYGAhyDVK/XHRyAsWxtbXneMuw2LZVKDhTu7u48ngTEViqVPDAaMXvYdFIqlTwAH5oYa+0AyLyj1cyWYlVVCKXmIDXePBeqxUZCLAIPEejjOVoHTV7LKgCUUgAi8MLujMgaGAF2/V2vgQ7BPPb3992Kc3BwYJ1Ox2q1mnW7XRsOh9ZsNv3dOM4xz582eZycnNiHDx/s8PBwaTcnjoEEYMBu01LpMQH09fW1p5CCpQvPmj3ljjQzB8ugZwYF8CDkee6WC2wAAIPltE+ptczjpcpSnudLmxN1nnCPPp9SRtYFnJp9bnEw+5z583+zWNnE80VgX+eA62ChApfg5uam80DMe6/Xc2UGv4Mubm9vnU5gqUF4CDafPDw82M3NjQtlbNyYTqc2Ho+9zbVazQUz3KSIHZxMJq58MTgEGN7e3nYrPmgVQMDM3EKFNiCnb7vdXurTdDr1dFlmZoPBwGNqeeyUrzLY1BQ7Sp+qfKRALZ5JGRJ+68LtS1n0zZ68q4hhV1CKokYSVfJZeTIzjw9FqMnm5qb98MMPnpQ/yx7TOX38+NHPrM/zx02fOzs7Pi+gZaTQ++mnnzwzytXVlcfvX19fuzIOTwEMTltbW27tPzg4cICIcQGYBMbg0JPF4mnDlh4UgM2Eaj2GdxaGMF4HMCzwPoV2u+3ejizL7OrqytcaGxCUhiOsFclCnsOUsvIcun1WHtSokoghMhGlSgpkpYAq7kPn2CqlbjCuPwKnWZY5YcAiwAHE2JSCWI7Ly0uPjzo6Ovps0heLhbv1S6WS7wbM89wF+Hw+t729PT8rGoxuc3PTOp3OkmbG8VYQ8izYecyi8VQ3iY4b6o5CHlLzofPJDDHFMNelpMYjpfyYPY8mo7pUEHGdTIv4z4wb9AarO9wv1WrV8+2B4UJzhxBF+h3U0el0rF6v29XV1We7mhlIcvqe09NT16gPDg5sc3PTJpOJp0pBqAtAMAt/aONZ9mSRQuoWzr2Hk4SyLLPr62tP0cLjzDSqwk5j1VK8ia2qqTlWGimKL36JErn7zNIxYCirFFUFPlpXRNcQpojTa7Va1mq1PE0U3OZ8jv1f//pXMzMHqOCbOMYUG5kGg8GSgAVf7PV6SxYqhIuAP2dZ5vl4cVoPzi
//9OmTn7cO4QjawR4C7Mjm1FE4Dhj953hApCNC6Fe327XxeOy5iBeLhW+MiUpEqyn+Eim2XJiX4LOGE7xUiRRvs8/3O0AOY6xTihZvNOP6dYxwDdbT3d1d29racm8nW9Xn87l1u123tmdZ5vwU3k7wWcjhb775xra2tuzjx49WrVbtl19+cWNVu922/f19M3tcK7PZzOV4nud2dHTk4X2QxbzDnjdmmT0p96CvarXqoS/1et0NFzAyoD60HWGD6MN0Ol3K74q9CFmW2atXrxx7HB0deYYNeMeeQ1up+VCMESkqRQZKlEKAyswuslCqZoNGRo3HdxRmsHw9Ak3RfWbpJP3aRvxhIxRikzqdjgcOY1MKNCcQx6+//rq0MQQDW6/XPXk5dvvBWgRNh9NRwbIKDafdbrvwh0aG4ylbrZYn8QVhA2TovPC4MEGoYMIzURwJz0n0Wd9RJPzXwdWkhemG3RdKYzpGRfSrjDUa92jTG98Putvc3LRKpeLnJoPBZtnjJjyc/gQ6m81m7l4Fo4LlJ8sytyYhGTM2P8GN2mg0ljT629tb293dXbJ8QsBE2i/GEYwSY4T1xUdNbm1t+foB44a1CXF8OkaqbCqIRXvQftXsi5Q4LRpusA5F+ZiOTxFvVGERrelI2VV65voh4Ov1uh0fH9vx8bHzrsPDQ7fiQ5GBFYmt9aPRyK6urmw0Gjnohct1Mpm48oRNVaA/CF6ARu7Lzc3N0k7r0WjkYQHgteyCxAYZ1IvNKTAywLJvZp4CC0Ia3+/u7uzm5satqt98842fimVmIUiNaJXnI+LJqlCk9hfo3L50iWRDpHhGVjgGrFCgI6Vf13qe50snme3t7XksPngSNpyOx+OlMDzgAljxt7e3bWNjwwEh5hxu9dvbW8uyzA4PD63dblue556fGnQMjxSfysdjgXvgPUU8qdnT8brb29tutYV1F/TMeYI1D+lgMHDgC6ssQDrGD16yvb09V+4uLi7cWMYeLjVG8XxG3oAUPaTuWUW3K3fxRxq2Cg3cp59TjWNCA7FFjVaNLOqUti9i2OzOhzUAMSFs1USM1c7OzlKuyLOzM0/62+/3HchiEd3c3FitVnPGx/koQSw4XxpAFkzUzFwLHw6HPh5gzHABYGcrQAgWNYNWHjMGplrUbcL1ROMe1aWAJaKDlyzR4jKzzxibWtrxvwicRt913FOWPgZ0pdLT8XSdTsd2d3dtZ2fHP0OZYlrCueLdbtfjlfnMcbwbApNPDIElCNavu7s76/f77u5HXYgjhFUM1iNs7NO+4HkzWzpdCGO9tbXl7QXjNHs6xQp/RYwOY4n7ODsG5jWigaLNKqrdrxPtcoHCq2uyKNypSLFCUdrn8ee4yY2NDWu327a3t+fHlTabTWs0Gp4rl1MyQajDeoqyvb3tcftQ4HAMI7xRSJxv9iisx+Oxjcdj29vb81Ao0E+z2bSbmxuPgwYvRww/LLd5nnucH/oLnszAgHkp+s1xgFC+Go2G7e3t2d7enn369Mn3EuR57u9DYTe00ij/pgAskntom/L9lBL8EiUyjKB/qkCm+qj7L1DUjcz3gM9gQ1Sz2bRms+l8E+vi5ubGvZkos9nMzs7OHOCinfAAIC8v3O5fffWVtVot55k4AWoymbiH9uzsbCndGcYBVk98xo57ZFhB/5nO+RQr7F9hwLmxseFrDcAbsdOQF1C2sAZwytRgMLA//OEP9uuvv/q6wZyoQsBzivWreDDi30qjRfdqWQlQU/EFnKA10vBToIavM8jRRheBXB6wiGAhOMFgS6WSA04AU7ggITQRWF2r1bwePvMcQhjnM+M6GBaYNIgbzBQuUMT1DQYDXxxsUeVdqchLhnPVcYwfgqojC4hqqimBXfRZx1rnSZmGfn+Oyf63KkpHvJD4HmizLOwjmsZvDGq0/wpSzZaZKv8HEwJ4Ozo6cmt+r9dzJoMTnFCQFB/M5vb21mOsAFJBR41Gw931oEu4IhF7yjs+mRFDQ8
+y5QwU2LWKNTafz11rbzQablVlVxncpbDylstlG41GnhRbQ3Z0vFV7j4QXzzl/T8Xv6b3RenmpEtFd0b2gVwhVpfGIN6fq4PdDMYLAb7fbvhnk6OjIY/fyPHcXPawzCLWAZWk0GrnQBZDFRigcmTsajTzUCTQE3owUUvA0YSPf0dGRW6AQ9oLsETBEMK3AWrpYLFzoY30AmCKFEANdbILNskfPwDfffGP39/f2v//3/3aLqgIwjAFb/JkmVfYpn00p0jxnEfB9qRIpONzH6H7+r9dVcQTfZBrHtWq1au12296+fWu1Ws1pdXt7237++WdPpYQ4TfCj4XBo9/f3dn19bb/73e+cB+LQkfF4bFdXV7a3t+cx/sAOAJB3d3c2GAw8BhUHnWxvb9vV1ZVlWeZ82ezpJKmrqyu7ublxXJJlmXtROVRpPB77aVfYvIW1yWARm1ExRgCQ2BSLdYf9NT/++KP95S9/seFwaHt7e54TlsFySg5GxjEtKkcj48YqvPAsCyp/ZmGhC0tRdtRg/Q/NKgIPKUGk7UmhcwBTM1sK8IfbFJMHkMCn8nAiZpx/u7W1Za9fv/bBhcsUpnQIZ7MnpgeQihgpuLpwTjXv2jMzPzrQzJzxQgDg88PDw5KCoATAgjyaOx07HTedd51zJax1AaVRQds45omL9p+fizR8LH6mzWh8UnGN+I6wjt3dXet0Og5EOQyFFSzUi7yTAHjz+dzOz8+tXH48MxrtQ0gAYqABDj9+/Gj9ft/XK/5wAECe5x7XavYUZwoa5k19sABsbW15jlazRzqEVYHdpAA7k8nExxL95DOnMZ4ax6S8IAW8lNYjelBafw6zfKmCuYricM0sOWarxgAlug/WfeQ3NXvaAfzVV1/Z7u6uhzNhfvP8KZ0N5jjPc6cBuCzhQvzll19sY2PDut2unZ6eev+QqaLRaPi94KPwKmEtzOdz63Q6bsHF6TisXMOzANoGIFgsFp6ODcAYABqAul6ve3oiuG7hAv7Tn/5kDw8P1m633UoGoK58IcVPMUarXPxaj879OpRIXiutMjjnfkX8skhRYwt/qVRyUIoYfLNH+Xl2dua79judjlWrVRsOh364CXJIf/vtt/4d/AgZI/7xH//RgSKUdVgr4f6H0jQej52OTk9P3Xhl9rShD2nPZrPZ0hHACBEws6WwgW6364au4XBok8nE13m1WrXRaOSe1uFw6LHReZ77xla0Hzz8/v7ebm5u7ODgwP75n//ZhsOh/fjjj/bu3Tu7urrycWB8hjnga8xPIXt4jjQsTOd1Fe0WAtRUzIwSUGT54N9TRKcE/RxtPwKxUbwggodhBYIVB+6gUukxL9n+/v5Sygm4+WEaBwhALB/imBC7xLv3EUu4WCyWiIStSe122waDgceGmJnXj3eXy2V7/fq1DQYDD3CGtQyEE7nn2JWi41c0FyxgorFOKR3R93VhligKGPl6iunzeJrFYCYC8vge3W/2tGBhVcSxpa1Wy+kQG0FarZY1Gg2PeYLwRaoSs6dYaDNzAQlBO5lMPPavWq3a69evXQkCvTOtQLByfBTirjhPH0Aw3oXPv/76q8dLNRoNd7eirdfX15bnuYfHnJycuBsKVoyIhxSB1IjeUoAsEuzKe9alsLJv9rlhAPOhwiLiw6usaypkcA3vBh/rdDq2t7dnR0dHtre352FQ4H9mT6EICEeC4IZFHmn3JpOJnZ6e2uvXrz3EBBtX8jy33d1dMzN3y0MpwvGm+A3AFQejwG2P9XNzc+Op+xDHivpgET05OXFjAYwW8HDBamtmDkQAhC8uLqxcLts//uM/2s3Njf3888++QUZpGUVpOLLuRwYANkZwPVzvOtAvx/umXPm6flFU1oP/8hhxGBCv21Kp5N5P3Dcej63ZbNru7q4dHBxYrVbzxPSQ9Xd3d3Z9fW1v3761N2/euBEIPPTPf/6zfffdd+6Fury89PAqzPNgMHBgihPGNjc37fLy0obDoQNU/DWbTf
eMImwEYJgV+g8fPtjp6an1ej0ze0pzCRqdTCYelgB+C88tx6vmee4egTx/3LQFz3Gn01kaT8iTzc3NpU3bGGdO0cZzbva0ERf3Mg3ovDKeWMWfVu7ijwgJJRLgej9/T1kstNEMtoraknofBhGgFLFG0LBwVNnu7q7nGIPVFOA1yzLXrOv1uu3v71u5XLbr62s/Bg0WoPn88USd8XjssagAs1n2ZN6H2X5jY8MuLi58NzMWRpY9bSzhOL5Wq5WM39Nx0HlQhYDHkv9HMZMsALmwZhQJuHUpRQqUjgtreugfg9tU/FRUnwIMvg7LFKyOSHIO132z2fQQlHa77dYZPH97e2vD4dB3IqsyAiEP9ygyA4zHY2ekiNUC3cHyCe8BjiaFm4djRNEOxMZiFzWAAGKpzczdYEjm3mw2bTabudX1+PjYU13xcajKA5g3MNjXccc4RLt/dZ54btdBuHOJBDqvSR2nlCIfgfoIJER8AXQKK+qrV69c+QBf4lOfEDcKXgtrEUJAzMzevXtno9HIarWavX371nlfnuce6weBDXcsp0GD+xLgAMLx/fv39urVKwe2cNNisyDnpAZvxdpA36GIwbWLceAMFTAoDIdDq1ar9g//8A+2WCzsL3/5yxJIidzuDAI4dq9InkW8eF15rdnzZHNkMCkCMXyPhk+gAIxxgv5ut+vhcUh2z7HN8Iy2Wi07ODhwyx/GGoYgKFx/+ctfLMsyT5kH/sin5UEJQsomGAjMzN8NRQsbl/I898wUeZ7b+fm5e1vfvHnjRgoYNJhnYQyAQTi91WKxcGMaNj41m023OMMQhu8Iq9ne3rbt7W1PCcdzovwkpQAXGX147p+jWD37qNPopSlrxqqFp3VGjWRhaPZ5LF/EvHE/BCgsOEgl1e12HUAizo83n8xms6X0DpeXl55sHK5RaGJgZhDi0Mivrq5sPp8vpTnh3a0oHz58sGaz6SEDqn3i1Ily+TGJPwK7EXStbiwVuKyBRos/+sx16bw8Z55T8/6SJYqL5UXF/WVApIA0on/+HI1x5BrJssyFPtw43W7Xk58jPRly7+Z57vTIFquLiwuvH7GcAAqIA4QV9eLiYgmw8g5PuDoRvwfmBbc/6oXrE4wMDB2gBTlWsdvZzNzyNRqN7PT01DY3N204HNrl5aWH2pydnbmFAuMDuo0sLHxNxxfX2H1YBAT4+XUCqtp/FcqgVY0Di9qvFmgU5Zn6LrhN+ajI7e3tpcNHLi4uPKYPIVJmT7lEsUkDJ/UcHh66og863tjYsMvLSweG0+nUXfbX19fWbrd9dzEfuwtwOhgM7PXr17a3t+ftxm9ff/310kZWhJ7AtY+wFQDQ4XBorVbLLVrtdtuVPvB8eDTgSr6/v7der2eTycROTk78/TyePKYRTyiSgUUxputCryjc3sg4wB5ZXXfqBdD1qEYUNpDgsJLd3V2rVqt2dnZmnz59ssFgsJSfFqn6zMwP4YGFEqFMiLMHr/zll19sMBi4/P748eOS4nR2duYgGCc/5Xm+tEkQ9IX1gTAYHH8Kpev29nYpxrter9vd3Z21221vE/perVY9dRS3F3iD5wDHV/NxqTwXALYIN+t0Oh5Hq8YonusIY6hBS3GcKhqryrPSTGnj+LdVQDQl2Pl3PBO9L6VB8rvQYQhoaMNw8YOxQkPA72bmlktsYIJ2jR2cl5eXzpT39vZcoF9cXLirvtvtOmHjLGgUNoHz6RDtdtsODw/9HmgyICxYeeHCarVafpoKQgoiga1zEwHLVMyvEm8qxIOJM9J216FE9KkAVK9F/1N1m30uYJhWUTcvXggv5LMzM3dDYRMK4pgRUworPNJL4Y/HHwoY59RFOAloGVZ40GaWZZ483cxcqYI2j1g7MFhsGlksFr5TGodRAHSj4P1wP+FIwQ8fPnicFZTFh4cH6/f7NhwOHfSu0tx5/JU+1bLKY2/2eY5RvneVu+m3KpEClVrHKNpnHSsG7eAx4H18H8AVb+JDmj
OEn0BwAaDd3d05iOMsEeB5pVLJ9vf37bvvvjMzc4tOnueuFCHONcuypZAPKGIIGRgOh+7ZQh9hWeKxgLBGf5G7FPQM2obSx9YmM/PNgNiYNZ1O/UhUzplaKpV8XBDiFSlTDLpUjrGCrHPMlj0u2t91MAywYg8vBtMhZKEqQ3g25fXAvUzD4K1Q9svlsm9whhXw1atXS8o3Pp+dnfkmVGxmwvwsFgvnoxcXF650I95+Z2fHN+iNx2NP1o89K1C0ENoCeuA5h4cAvB2x2VmWOXBGmAIfQ62ptzA+AKWworKrH3wY48v8kPkKjBXAR8zPQbeaOUXnknmVuvt5/vg6z3lUnuXiL1pU3NDIuhYxTS4qSLROvcb38n08UbBm5nnuKUEw0TiSD7vlsavNzJzIUU+v13NNCO/EWc17e3sOHhE4zcfzgbGo5RegE6c6LBYL33CCRYyFjXgnxLoC1PAmlWguMCasfRe5S/V6xOyUIfJzWsc6lAgoMxBhgaC0p5riqnpxHwsYvoax4SwSyOrw3XffOcCEUEWsMeIzb29vPR4ZoNXM/H64iuDWQdoRTkmGnZzYvIJxAA1ijYC5sMY+n89d+ILhwhIBpoWxwdjCGpVlmVtVNzY27MOHD3Z+fu7th0cCoJxjGhWcKSDjsS+iPYTW6JwzaCia29+6cL+1X6n1y7/zeKlSht9ZeeJ6cA3eHvBN5GbkVFHgjZPJxI9JhIcKoHU4HNrFxYV9//33zq+Gw6HPCaxWZuZCHpZ6xDjDzQoFHydMZdmj9QxHSXNeSO4L6kM6QYRZIYaVjQNYSwA1AJ8IM8jz3Ncd1jRbTTn9D8+fyjKdW147Kb6sz0VAYB1KRI+RIh/9nhofrhf3wQK/s7Pj58v3ej2POS2VHs+sv7y8dD7U7/ft22+/9U2cyKrDlkLI7tvbW/vmm2/8qN37+3s7OTmxfr/vyj42KCFeHzQLQwLnwubcojiNUlNEQSnM86eUUMgmAeDJNAcaRxgD1u7d3Z2NRiPb2dnxTVt4H4wKPMaoF2OPjVv4jnFnPGG2HO7Ha06Bp64B5sVF5VkAlb8rceGFKQDEHcC9EdjRd6SAa8SUgcwxaVmW2e7urjMTxA4hITnc7/xOaPLQOpCEH+Z4aCc4dQQhBP1+fymHHsAsLyDVnhBnimsgYsS9MCNFHTj1Ks9ztwSg3shqxMSUWujR+EdKBj/DLkOmB/5btxIBGBbY0f38HO6PgIEKGAVOfD/HoELJQZonAD7EzQHMlstlpy+4+kHjYLJgUBCoSOAPKw/AAgQ5NlzBagqtGTGDrOgB2EJAY90wMAVNsLUHAgIAAadKHR8f24cPHzynINYtrBmaRobnTl3RKZCqyoK6SaEIclknuo3c8spHzZ76rP2LQEtKgdX3MN/a3d31rBE4shFxbLCwPzw8WKvVsh9//NGur6/thx9+cH45m83s06dPHk+d57kn6kdaJngKoDjhpCnEMfNuYoBAKFAIh2FLLfqNcQHfZqEJKykK8l2Wy2VPe4W1hzGBexTtYtmGzSSwGoPnYz0Vza+6O1XJLeLFOp/rUBT4MF9khShqL9Mr4wkUlmlQruFxgoxst9uuIJ2cnLgXp1x+PLkMISYXFxd2fn7uc4j4ZbQLBqpqtWrX19d2cXFhw+HQhsOhW0cRNw0FKMsyz4CC+WVQymsLsakAh51Ox5+DJRdjBaUJ3+HBUCCMdps9hjDgEAD1DrRaLcc5TIvANHxiIdOv8gnmuQpcuSjPZv68ymv1bBe/WTGY4aLWCb0vRahouC5GJdYIrEIgm5lvQkGgPTRmnBIB6yOsRzipgXefQoCDgECQOK4PRIr8qBD8CB+AYEYsKYgB7YTmzv2CRRa7BZHzD5o9b/pCXaxF6ZwoYUSCXuc1Bb4gBPVdXP+6MEmUSJlKlQjEcB2R8lWksOk74WqB+6TX6zmzxOk5CJ7nU8byPHf35Pn5ufV6PT9PGgxlMBj4BivUw+4q0FCeP+3yhxsesa
lws2ZZ5m5MjAcsDBGYAShGfxeLhe/0xxpgt+fW1pa9efPGfvnlF+v3+77zml1ORcI4RcM8J/ie4l9o57qWFK+L1qz2U8cuel6VKQURsGw2m02P3T8+PnbLPQvf8Xi8ZL0ZDod2dHTkMajYMIJ3wW1uZksHRyA8AMIXFkzdRJplmYeiMN+GIYGNFTpWWINmT4Afh1iYmbXb7SUwi/+oixUxjDnWDzJwIEsFjr1mWmOhHPFM9RywDNW0gsyv1kW5Uh7JSmCkQEYAFOMRrc/UmEGhRzows0clZH9/3zc/39/f2/HxsZ8GNZ/P7ebmxnfpw3CE50GXf/7znz00ycx88xB4K/g0MqBwaIP+h+KG2Hyzpw19nCQf13q9nst34BYo8g8PDzYYDJz/8iFDwDU4JtXsKXPLbDbzzBWazB/hPL1ez/7whz8s0ZkaGbXoeotkKdZU9FuqFAJU1bCVqaWEfwRK+ToLF2WcaHRK+4+EF1tOAUbhtsdRet1u1zWUyWTixFWr1ez6+tpzpM5mM6vVam7Gn06n1u/37ejoyDUZsydLKY4lhVUTmpGZOeFfX197ugkwcmamEAosoDWVDMz9YJrs7tcxxGd2d0UAMlISVKAXgU8m3nUDp8wM+btZGsxEjDMl8PGd36d187vxHYwUGjPn0UXC70+fPtmvv/5q3377rR0cHNhoNPI0KKC1arXqMaCIX93Y2HB36+bmpm+OQloV0BkAKANRPlsddMNrCkKZLQIIj1FBxBZQHgdYPA4ODqxer9u///u/28XFhWXZo0sJa5LHmV3RrMDyO/Q9/JzSRCpMQOfzJUuRwsNjE/HYIl6qQgS/8fhybD5iLeHSBr3CYoT5z7LMjo6O7K9//av927/9m41GI/v6669d8B4cHJjZo3X0/Pzc7u7urNPp2GQy8QTnCD+5vr52oAtlBzyOwSLoDso/gwMVhAw4s+zRuq8b/pAjkzNAQEHEfXmeu9sUhomtrS3r9Xp2cXHhKQyxgTWaO54Tpmmz5T0ESgdsuNHQpHUpSq/KH/G7evaUD0eeErZm8+9QOrIs842kiG2HxxEgDjzQ7NEy/91339n+/r63hVOWXV9f22g0ssvLS08lBRAK5R88CPHSWBOcjonDQ2DtBY8D1gDYhQIPpV2t7/AG8Il+wBs47IKVKfBVtMPsyXjHBw3gOoxgvOZZNqoyzJ/Z6Ij+RbSh/HcVDa+0oHKJUHQERnWBRQ1NCZciobHKKoL4DeTug6UTLhvsisuyzF040DIuLi6cgD59+uRB/z/99JPt7+/b4eGhEwismGbmFlowM+Q0xa457MhGzBN2kZo9meMxtiB4uHKxoQUbvUajkbtwEZuIcYmAJhMKMwUWSCnNVudA5zcScOtU2GrBlogiwF1Eh5plIWLCXFj7RF1gVrwrHwnymYE+PDzYZDKx//zP/1w6fSfPc6vVau7Cz7LMkzZXKhW7ubmx+Xzum5KQbBwClfsC6z2sVpyAmnd78viVSiVXlDhvLytV8/nc3aTsisKcYM1Vq1U7PDy0s7OzpSMsOcm5AkrUAYGtCgjPoc7zKkX4Oczytyq8rszSITuRgI+Uo4hvRu9AwRxhnHn+ENvG3iLQ9Gg08iNRYR0tl8turen3+0un36B+5JPEMZRmZjc3N+7pYg+SmTm9AiwgTGY0GvnxluxSRfuxHtniBYHNyhjGC8AD3jIAEAB5xKRiLe/v79toNLL3799/5h5VQc7zsMoSqnPF15hvv3RR+lMaVQUfJVqPWi/zXh4H0BfCpVimmi0DJw6L6/V6S5uO2TrOShE2NSMfOsAu6ATeKTYgsXzHZ3g/8T7QNQxWqBeKD3KR8nMsa0ulkmcpwvtRN35HG+DJNXsKJwQOgZIHjzNkU57nS6cG8lwxf+HxxVzhvshAA5xThBG5rIxBTRENBiMCRSxglCgjwcP1pwixCAgjrgN/AKKz2cwajYZdXl76TmkEDIMY4IIE6Nva2nLr1nA4/CxmFdoTJg
/vx+4+MG4IdGzI4pQmiD+FMDYzD0eA9gIXrJktnbLDrp7UjlGMTcQMUhp9NMYMslLP6LyvS9EFU6T98X36O/5zsHtkmePPrBzw/CCdB+J8KpWKp8/h3HXb29v2u9/9zm5vb+39+/d+ZB8WN04YA8OFBXQ4HDowfHh4sNevX1u5/His6GKxcEFrZr47Nc9zB7Cw4HK7malCuYNlDOsCdGhmHrpiZm6NwOYrjDEA98HBgf3DP/yD/fGPf/T8q6x06diydZvXAc+hznEEQqO6U4rGSxRdTzwfKZCioF1BS2SVing7hBeUYijysMRcXFy4EQCWwix7dLvv7u7aYrHw3L2Xl5dmZq48YXcyNvvh/9XVlVutWJHK8+WNIoihw65q0MDZ2ZkbD/I8942w3CcGxACiAOF5nrt1HzzW7EkWITPL1taWp6FCSBfSpyFNz+npqSuivO5T/EVjTqPCci5SyNZFueL1pvzXLPbYmS1nKtD1ynXr2sUcgj9WKhXPuIC2IME+vE1m5oYo8GBsHgIARegeeCYOadC+4ZQz0D9yBDMtIWaVT+RjmmMPJIAme4SRAxVrkvmCel0ZD6BeZF/BKVloA4ApgCjWKDbwYr0rz0nNZeqz3q/8axXfXXmSlL6EPyuYBNEwuub7lVi1kQqAlIFGBI53YnIACOv1usdQvX//3vM5YtcnJhfaULfbtbOzM7/GuzEvLy/d7L6zs7OUSB8WUwYGHG+IYyWxaYQFB7tOEWAPpgwQDW0O44m+I3ekjkmkTav5PRX/w3OgFqqUls7CNHKrvlThPkTCuCgGSjVh/Ga2TIMpgG+2bHHFdzwDNxQ2eiDNCBhjnuceC/Trr7/6RpHJZOKaL5grZ5Awe6KLxWLhp4cgpg8KEKz+yJeKscKYsPWMmR6u4UQrgGb0lRnrYrHwHaTIEWj2lCR7Z2fHut2umZlbKRAHxjlReYw5Fkrpjudb54xpN2KcPPfrUCLgGAETXI9CpiL+HAl5rQd/4KFwlwIkNptND4Xa3Ny08Xhsg8HAgRiOUMSmvNFoZCcnJ56TFMIaO6Bvbm7clYrYPIDEPM/dcsWWIRgJOKMEh62wss+gAuOETU1sbUOYDceXmpkLas59jSTrjUbDxuOxffjwwXq9nssEpATkTYU8ryklXxUInj8U9rShrAvPjeQQjyWHMPA6jhRGpvNovFBg/OE4eISmMCaAp4dB22Aw+Cx2eT6f29XVlT08PNj19bWdnZ15iAeMW+BpWZbZ3t6e9Xo9Oz4+9j0ASIM2Ho/t6urKTk5ObDweL60tWC853RToEQYHM/N1wi549AMAGQY6syf6gJUWRjIz+8xTYGY+Vre3t3Z4eOgZVnCPKr4R0GSZmFJ8GR+ysWdVWZmon4VxBGiiRqa0oFTjuU5mmOxa0sWrGiQYC9C/CrTRaOQmc8TrQVBjtxuf6DAej/1kKfQJhA7iAEDN89wT6WNH6N3dnZmZu2tBmGgjQDXM6kgDAYKCtRULENa3crlsl5eXbi1G+3lslJAYYEQCTucg+l8EZqP5W8eC9qp1iktEX3ydr2m9kWKG7/gdisrl5aW1Wi2PfUNsEQQ0tGnEtMGCA4s/6An5+CAwLy8v7e7uzvr9vu/e5Bg+MFu2vvb7fdei+dhf3sQH5gpgUq/X3b1rthzvzIIVO62RqgrCiXfgNptN63Q69vDwYFdXV0tARAU8xwiqwqRgM1KoVVjit3Wj2xTgVo+GCga+F//1fn2W78c1uBoB/MDfAE6RUmpra8u63a4LxkqlYhcXFzYajdx6v7GxYYPBwO7u7uzy8tLzRfb7fev3+w4YQPvoJ9YEBLGGf4Ce8zz3LAHgz6AxtIt3MsPFj9CSWq3m9A46wZjhvRybq+nRcPgExg1eEaU1Hu8oXE6t3Gzs4XFRulbA8FKFaRNzEMUjMjjl7yirxojpGCAMPASnQ5mZXVxcOH+EpRRgDukkkSZtOBx6ndPp1D59+uTgFJ4jgL6Hhwdrt9vW7X
btu+++8+NLWSGCqxwGrcFgsLSpGko7cpwjnRkwBQxfANnog6Z/guWX3eagHexFWCweT/dDTC76ztgI1uKTkxMbjUaWZY/5WXFMsWIzvF8Ve72PAa7ZsiyM5l7LszZJ6Qu1gayd43qRBUDBY+o+dChahMpYIeTNzFNEIb4I70GgMk6Lur+/95x+2EAFRoPjGWHNhCaPIx3NnhLuAsgisBl1o01wa+3u7vqigKUI8bGwioHJ8SYpjEW73babmxv/Doatlkslmkgo83yxi4E/M3FFikZEE+tS1AIcaedmn9NbxBQjIKBjwpaNCMCaPeX9XCwWS8oOrJlm5lYX0AesSldXV/bhwwffKQxGidNSEBICmgRN4d1g0PP53DfYmZnv1s7z3DcUapA+hDQYNTYiwlMA5oiYUyhhAOPVatX6/b4H3qNAuACsRm5tLgyCMa6YCwZiWphHsQKSEpwvXSJgqgonF1xnK7HSsypePBYMhvEZghXeJqTq4XfifgBB3oD36dMnOzk58dCNUqlkg8HAFouFnZ6e2snJidMb76QHbcGLtVg8pfiD5ZXlDCypAHMAtCioBwIeyj/GgL1v+A6hj/7gfeVyeSnNVJY9WuuOj4/tl19+sVKpZK9fv7bLy0v76aefQuAY8U4dT503/j1SpNaJ90b9QV8ixZ3vjerhOnAPx1He39/bxcWF8xEoUFtbW55qCeEYbK0EiMQGKE4VCQsp4o5h7TR75M97e3v27bffWrfbtb29PacTGK5g7QSYhov+06dPnnGi3W67lbVSqVi73XZ6B1g0ezrhCnQKIAuLKYcsYtNeuVy2T58+LeUVhtUf9A9vBwxiWF+1Ws1Go5EdHBx431OeRP2sCgp4El+DrEh5tLSsdPGrxq4NxWd+YQpha6eiOlPCHYw5Ah0sgFCgvUD4IQYUzAyWGGjgsA7oZphqtWqj0citR7CoYqPKcDh0axKf3wviQ2JrXM+yzNsFIALCwTsAcjnNVbVaXTpbGnGIzEB5zFYBM77GQDgSZJF1Zt1BatQPlOfQod4XPRcpbGw1YIsHaAlAcG9vbylQPc9zzw0KusGcI5Rjf39/KSYUVqfxeOyW2KurK7e4ou4sy5zu0QZWRKC4wYV0enpq1WrVut2uu4CyLHM6hIDgfHn4jtgtCAQ8y5tjkIcP6dtwv7rwdd7YRcjjz4oVz01KsSoKBViHElk8zT7nj0XKkiq3yj+ZZytfgECE0g0QyCAS/Ac8EXMMzxSe6/f7S0eG4ghKJDgH/cINCRAJaxKs8LBKYsd/lj15zQA6sPdgOp264YHDP5BfGkCG06Gh31DKcIJgt9tdOmoS7YO3A6dbARQjVY8qUtGcRkJc+W5kIMDv61Y0jIRliipEqnShMM/k9c1rnBVJKNt/+tOf7O3bt1av163f73u+3eFwaNPp1IEd0irB+wQ63d7edr6JnfvwhHJavmazaW/fvvVjQXlvAHjzxsaGK3bg8VDwzs/PLc9z293d9SwroH/wbJYJUN4+ffrkngakywQghSFgNBrZu3fvPI7/+++/99PeeK1zCISZuTFte3vb3rx5Y//0T/9k//N//s9CV3x0jXkMh6EoTlC+VFRWuvjRqUgY8+/a6FWCnRusRQlZr3P96CyYIrQnCM/JZGKVSsWOj4/t4ODAFouFu2dgysZmlfPzc/+OOmBNms1mdnFxYWZme3t7fh1WBtxrZs7U4O5HjGqe53Z8fGx5/nR+LhhsnudLGQcQK4t8egAREArIm5YCWEyQvJtUF7jOVzSXPI9MUCroo7l8yVJkqVArDN+P61yPLtQoTCBlNcBn0BW7GKGQQDiCoeJMZsSRdrtdj80EA8BBETs7O+7Kwualm5sbP74XcUzYzAElDCerAag2m00HEoih441QZstCVccMTB8KID8DdzDqAqPU+HGO1VVhF82v8gidD53ziGbXUdin+F6KF/M48O8R8DFbPvpU+SqnVYIwZNALVznCSECvcGuaPSath2
KOfM489+DX7HJF2ABbicA37+/v3UWZ5/nSjmzERLPQw/pBm5GvlOOmQftm5huebm9v7eTkxPMNI8yGlUkAaBgXDg4O7Pr62gaDgYOVaMyjuWGlSa38kbWc5xhzGN33EkWNHClgEtEpXNtMlwzOFeyicKqzX3/91VqtllUqFd8YhdPOQB/wWgKgIRYeFlTIVvBNBt27u7v2+vVrB6bACAhlYVpid/3m5qa1223b3d21w8ND570AiicnJ77BFekuwUMhJ/b39+3t27fu2cDYIAvLeDx2w8LXX3/ttAkAn2VPhxEBLKN9Zk/yrFKpWKfTcdpHqIbyZNSpRgN8RimSk/hcVFbu4lczfIq5qxDh69pYfU4byik6IsLU9+Dd2IU3mUx84mHx/PTpk02nU9vd3V1KUwKmiJ1/vAEKp0LARVSr1ezg4MA6nY6DT2g+MNvn+VPsLHZrw0oLKxrSSGD3NgQ5mC6sCbBcINUELK6LxcKvaQhEanx1Tlng8JimhHUEEpQ21lXYc4mUH2b2Om7MKBUw4U9DLLgeCCxo1dDIJ5PJ0nG5UGK63a7NZjMbDodLlniEq+DEKLjiweSur689TAQuHcTBwSIPJsobC5BOrV6vezvhDoJChh2gZuaKH6wOABlItA5Agh2tUBIxFkh91W637e7ubilnIc61jpQI/s7hLzreUVGGyhq80sW6FG4z959Bjd4fgR/8xuER0VrnOu/u7uz6+tpj9uC+BO/BBqqLiwunP95QgVg9xCvzWegId4LCDa8WwCtoEbwQ2VUANhG6AhqERRRKFUIMcHhFqVSydrvtaQIBuHkjyd3dnQ0GA495/Zd/+Rdvw2g0chmxWCzcgACgDdCqVmtuY0SbCloxBymwqYKeFeB1od1I2eGNZ8pHcY/yXYybgh/lseg7No9WKhX79ddfrdFoWL1edxwAJRuWf1gfsyzzPSlsBEJhvt7pdOzVq1dOFxw+iH6CjvEc0wU2QTP/7HQ6NhwOrVqt2qdPn8zs8ZQnAF8ccIF1hTFDX/EZY/d3f/d3S2miED+K9T+dTu3w8NCVSOAUBrswJkA5ZLoq4pmRPFRjwP9LedYmKTQminFSZsnCW68x8CwCSCnUHSFvXIOL8O7uzo8oQ+wUGM319bWZPcZyghCQ96zdbrvQhPBfLBa+0+/i4sItAph43tGJWA6Y9s2eNkgBLOzs7Nj29rafSAWmD2sT+o40VbACwNVrZm7ihwDAeESgE+MeASjMQTRvCsR0vsBkmJGsS/weSrR4Uhoc//Hz+B1Fx4uZ7qqYMeSiG4/H1m63bTKZeKods0dGc3NzY6VSyc7Ozuzi4sK+/fZbm8/nvtud5wCAgzfhIeQEa4zToF1dXS3ROjO8LMs8WB+n80AhwzsQV313d+daPJ4FQL69vbW3b986TaO9Gk/Y6/XcCgeAAfet0qEqAzonKAwMUrQQCfLnavIvUZT2FFyrwcDMfNx5g43eq2AAnxWknp2d2VdffbUEzBBHOhqNPOE9hD3ciXme288//+wWf2Q4ubq6stPTU08nhbmCoIfCg+wPef4UUwpLLaf3Q0w1PAR80h8Uv0aj4cCCPRkAJPiO2LyDgwPfEY62YOyYPgEQALwRewgvBo8zj7XSNN8XgYEUvSp/W8eisoSVdrM0r9Q+Kj9Amc/nngkCBzsgXv/w8NDyPHcvKSzgnL8XfBKKEOgL8ZhILYWT1BDWB8sk1hjAKCs+bJFEaB74oJkteRB2dnbs6urKeSH62G63Xekye9y81Wq13EuV57kbvAAszcw3QSEuFQpiu912oMxyCWsL6xHzAwzFdAarKs8F05+Ga0TKdKRwpMqzXPzaiOhlKKmYmSIiLAK8/J7oGp5l4AsGsVg8ntDQ6/Ws1Wq5oAbABJFNJhO7ubnxoGIEI5uZxzS9efPGGScA2cPDg11eXromdnV15eZ5/M6uC7izzJ4mHwwWghwpWlqtlrt9Hx4ePKXJ/f29dbtdT2GlR+pF44txjZ
QJjBnGC8AlAmupOVk3cGr2ec5AXhTq9kRRJppaQBFAUMuHKg6wpKLOm5sbu7y8tOl0aj/88IOP+adPn/yZ0WjkG/WgZCGOFHXAwsNtAl3DrQllB8oSNmVdXFx4Un2mWcwnaIs3rugGqc3NTfvw4YPd39/bP/zDP3wWT4cwGIDbvb09F9Y4xQ0gHWuY3fzRHEXzkrKw8HypwNTy/622//+rwrSaUqZUWVRLC1ufIp6sgJ+V2fl87p6nDx8+2Pfff++W+/l87jzv5uZmyeU4Ho/t4eHB/vSnP9n19bVbrAAoYeUEXcGiifcjvyoUMIAKKErYR8CgGOsZ1lgzc3pbLBa+o5s3srCVDCf9wVUPBY1jYB8eHnynM+YCoVawunKIF68Bvp+/67jjN7Z0R4Au8gDguZcukUHDrNjIxX1E3KJ65syW4xtZXiFH6Ww2sx9//NH+23/7b+45Qszxr7/+utQexNrnee5KxWg08mN7h8PhUjq84+Nj38QEJUrbyWAVrnrwXuTRxToE/eV5bs1m0/k0Nncj96luKIKRA9cRZw0DF2/SRTggUr0Bl+DP7CkjRalUchBbKpXs/PzcLi4uPjOOKT9SvsIeg4g3Me0/FzOsPElK4w+UiUegh69F2pCib71fAQW/S9+HwcFOZjCpjY0Na7VaS6ALmgp2znE8J8zbiMNDHdCums2m1zUYDMzsaYIxKZx2BL/V63Xfwc/HSi4WC+v1er7haWNjw8/fZc2lXC47wZqZHR4e+s7ter3uuxCZCah2zmMejbsyBAZ3+l/Lumrv6GvKzbDKoqYgKAV4UnWm6BsJx1utlqfcGQwGnl1ie3vbOp2O5flT6jKOt4N1CEH7oG3EWcEyb/ZIn7Dew90F+jN7CuY3M89SAesuFDS0+fb2dun8ZvT93bt31mg0HHgiDhUxWlmW+doajUa2t7fnMXwQ+sPh0DY2NuyPf/zjZ5v+UlaVaL71ugKFSGlbF1AaFQUiZsXHTRfxZqVNFR4ARhBy4DFQZF69emXX19e2u7trX331lSvNZ2dn9vHjR0/yjc0mEKJQ6pG2B1ZzZCxpNptuFWWrPBR5tA19B8BEeyGEQTdQ9hETC1BtZh5OghyX4/HYLi4uPGwL7lcW8AAACOlCCA68H71ezy4vL31dHR4e2s3NzVLIA1ylzD9S7v9IiDMvYs+HzudLFwWUZnE8uG6+wRjjs8qfCBSpIYVDUdrttqcGq9fr9ubNGw/jQOwpFGycuJdlj6mcsFEKMc/Hx8fWaDTMzNwLCuWF85mCRszMTzbDvCwWi6WMLcAJMAzs7e2ZmXmY1GKx8FPXYNwCDwZdD4fDpc1OPGagN2y+Alje3Nx0notwmSzL7Pr62nq9nvX7fbu6urKtrS377rvvbDQa2fn5uVuUeR6jueF5LVJKQB+K76KyMgYVkx8JbCVAbVh0XTuppahzSuwKujAJWZa5WwnMdnd31/I89wmFCwhaDogGGvF0OrWbmxtvF4KRQYgAwGbm7gXsCgQYRlu3t7fdCoCwAFhGkWql3W47gwcBAUDAvYY+I78b0lcAqBQpEJGA0ntS9/N8FjHUVXP7WxcFp7A0qMKljE7dFGbLWrLZMlBQ152+EwwYGv9kMrFGo2Gz2cx2dnZcw339+rX1+307PT11F+jp6amfZpbnuV1dXbmyhHfB0sRME/SKHZ2Ir0K6KViozGxps8psNnN3+3A49NAXM/Od241Gw923r169sna77QIdMaZw23McVrPZtGaz6QwPVlUI80aj4RtqVFvnOUVhhSqyijP9rqLLdaLbiEZRmKZVeY/WOQsE3KegFkIU98NSc319bd9++61dXV3Z/v6+C9B2u+1CHUnJe72ee4DAd3Fc49nZmV1eXnqGCijXELCog3M6coYSuC5BWwAFGA8zc8GN3JEPDw82HA5tNBr5RivE7YPmKpWKHRwceFweeD/2CIAH393dWbfbtcFg4FY0VgLNzHq9nv3rv/6rnZ+f208//e
TrSw9ZYaMA85YUvUcGHJWL6+LBYmNQZESK+qdrVHkyPoN++T/kPHKZ7u/vezYF8DDkHgUNcVgag96TkxPf/Y80kK9fv3YFCq5wKHHwNLFCDUsm6ocShV32UEwwZwhrwWeEqiAPK2KyUWAtvr+/t3q97gYArDOMFTzIHJ5ye3vr3jOkz0J9sJQCKDebTT+0iMMIlRYV0+ncMa+JFOvnKFXPcvErI4Q5PmqYlkhoa+ei73w95T7Vd2Phs1YDhtTv9z21D6eeOj8/91glxHNAU4G5HpYhNo2j/5hkMDwwt7u7O+t0OnZ2duaxJ+Vy2ZrNps3nc7u5ubHxeGw7Ozt+YguEA9xl0OSx2QDmduRmRQyYatTRuDNji7RaZRRqVVViVIb7HG3oJUoRrUR9M4vj/VAH5oeLhlZETBfPIRXT+fm57xSuVCp2fn5uNzc3S2cil0ol+/Dhg+3v77sril2O/X7f69/a2rKdnR3r9/tLwhsupG636wwHzAwxVqBdpMdBaMzXX39t3377rVtwcT82lDSbTbeqYv7v7+/t9PTU/v7v/96vsXWYD6Go1Wr25s0bz6ABa3EUZpKaV4w/X09p+ZHywOthXQR9SoGMwDYDm0iApBR7rpvpGs8gtGk4HNrh4aENh0MHq9fX15Zljy7+P/3pT7axsWEHBweudADI3t7e2nA49A1LEL4QlFDmmUdxajIoUGZPR/OChnEaFEADAAhvfkIC9Pl87sehInsL1ghCv6BQ8QEsMCBgkyAUMWTNODg48I2HOFGo3+/bZDJxt//79++TgpznmwW6zh/fo/MXgYWXKgBrRfRmFh/ZivuiNcpgj40C+B1xv1dXV/bHP/7RKpWK7e/vm9kj2Przn//sm6Egtzc2Nuz29tb6/b798ssvvov+6urKqtWq9Xo9p1NYYuH5wd4QtB+8DG5y8DAAdowLjBzoA1z/jUbD24NYVRzRWiqV/IAhs0dFrNvtOk9l6ygOEDB7wiXAFo1GwwaDgbcTGx0hZ7DWYSDgja1ot1pI9bPOtSrY6o2PaEPLSguqVgRmFpW/ZbGA2FIBt6r5PwcAQ+BwSpDF4vFsaLiU9vb23CUEYY5JRZwGhHilUrHr62tPhss7sbvdrnW7Xdvd3fUJhnmcj6YEuETdYLIAArC0IlQAbia8C1r+aDTyM67NzMMVADAQnxUJaJ1PdaGkFIAoloqL1pNyp79UifqVEtjKGBmk8vNYjCnLvi5YXC+Xy342vdlj/CgyRcznc2u1WjaZTGw0Gtnbt2/t/fv3lueP+fKGw6FvrkN4SLlctt3dXbu6ujIz8w0qCFlBHDXimmBth9aN+DmcL65xn3B7mi0fCQngUK/X3fMAxS7PH122oEVYo8bjsdXrdX8PmDHiYpHGp1arWbvdtvF47HHiPPY8Z0UKrc4XX1eajd6xDiUl3LlgzZktpzOLxkbpkevANXwGLWxvb9t4PLZ+v2/NZtP6/b7TGVLr7e3tLVngEbKEsQUIGI1Gdnd357vozWzJjY7d/nC9gkdybB+EdJ4/HVfK9eA3PIfQrJ2dHd/NjVRrZk+nHSE2FuMIqxnuA/83ewQIoHMocqVSyTdXbW1tWafTsbdv39qf/vSnzxTWaA7Qbp3ryDAQAVyex5cuGlLDPLHIOxXx4JTiyQo/riEzD6c6MzPfFLWxseGbpAHo+PhTxFaXy2XrdDp+0hMMR3gOHiEzcxc9aJdPp0SdDPBgWOI55TysPLfIQgBvA5TG7e1t3+CNNiEPL/hwlmXuxUCOdux1Qf0A3hhjGAf29/ft8vLS9vf3l2JMixThyAjAeIzpgXHCc3DiyhhUbVBKGEchALg3skahpFxzuBYt7EirRKcxMRwwD9cNiOn6+tr+8z//08zMd7VBUGLHNDSIXq/nhAmwCiYLpgbrK9y4nU7HTeiHh4euteR5bicnJ76DjwOXYXnlghACWCPQr52dHQcEcKPqRpkImEYCm5kHj79abfCMzjMT3HOJ7rcoHO
Ol2jdKFIyP/qhVFCVlAcFvPL5K/3CdIicfYvNwMhQUkFKpZJ1OxzVoFr48L8ifByaE90CYco5AZKxADB52PJuZ7zIFyOHNKEwXaBussdi8BUULiiHAw+3trVUqFet2u1atVu3y8tK1d1itACg4vUqKhiIhHDFGZqi4nnKh8riuS8E8pNZUBCpViLCXh3/jcVFAq+PY7/et2+167Okvv/xih4eHtre3Z6VSyZWM/f19Py6SPTxsrc+yzIEiBDZokmPz0HamQ06Lw3+LxcLj97FfALTFMX+wWiE0C7+jz5ANUPCRS7NUKi2FoDBgMDP78OGDXVxc2NbWlr1588bu7++t0+lYq9Vyi5yWiLdG88y8Vucn4jHrAFAjNy6HY0ReOb4vklM6XlH9Zmb9ft+2t7ddQQYPx8ZOHAiB2Gk2FCG+E7vhwRezLPO4ZWyOQptAo7w5DQAS7nqsL3gVOC80PoPm4dUtlUqe1szMfEMg+oMYVTNzYAyDA64jpAAHrcDyiRAYrEXQPRu48H9jY8OGw+HShq6IZvE5iqeOjAmsVOhvUVkJUFdVEAFWvR79zv/ZXawF15TB8u9YBEy0ICZoVjD5I4ZkOBzamzdv3BUJt9HZ2ZlPCph8vV53YYt347QSBDTjeQj+ZrPplgbEGULoHx8fOxFD4+PNXWD+SGeBvG44SAAJ2kulx9RW19fXS4JZx0/nQJmaCm79rnVHGhPP1ToU1b45PoqvQ1DzPQo4GeDgOdYOWUvkulVTRJvgTtna2nJ34GKxsMvLSzs6OrJ+v+8WKjASuCCR0w+gDjSGtsISrzFbiKcGQ4PmDq0aO5+x8/Xs7MyVLWj9oHezJytbqVTyk9EQEgPrE1yi0P6hXCGPMLf9+PjY/vjHP7rSx2OIuWAmCQtFRMv8n59lxUMF3joVBqdKW7zmFBDg9xQtmz3NWwr0mj3FS97e3voGKYQsYZMT3j2ZTOz//t//6xv3Li8vXXBifhDSAWsm3rWzs+PKEdoGXoxxYP4O7xIEOZ4BHwU9g8axSQTuXRgEeAc3LEsALLgfbUAe4p2dHXv37p199913nlEDm21/97vfee7IVqvlKa04XCVShnXctaQUB76mcvUli8oa0GAUFhXx4ciwoiUC81CCxuOxp5zi077a7bZdXl7aZDKxZrPpys9wOPSjywEoobhAweJwKwBJhOshlASKOfoKOuLYZ/ZwmNlShh8zc48W3PecrhJp0HCs6/HxsbcNmYMODg4ciJdKJdvd3bX5fO6baeElQDgB6BN/8GzBkHZ5eelAvt/vJ7Edz5vyG6VVnlMdj1R51iYpJRpdZCjq4lUCK2KKKVStxBhZFpiJgSEipQ7i9SaTiQvbH374wU5PT30jSLfb9XoRbzQcDp2h4nQFuAVApNjwgdN8zB4HHjF6rVbLzs7OrFarWbPZdNdrp9Nx5n5zc2PHx8e2t7fnjByME33qdDqedHqxeNz9DzAM0Mou/lRhBsDaa4qQOG1FBGCZKCOQ9pJFATXHNUIAa0xM5HqIhMkq6we/X90kEIjT6dT/BoOB0xosNjgdBBs+mPagFOEagN9sNrObm5vPDnYAk8UGKM47iVilLMs8DzAsXbu7u+76AV0yADB7ysuLNlcqFev1elYqlRx4o9Trddvf3/dxhAu01+vZTz/95MJiFW3meb5kHeT5Sc2fuhd1btexRMpfdI3HiIFLKo6aaZ7BO+rgsIHRaGQfP360er1uu7u7S3H2Dw8PdnNz4+eKAxhgIwbCTfI8X3JNwg2KMBSc/43NKWgzNuXVajU/EpfTSYHuKpWKx1ovFgs/QYjTpIGuQeuHh4ceb2r2lLEFMeEYC3jBJpOJ/dM//ZPnoUR73r59a61Wy0FJpVJxsAvlDHy9SDbymGu8n97H88qhbOtSUgogrvO+Cf4N/9WbZ/Y0PsxXIfcQWgG5nOePB/MgNV6WZUseHsQhwyt5c3PjXlIAQSjtsJyC36DtyHDBR+oC3MLqyQoKyw
G8i8Ea9pNA4cL7ML9I84YMFPC67e7uetYCvAcxrPCumpmPA3g8Qr4Aajl0YbFY2P7+vv3+97+3X375JVTkI+VJ5yyiX53PVeXZMaiolJmbNjCFrHkxpRoedVw7xZ9VW1PgCtP10dGRffvtt77zGMc77u/v22AwsOFw6KlQEDyf54+xf6gbjBgaCOI/EH/E2hosXJeXl57nkk38IGyOCQTAZVcIiAdJ+bFJ4OjoyGNpEEPFFthUHGikaauQ0msaH1wEbhn0rUNRBqkuXo1F5P9F7mB9h9kyLWpdfC+YM4QWGB8sk6enp5bnuf3www/25s0bu7i4cGvOaDRyTTzPc7dqguli9yp2ZSJdFNLjQEiCUeE0HOy8Z4so4mInk4m7eeF6QpthFeG0UtDGUZgWTk9Pl0JhHh4e7IcffvC8xFC42M2v/ANjrQBN3fkKZlOKMfOlCMC+VInWZEpRjARFNAb8e8QLzJ7WMAqU7CzLrNfrLdGL2aNy0ul07P37925hReoe/Ed9Ozs7zks56X6WPVlM8Rm8B65O3tVv9pRo38x8t73ZI8jc3d1dCqlBhoxms2l5nrvRAvwSyh9ACVylANl4DgpXqfSYRhD7F+BSRrq/8/NzlzOtVstOT0+XlH2eK7PPZSpbHJWfRjyJFe6XLsr7uA+RTFEaxX/e46IxjErTsOZjzA4ODjy95KdPn/xQEvC0s7Mzj5tnTxG/D/wR92FOOEsQgCA23umxvDCIYY1qOA1kN/7gITN7OrgCyh3wDE5Zg7cAG7kwxvibzWZWq9U8RBDXgCmQSnM8HnuoIei+Unk85e/4+Nj++te/Lu1rUf7IRkHMScSTmD6Yj6OOovLsGNToO14UIWgVKCpU+Fn+juejurRdvHixYx+AsV6vu4kau+Dq9bpdX19bu9126w6sQti9j9Q/iDMtl8ueOgUWpfl87rvdOA9qlmXO0BFMP58/npwD5gbXEKxf0OweHh6s0+lYv9/3HG6wViD2BIx5MBj4qUN82EAEJnXcIithFJ+pc8KfdU55LlP1/NaFQQfaxUJCwQzHcOI5/q5gKRLuZk/xSXqdlY/pdOrJwb/77jvPCwkQUCqV7NOnT74DE8nuB4OBu2Fg3cI7ofSA5uAWwsk/6APoGxtREDeFOhCTDSsOdnPD2oo0baAlxE1jAw0AA1us4SIFY4SVCZbfq6sr63Q69vvf/97+9Kc/2cePH71fOvYREOOSos1IYVBmyfP2kqVIkVJeHCmF2i8UvVctcXgf1gqUmzzP3WJvZu423Nvb89Rh7969c2GGpONYb1hPABH8HzuHYf2cz+fW6XSctwGcwi0J/ttoNHxTIDwLoFHEmrJihw1/2CwFgII5R95L9J+PoM6yzEHDw8ODW7IgB6rVqnW7XavX6zYcDu3169ceg4qxVPrl8VYFC3PF/3n+FSysi1GAZUu0PtFu5Z88BrgP19mTyPUyf8nzx/R7yBaBzCKvX7/2OavX634UOdzpOF4U8alQ+M3MU6Bhoylou1R62lUPBYvnGQX8jWUL2q7yATzVzDyUBRtMzczpFMo7wlAgK6DU4cCCUqnk3mKEEkABQ9+yLPNQFOQtPjw8dK8ENocx3aewm9JtkScSNPBcI9CzXPz8gojRp4QEdyRC15ggtmLweyMhowPDBM/xFAgyhnCH+X9jY8N+/PFHT7jMSZ+xcw4FcadIowIzON4L7QEWMGhqYJ7IVwqXf71e9zPPESMLwIH8Zngf3FRgzoh1ub29dSswTqDgTTQpAjFLW1eVwTGTUTqIaCB65qWLWiiUMRYBGKVF/s6MF0wzWmz8mUEWhDDn2oWlBWARm6hAB/1+37Xbv/zlL67xDodDMzNnZhsbGx4Yjzgp0A2fh64blcBYYVnCdSSubrVaDkqQHms+n9vOzo4LdYDnLMscpE4mE9fQ7+/v7eTkxHfrc9wT5/tDDljlGxoDzHOcUjpUS1dQGvGRdSuq3OsYMG3jnsiQkBrHSOgwvSNWeD
gc2sHBgcfUwy2YZZm7H0FjoCEINj76ltcllJ88z11JAt2aPcVLgz5BL1DeYdEye9oNDSCLGG4YHcDXs+zJzYlwBKxP8GV2f0I2ACghDhwxjey5gmft/fv31u/3PZcx2s5jz0pwkddL5y/Fj9ahaFt0LSpI5/v4O69Xs2XFEXXxmMHql2WZffz40brdrh9LXiqVnG6Q/rHRaNjFxYXlee55xnlMQbugLdAIywQo81DCoMghjzkrWLPZzLOecD1QpEAHiLHe2dnxjBM4hAdrCRkJsF4gh9iYAgDLckkNMQDi8F7s7++7QQzp1RA2BkzB86Jep0j5ijAbz+NzyrMsqFwU/KRATOraKnDDA8r18Hv4Pm4PQGq1WrWjoyPb3993ranZbHqs0u3trQ0GA3v9+rXlee7MLs9zt4zCNQ/XJQs7BhwQsLC0AhCDUJrNpm1tbXmIQL1ed6Z2dHRkZuanXHz48MFevXrl2hRAATYclEolT0F0eXnpQoFjpiIGwEQTaeRa1LWfEuCR5rsuzBKFmZzSFzN5ZlBmT8duKihQRqr14X06Njo3AG7IZfr1118707m8vPSUTLCADodDy7LHzUTY1ZnnuSsxcPMDnLLFCac2oU28w5nzSSIOFqAC7RkOh9ZoNDyGEGnWoMWDGWKc8R91I33QwcGBtdttB6AAKLBKTSYTK5We0gyxENcxREm5DKPC863zEjHRlypRyAKXqJ/cf6XFyLiA8VWQzvfxiWL9ft+VFcT9IYVYqVSyXq9nJycnS6fe3d7e+qY3WHEAQJF+D/l7ofADfMJCubHxeAoah1UtFoulzYC8SQWfzcz5JixmEMq7u7tObwCpfE464lEBTJAxBe3FxtuzszMbDAae4Bzjtbe3Z+/fv/dxLZJtZk9xjTwXzD/YmIPfeZ6Ud71U4fWlRZXDCDcUGUQiAwDfB1DV7/c9Lpo3CcGaXqvVPPsJ9oCAFrA5CfKcrbSgyX6/b2aPgI4NVXzIiYJbtJNjVFn50TR+m5ub1m63fXM1wCn6zZsEGSiiPuAPHmv0A20Fbd3d3bnnDuP117/+1TY3N/1IYxg6eJ4VH+icpzCB4pJVZWWi/iJi4cWnv+tnBU5RQzHhEXhQhh0tSFh+cD404kKxaxhxTyAUEBLM4GBYmHDspFssFm65xDMI3P/06ZPvRoU7aX9/37a3t5dSWcC6cHFxYY1Gw89Yh8ZUrVbtz3/+s5XLZfv973/v8VdZ9ngUGTSzi4sLJxYsKgCByB2a0mBTFiMWZspQWfBFQnMVQPgtSwRAlJb0ftX+zD5PncIAlIGRvpPHBzQNRQeWfmRogIUfJyltbm56jlszcwtqnuee7Jzvh6sfLiDkFwUQBh2zh8HMfJMWW4iwmSXLsqWUVHyyVJ4/eg0QVwjvwPX1tSeARu7W0WjkoIYZsdkj88apRPf390un7vAO/QisqZBGiYSZzkvEs9apcPuY1+l1BaS4x+zzw1X4OvPQLHtK0s88lYUnBBncnIi9B11B2dnd3fV80NicCkummS3xT6wHuGMBHmDtx6Y+0CzaAnckFH3Qn4YB5HnuXqxqtWrj8dj5KVyciFFtNps+pgwisGkLIVSNRsNDCbrdrn369Mmur6/t5OTE476Pj4/tL3/5i+X5ciwl6mcDh9JwJNsi17fKy3UxDLCVPBW2oHJf17jGJKYUR+4vUtldXl56TvPz83NXtJHirt/ve15fxImyhRy8DnHFaAunJNvf33e5DfpH+B6MVYxdAJD5EAj2BCwWC38Xx0XDu4Yxmk6nbmSDQoPN1AjpwvGoUOYAjvnQH47fHg6HvvmbDSeHh4d2cXHhcbvgGeiT4rNIyeXvrEDp/K5SrJ51kpQ2hK/p5wiA8u+q3asGpc+paZkJmd8FxrG1teXWnnK57GkVON4IrnZoJmCwENgwx3MC80aj4daCxeJxF/V8Pre9vT138ZfLZRuPx+6uRx/B0GD2Hw6H9unTJ+t2u5bnuR9l+dVXX9lPP/
1krVbLer2egwrW5JmI8zz3TVeRFUmFMY8tE0ZKw+FrrK1H4CAFGF6qpEBIipkrgOdn8Jnr4I0MWm+kHDCtwsp+eHjodAihnue5x2oidgoniCFVWbfbtcVi4bs94cY/PDxcim3DxiMcBcixn2BwZk8Jp/HOq6srj9N78+aN1et1P4kHit7FxYWDErj2UTeftw7li0Fnlj1Z78bjsb17986Gw6FNp1MbjUafgdMIWEYCMPqc4jU698yE16FwW4viDaPxiSxq3De1dGk4CwM+WDiRfWFjY8P29/c9Jm48Htvu7q79/PPPfgoPQp2Qfg/tQ1w/3gO3K8AjjAXgy+C92PmPsBIzW9ol/fDw4Ao9YvDAy2Fxxe79PM/drYmYbcSfwgqGEByke8P6Y2sZLFwfP360drttvV7PZrOZ/frrrw4IeJMYC/bIaIB7lI8qX4osYwpqX7IwLaqs4HAiXFNwE8kl7qeOGazf9/f37tVBbtxWq+UHKoDW/sf/+B9LqaTAcxeLxdKmIbi44f1BaBIUIhi7AO7g+YFCBqs9+BzCCGazmbVaLQeZCH+B8o7MF9fX1x42hTHtdDoOUoFlSqXl1FbITIB1gd8Gg8HSUcBm5kAcuAIegul0aj/99JN9+PDB54bnSOcZ86TpxDjsgHmNeh6LyrPyoEbWpqhiBUjauahR0QJlYk11gBcmNAOc7IRzoV+9emXVatX6/b5bRufzubtp4EqEtmxm7qaHQMWAY4c+3AC3t7duTcIpEhDI2FyC67CeIpAejB3MH4wYp1z9+OOPvqnEbDmdCgAO+o08ktFYqkUV13nseY70OxNSikFEhLoORduiCyQCQKvoPRIqmLsUXUcWhCx7yruLeM79/X17+/atDYdDTx22t7fn1ncwzaOjI6cnaOBwtw6HQ2d4nKoM4BHvh4spz5+OOQXTwu59MHwwuIODA1fysDEPKbLyfPmc85OTE9ve3vZUb2Ce2JSAndHIWYiNK4hZ5THl+eBxV9rWuCd8xn8FoPybAtl1KbqmlPZ4LMyW129qParlIwJCHFbBGRuQygkbpsCDkAUCYBMb7CCcGJTgGhQXbAQBnaNtDLoQpwqgCJoFf8W9cOmbPcUN4p2gVcRTYyxgnIDllWNNeSc3p/EDgEcsKjbZ1Go135xr9hRDm/LaqJGF6ZT3FUT0wDS7LuBULcP6PfKOckmtXfyPlDNsNjYz63Q6dnBw4LwM9AXZ3Gg07NWrV/bLL7+4ZRz7PyaTiQNb3bQEsIf9IYvFwhqNhtMa5pkzTaAfqAcAFgoQGwR4AxhoAtlOmC7a7ba1Wi0/SABjyunMsEZ57LEGK5WKez9wKMx8PndlCwXeDIB05Fnncdf5Uw+Mzi2vxRQWjMpKgIr/ClD4eiTQUy5ks9htwc9Gwj2qC/dAm4f2g93E/X7fTemYfMR7Ioga74K2YfYIUheLhe+ihsUJG6+m06nv3mRGiA0CIBSOPxkMBp7EHwyWF9d8Prfz83NrNBrW7Xbtp59+slevXrkWh3i9s7Mzu76+dg1ONx+khJKa3nmsVUjxHEWgVIFa9Py6lFRfU0x91cLRMVLXqioELHBApzi3vF6v27fffuvWcezEh6Dn+NCrq6ulvHZZlrnyA6smdnHCRcoJyaFwwSIAmpjPH49ZBf3DigDFC5YPPAMvBVz45XLZj1tFDBZAMGK2OBtFlmW+KxaWX7PHU3l4I6CONa9/pVkFsVogFCP34brRq9lTu1LhJUWgBQIpNW6RMqqKFZ5HDmgItJ2dHTs+PrbpdLoUF8q5P0ulkp/qAyELdyrCPzAXHNeGOeQNKwyWYUWF8IUhgGP5WFnLsszpN8+fcm+Wy2UHLVDuAXYBTpGqivk3wDCyVZiZffPNN/bx40e7ubmxb7/91rLs0VIFbwjWF/MCVRCi+eO553FQuld6XpfCdIu5VvACOov6wX0vcgljnKrVqnt86vX6UkgfrJ7gwXt7e3Z5ebmkGJmZu/dBl3d3d7a3t+
d8FLIWFnTQFitgsGxy6qs8f0pdZva0mY/5sJk5r2esgrAn7F3BCW6KkTAWwCOI20b9Dw8PzuOn06mv4XK57AcXYMyxEQv54d+/f+/rlOkR33nO+BrzlCKPTgojohQCVDAqaMRFjVGwUmSq50FW4BNpmzyAutgBSjudjr169cp6vZ7t7+9br9fzhLwgJgh3uOhx/jnCAsD8Li4u7PXr1x7MD/M3iBEWTPQLwh3f4c4HEL27u7NWq+XEig0pYP7oW6vVcivsu3fvfNNKvV63//iP/7CHhwePsXl4eLBPnz75aURINaHjjf5GhBAJZ9VYwRgxX2ppWkcBj8JtVfpEUaHNdBYtNtZqi4C+0izcRNDqv/vuO/vuu++cUYBhmpnHSUP4XlxcuIICSxXeAwaNOGjEtLJlFKl3yuWyx5OamVs94XpjpQlaOtz1XCfylQ6HQzs8PLRWq2Xj8dhGo5Fnl1gsFp7RAqeSIM670Wh4pou7uzvX0Dudjh+gwYKLrQGRJq+uJTNboltVmJUmdI5fuuj6iuiWaU6ZfWp9psaBP0OogQYQ7jQajazRaLhQw2Y68EdeD9hQAt4E2YG5h/ICsGpmbr1EWwAUIHsAEPjgCFg1QZ9QxOBh4OTl4Lmwjs1mM2s0Gh42g/ciXArrBgpjtVq1m5sbB+MAr69evfL47Ovra/vw4YPd3t56GsCUMo/CLlP+z3IuJRf5vlWC/rcq2kb0m0En7jP7PB2g2bKVX62tDHbYe7q9ve0bjTudjm1vb9vV1ZX1+30HrPD6gFdCIWblB56om5sbu7u7s4ODA9vc3HSAB9osl8u+BszM45RhzURIAJQlGMewEQ+GLdTFYDLPc7fo1ut1u7+/t2az6fGiHEt6e3tro9HIY08RPpbnuVv2kfrMzKzdbrtBAVmLEMZi9mg9RbgVFD8UVhx5rTJo5TlnemAli/nPKuv/syyoRVpMZEXSRYn7+LqCU647AljaJkwogtMPDg6cqcI1ORgMbDwe23g8tu3tbdvd3fUdo2Ba0DxgkSqXy54zD1Yk7JTHjjo24+NPd9CBSeK8ajB3aO9mtpQcF21CuwaDgX38+NG+//57WywWvuMbWhLynkL7j5SFiGj4t0hJUMuKulJTwj1650sXtRqrVqeFXcsoRTSM39UqgntRH+rGySSHh4f2/fff29dff+25Sh8eHjxoHTuPOTk/aBpMB3WzKwhniEOoc8B+qfSUrJmPkgQtg8EBoCIUBf0CCAV44WwY2BDY7/cdhIxGI2fKi8XCzs7ObGdnx5UwWFBvbm5sNBpZp9OxPM/9SFZlZjz+PJ9Mg/o59T1Ft+tokUq1GUUBTbRuua4I6KIeBpjIJV2r1ex3v/ud7e3t2cXFhX369MlDqGAlRSaGLHsM5ciyp5yMMAZA+R+NRh77Z2aeaJzDCiAAATyZx0IwwsoOHg1aQ4E1DLTcaDTs4ODARqOR0/i7d+98XcF9muePOTX5OFfORLC9vW3n5+c2m81sd3fX/u7v/s5PLWo2m0sAA0YDnQf0I/Jq6bziN/49uncdFSxdo/objw2HK0QKJ55hfACgVq/XPe9prVZzBRzxlL/88osbsuAhYks76BSfzcxl/ubmpnW7XX+fmbmcB73xusKc8qYoGK+Qbg8FFn/EhSJ21ezRiwswnWWPqdGQl9XMlsAsZxG4vb11QwTi+w8PD+2///f/bsfHx/Zf/+t/9fjw09NT33MDowI2raK/zWZzaZMu5jAyVLFBAf/VExAp1EXlWRZUfmm0cKLFkwK1q6wXCgC0XhQeJD4VB0eKHhwc+MCAaQ6HQ7u6urLFYmF7e3vOhJBGAcml4coHsUCbxs5rZqbQwLIs87ykrBUClPKGK+w2RV5IbB6AS+3k5MQqlYrd3NzY+fm57e7uWrlctv39favVanZ9fW2j0chjwZg4dA5UIYgWvjKNaJ5SDDSlOa1DYTrSVC76u9lyCjEzW3qWrSEoukj5OmL38B3CG1acVqu1tI
Bns5n94Q9/sDdv3riCc3p6ahsbG74RCfR4dHTkYQGcXeLm5satAnmeOx2jb9hMADpETl4IUlihwAihkaNOHBXZ6/XMzPzEEqyPzc1NGw6HVq/X7fj42Ont559/tr29Pdve3razszOvF67iarVqP//8s1swzD6P7Y1oi8EY5ovnSD09fE/q/zoVdXFGwp1pmEGqWubUghGtZ1zjAx+2trb85D2ETSHGHvSHddDv961UKrnrHBZ6CNXZbOYhH3D7cx+wzuC5gvU1z3P3UnFoAK4jtRXaXSqVPI1QqfR4oMDBwcGSNRgetpubG3v79q23B4AZeS7Pzs6s0Wi45woxsHmee1aLg4MD39gCEDQcDpeEMs8PW5yVB0WGg+i/ztk6lRQG4N/4dwYtbARQoMpjApprtVoeP7m5uekWT/A5bJKaz+d2dXXlwA/eIGxYgnIOgxSUZWQEghcIshYgjsNAGPSC/0LewwPw8PDgfDnPc1fYUO/9/b1vUoKRodVq2atXryzLMm8fLMKMUfI89ywEyKOOsADw3PF47CdMYac+vGDdbtdPhPv222/tD3/4g41GoyUMyJhCsYR6uvgzMBMb9yKcp6UQoLLALrIuRAKEAWRkdYqEDX7jaymhw6buLMuW3EXI2ffVV19ZrVZz9xFr2Dgu1OwJQMB6CcBp9pTXMcsyt3JBoIPwONlvu91eyuMIM/qnT5+sWq26hgfrL3brz+dzPyYV1uCPHz+6y2l/f99ms5ldXV3ZdDq1g4MDOz8/t0+fPlmeP7mkImtQNP48B6vc9tHcRkA2Bdheoqh2HoFvvd/s89hbs88VNYwtM1B+TsMEsKHi6OjItre3PZfpycmJ9Xo929jYsDdv3tg333xj4/HYPn78aJVKxY/Kg/VzMplYt9tdip+DNWqxWCwB0oeHh6W0acgwASGPnKYIWYHbnpUlWDqh+QOoMgDH2FSrVev1eksHTEDpg8UNghlAot1ue5tgjWPXV6QMqBKBokoFz6kyTH4mEoQvXXT98nWzz9ecWiaUJqM6uB5cBx+BAGs2m06r4/HYzB7dgnAr7uzsWK1W8xQ+8BbBo4O5xgY9FNABLPIwLiB5PsJHkKYKPJmzlXAIAcAkjwXOV0eKM6wlpOuDVQrCHlYkAFKEcSGk4OLiwr0GR0dHbpA4Pz93QIMQHnjpsClG6ZC/4z/okOdGleWUbFwXnouSWoP4LRoD/MaySsGpfke+UJwMCbc+Yk3h7cG59VmWeQ5pWAVHo5EbrWCJxDwgjM7M7Pj42HM+4wRJs6fDfLB2EA4IWaxZd+DVgpID7yn45e3trf31r3+1n376yczM+wUZMBqNfHywyx9eKhg+YIzBJtz7+3v7L//lv9jW1pa9e/fObm5u7JtvvrEsy/wI6sPDQxsOhx5Wdn197dbYKAyF+WYRrbIMNrNCgBuVlWmmirS0lKCPgGgkWKIFq0Io0i5xH6yPh4eH9vbtW49vAnGenJy41crMluJG7u/v3YKEtgKAXl1d+Wk+iH8CE61UHk+tADA2Mw8NgODmhL1Is4KUPMgNCbM+fuck68hI8P79e7u4uDAzs1arZcPh0IbDoe3s7Lh1DTtodfFG8xQxBQ1g1nujuUxZnNZJo9fYGS5Moww41bIU0SpvemCBqLTJ9ZTLj2ea7+/v2/HxsX399dcO2jDv19fXDmZ3d3etXq/7+8AIABQRJpLnyyEoYJqgQZyNDs09yzJPl2NmvjMVjLfdbrslAW5Q7OaEQre5uWk3NzeW57lvmAKzub+/d3rEeeTX19fW6/VssXhMsdJut21jY8OPJhyPx+6O+vTp01KuVNXCde6i71qYEaI+VXrVUrkOJVKoonUcWdg0FALPFgEa3IfxPzw89Ewiv//97/1UHMSsHR4e2u3trd3d3dnFxYVtb29br9ez09NT56XwLkEAj8dj53dm5tbQxWLh1lTmi7zbHnQCOsrz3HMAY44xjxDknU7H9vb23IrL1ljwZ8TeoT5kdG
k2m3Z/f2+dTsctq81m09+5s7Njl5eXtr+/b81m05O+T6dTe//+vRshWL7wf/RfARnzIuW9akRYN3CqfDC1QUzlSOQpYOWLARLL3O3tbd/IVKvVljxNHC86n8/dos2pG6Fowe0PRR0KNOKYLy8vfWMcDGNQlrAmkOIJafIA7La3t5fCObLsKYwAoBKbrm5vb+309NT+1//6X34UdqVSsb29PavX6+5hhbyHBRUeCtDCeDy2i4sLV+ra7bZtb2/7Ju2PHz/a//7f/9tB7C+//GKtVstDXEC3APaYR6U1ln8a247+w8CC+9mLo/guKn9zHlRFy0o42ij+nX/jTmnn+VnuNAsUaKuIfyuVSr4zf7FYeKqPu7s7N8/jN7U2QZOBJg13Psc/VSoVTyUCRgfrKqc74RQ+DGjAyJAeCBZfpJlC3jNYVLPs0Sp8dXXlZwrnee4xVIgN2drasm63u3REazRvyvBALKrFRISjwlIZjL5vHUoElCNgqpoeP8f9ZHc97uc6FTSgbtaYzcwT3EMZQe7Q7e1t+8Mf/uAg0cycxs2WY1rVzQkLE5hylmXOSNmtUq/Xnbll2dM5y1DCwOjhdeAzn8fjsVuFQK+Iq4UwBhCH6/6vf/2rW5vu7+/t/Pzcg/5Bz41Gw3766Se32oLmtegYp+aMw2t47phvRYrUOln/NTQHJbXeVhkRihQoBvCgC8Txf/fdd0sbgq6vr202m7lVZrFY2GAwsMPDQ2s2m27RmU6n1m63/ShebFaFUGehBkCKz7BwIaaUcz8jcTjAKJL2c8J+M/Mjg3u9nrXbbXv9+rX98ssvtru7a91u1w8EwFjD8oU0QhcXF0t5sy8uLjy9G/p9c3PjoS79ft/u7u6s3+/b5eXlkmWZ6TEad50/VXhVmCt/WheaNYsNH2gjx+OjsKKiSqP2k8cKvKZWq9n+/r7t7u76JmYc8MA8GLGoHz58sMlk4hvaABTB+/i4XsRAmz1m4EHS/MlkYp1Ox+/l/iIkRQ1gzI8ASqEo4ehVhJu8f//efv75ZzeE9Xo9X18AfThoAhtZsWZAg4vFwprNpl1eXrpnAoogPAA4SvXy8tL++te/2rt37+zNmzfWbDYdHGPjK2SEYjzGDyoXGbAyztMwgVUGrUKAGoESboQSVUR8SnTMdFMWAq4jsiSUy2Xb3t528zcTwsbGhp2fn7umBOCIBLpZlvnGEx5UxH7AWgpwwOdB6+kScM3zTn9YIHA+dZ7nfloKtBHUj76Ymcf3IR0VgChi+cAwj46ObLFY2Pn5uR0fH1upVLLr6+slAkpNPDM01lhTDFSBbTRf+sw6FWXgHEuqbY0Ed2TB4FhWCFm+n5UoLES4mfL8cac6QjlQYMlBqp5ms+laP2KoQLfoB97HeXc5MB+MXMNs4LLkuGi4tnAiCtPxfD73nIGlUsm63a7XA2EMpY7XyGQy8dgmnCoEC4fZ4/oZj8c2nU7t1atXdn5+bp1Ox3788celfJhcmLFhzlDU4pJaCwxE181qiqLrqYhPskKk9yoPjpQqplv2EJXLj0c3b29v+5GHEG7ge+Vy2WkSIUvwLiGMZDwe22AwcFc+8xnOLQrLFwwIyMWrlnn0Bc/yWgB/39rasr29PTs6OrKvv/7a9vb2bDabeTzhbDZb2lENXg/Fr1Kp+KlE29vb9t133/m9CEOAUgeXMLwhkEF7e3sefqUeGnWZ8pzwvKToQgsrGS9ZovWqntAIxOp1rgfjgTHjkBBsMO10Op6fF0oOvKY4jWxnZ8e63a6HY+zv79t4PHYaZy8UNnDC7Y4N17zXBBkBYICYzWa+gQ6xz4hdfvXq1RIfBiYBz4dC/uHDB/vzn//svLzZbDqfhHIEyz/qgREN2WEQ0397e+u5hhHeglzuGFdsgry+vrazszPHVAiF5BhxnhdVaPm3FPBUmi6icy7PsqCq4FViY4SsjY3q0GuRUEl1DkIXLkmc742dw91ud2lTE57HJLNLh8/d5T4ApIJpA6
CWSiUbDAauIYMZgmHjxB6kAtrZ2XE3KO+qZgss+jydTm04HLrbfm9vz/I8t48fP/quw3a7bZPJxP7whz/42HGONRU80VhGYxr9VqSpR0BM5/ulS0RDGqCt45WKp2EFK3oOv+kYYYdpvV633d1d29vbcxd7qVTyDU739/fOpLAZBa4bZKDgd3Je0yzLnK6guUP7ZxANiwNbVSGwOYQFlivk3kM8tdnTgRHwQHQ6Hbealctlu7y89PvxHjBbeDnQH1g5OL0LUv4w4+P5Uz6BccacaQowXI/40/+LNv9blYimtO0pIaDWilRRmgY/gqLSarXs8PDQ9vb2PA/v9va281XMm5l5SEieP1rEEc/M9ImclMPh0PkweLCZuTLPyj7iQBnccqgVvwNthxcAG6M6nY7Hp8Izgf+IXcV6xHghzAXxqwwyYQlEnk3s8AeQbjabS0pgStFlL5uCU54fpgfIGaZlVT7WpWi7UoC7iNb5OaZ/zA1ooNfrOUjEJk9Wcs/Pz11xuLu7s1evXjkPxIlj4GHghVBWMOawknNKKMSfIgwrz5/SlJmZnZ2d2ebmpr1//95DXfgAFd7DgmcRxlKv121/f99++OEHp6fBYOBHWWOtcHgf8EOWZZ4LHgYMtBl0e3d3556Er776yv7jP/7Drq+v7fj42Nc7rKy88YvnJeWRinBfdN9zaPbZAFXjuLgwUGVNoUhDigBtCsSqdRAMqtfrObPp9Xqe5sbMPLaPXepgNGDeYLIsnBE3ou2BhoJYFdTBoAZaGE57AAFiIcF9AIsUFgKsZbAI93o9q1Qqdn197UyXxxV9gSYG4c9jVjSPyhQ4TooZX7TJQktkuVmXEoFItZpFQkKty0o3avnQzwzeQXsIH6nVaktH2HGCcDNbio/DxiIGWqgT84254xOY4NYBc72/v3erGOgNDJvXJ/oHd5mCYMSzQtmCILi8vHSlka0MfFpLlj0mMQeY0HE9Ojqyn3/+eWn8VDilFATmNfqbxiJjzUa0vU60i8K8Fd+jNcmKYtH612cY4GdZ5jkke72eff31164U7+7uWq1W81hLbOhgOgWtsIKPz8y/mOfCc4Sd1KBXbAIErYLvwlLLscoQytjkB9coThVcLBZLsbBoz/39vV1fX9ve3p6f/IfY18lk4kAAa45Tv6kSxfMDV6u6tZmOFbhFQDOSjxGfx5p/6aKKE/clUvpTyla07nm9IqQDStTu7q6ZPeb4xDrAH9zbHz9+tDzPlzaxYVPfeDxessrCyg+Qi/cjxATfMe74jPlhXn13d+f7RfhgCjNz7xV4K4watVrNvv/+e/vmm2/s+PjYtra27ObmxjY3N22xWCyFD7CyhZzSiJ9G5gm0DR7acrnsVlUYG5BGEzQO5U03STEPZnmqijEbGiJwCppY5cV6FkD9fyGuIk2/yM2K/6nnYZXq9Xq2t7dn3W53aTcy7gcDBfOC2xNWJiTVhRCH5oOTmyBc0RcwSsTl8aRAM8FubQBKZmLYBY3nGCiAGNj0D7co7scZuw8PD3Z9fW1XV1euMULT4/GP5k7Hla/x+HPfcJ0JjeeX5z0lPF+iqJVBQxhS95otCwIwJHxPWatxD94DoYpjQaG0IAYOghjWHVZ6+D2wVvE8Ic5Jg+9B37wRwMw8By8EM5gc3O0oeJatO8iTxwIez8Fq0O/3bX9/30HJfD73gyjgCoWLFZvBOp2Ou0ur1aq9e/fOT36DpSOiU/4eWUxTgh/X1AqgSsU6lIgelTdGfBi/a134TcENCxUksu90Or6ZApZPHLULEDidTu3jx4/2+vVrt4SzolSr1Ty/43w+d+XIzPzIXjNzIIrT+szM4/KwPgAu2ZqK+H3wdihnOM6y0+nY4eGhr3vcB0sZXLqbm5ueb7rVavmuZc5CAQvXbDaz8/Nzj99j2oPFN8seUxlqKArzRlxTq77yW8xxxKd1niNZ+hKFQRoblSK5ju+8NjUcIOov5hmnO0GhAGAD/wKfg4fz+PjY3r
175x4BWBt3dnbs9vbWZrOZx6SysQphJtgpr4XlBOQ32suKOHgqxoDbyQrdzs6OffXVV/bNN9/Y69evfe2Mx2NPp4XsF2yQQGm32/bNN9+4ZxUKFWQGvGKQFQDFCJcYj8eW549xuxcXF+7l1VhqnVeMBbCSKikp+bqKbv+mTVJFjJKvRZpUBGK56DOscYPg4ephMzyEKFLw4BSP8Xjsp0gAdPJOeySaBlCE+wrnNXN+SQBNMGn0gQkZBMOxTNAi0D8QJVwLqBf1nJ2d+U5RnOaAvuH8coBzBEH/+uuv7h5jYlDXZRRCkQKgEdPQedJ5Xcd4vlXWCzP7jCmqcGAAhDp5XBXUow4k9UY+3m+++cYTPkPR2djY8LPowVDAIHinJt6DOCS8AzRm9hQoD8EKpornAVjRXuyUxjW2ePLaA+CAC4zpF4Dl4ODAtra2bDweO+gEOAV4gSLV6XT8tBLUA28HxwjqPDGgivhHiqfwb3/Lsy9ZdI3pdR0X/p0FIFt2igqs4ZjbH374wb7++mvrdDp2c3OzFOaEDUj1et0Gg4GHIvF7oAjVajVPHo5d9ABzOFEPIS7go2ZPR0bDuqOKFIQs2g23bq1W88Mg8O7JZOI7sPP8KQc1wmR2dnY81dnGxoYfhoJ46Sx7sgJ3Op0lno8xZxDLeTQj/sr8Ql2kET+JFBXwc63jpYvySAWqZnE+9FRhYAcgVS6X7fDw0Pb39+33v/+9HRwceGiFmfkGJ34f5rvVajl/Qoje/f29h6XgGnt3AFLNnrxbZrZ0DDSwCYxdKFCcYDyDJwk8FlgGNLyzs2OvXr2yg4MD63a7bhjgjYSo08zcmwXaur29td/97nee2hK0OhgMzMx8lz6APNYPMhwAJzFNIb0cz7EqxXw9pSgr7Zs9DzM86yQpbgRf58mMGsrub3b3R/UzIeo78DsTKI5KHI/HfiIPBC6S96K+4XC4lNCZLZtwHUHTxm4/7LQGKEW/wODYWgSTNpgeYqxAmGBiWZY5CMaxZUhxMZ1Ordvt+iYUABx2C2D8YAEAaIC7ixmajrUKPWYUamVhwkkxEKUFvf7SpWgBRQBAxyBSqkDvCvYVrOIzmMu3337rghZAALsjB4OBn5hj9pS/FBuVHh4e7OzszK0BsCAxk+MTUfAerDe462GFBf2CuYIJ43kwc1YIEbeE/rOiNx6Pve2DwcDzEgJgQJhCEVQlCm41nJiCNC3qVtJ54jXJnozIpc9KVKSMrQvNokTKotlTJgcdj6hfCgy4XhQAAIxdrVazarXqXikzc8s3W2t4V32/33ceybFu7BWAMgSDAHgt3OxMFwADOL6RN3VA+CNUBpYf8HDQEayZEMR80ARoHgUCHzwYLmBsPsG7sZbY8wWZ8+7dOxsOh9ZoNOzi4sL6/f5nu7wjZT4CaRGP0nnjUDq+9tJFjRuMAUCnGlLEFrkovhb/UR9oqtvt2uvXr+3Vq1dLoXqQy+pV1Hh98Ip6vW6z2cz/I7cu+Ab2lUBZ57rAU1FnpVJZAq686Qhx91gXuB/xzs1m03knMhMB3CLsEGmfIBvyPLeLiwtrNBquWMFzgdh/ZGPhEDKzR3q5uLiw//zP/7Rff/3VNjc37fXr196HwWDgdMz0piA0sqgyLbDcLMJ3qfJsCyoTChd2nfLLIhAQgR3VqKJ3gDFWKhXf2dZut+3w8NDjRcfjsWVZ5syJzyvHrjucJMJFj31EUmm8E3FJYGJYAHBTcYzVaDTyhON5nrsrwuwpvyCu53nurq2rqytnshzjYrZ86giY+2w2s7OzM+v3+zYej90txuPFY5kSxKn5jAhNv/Nz66TFozDNpTS3CMTiGR0zrS/6zwoZ6AT5+WAZR2xyqfQYZA+LS7vdtjzP3fo5GAwcvIJBYSMe5zwF04YQhiuKNykhxgkgA+0DjYOm4H7N89zDYHq9nlsaIKThEoPLvl6v29nZmW1sbFin01kKT2AAafYUM1sqPSbLxg5S7KzGJgEec1VuVb
kCGOF5ZJcUrikdMw1Ea+Gligpqs883G3AoCARxyggQeQnwH0pLq9Wyf/mXf/FNfTg1ihUasyc+uVgs7OTkxJUR7H6HCxZKFXZQq0Kf57lv1ND546ObkfYMgnU2m3mqNligsAsfnwEiWQ7Aa4b1BZCgBhRYZ9EeBrQ3NzduqZtOp/Zv//Zv9v79e2s2m/bq1SsHpTc3N3ZycrJk5WS5g3FHvxWI4beUbIxkq877SxXGA6rIM1hlvsD8k+tRkI557nQ6dnR05HwQh0Pc3d25Io01ATc2wjsQOgJjQZ7nXge8Wt1u16rVqjUaDbu6ulqiD9D67e2tg1VW6Dn8DxukOWsPFCq8E4oT0gvC49vpdBxYIpQPnllsSry/v7fhcOiyoN1u+/gjPGc2m9nFxYUfB4vfZ7OZH619fHzs2VYuLy/tw4cPfoABxk/5K+aU+VSEHXB/CkesKisB6nMqiQBKBIZQlJFGgFSfgzAF6ANDwq5JBAhDSINYkFgcmhdblMyeXAIcDIz7YPU0M7cWId0JrA24Dsvpn//8Zz9VB6B4Z2fHiQx1zudzu7y8XGLsap3DIh+Px9bpdFyrQz/B5CGkoG1FViImCAVdWpR4mNlEgf9R+MBLlyILhDJKvhYB7ZSCpYwY/8F0arWa/eu//qsdHR1ZtVq1er3udAf67HQ6dnZ25hZJto4fHh46AAVtQjHCXKAPHPIyGAzcWmX2lKcXcwTLLFu74HZttVqejB2b8XjdmZlvGMFJWMh5ifczHWNtgYkPh0O7ubnxpOZIuTWZTOzk5MQVRvRR5wCFBRjeqeuH6+A1ENHruilYZulwEhUQKLrm+Zr+Z0UFh510u13b3d1dSuHE8cZmTzvJt7a2POMEeN90OnXBBqULR0ODhrDzHQAMYQPsvoS7EWlyOMaOQwNYDgCEqLsVihXGAwYIxBzChblYPObOhhXOzHwfAGgSceQQ+pVKxb7//nsHNDc3N3Z7e2uXl5dm9uQGTs1tNC9clD8pTTDoW5cSyZrIWMC8VrGCglmzp7Cot2/fWrVataOjI/vhhx+WrPQwKMGCCJwBGY+2IG4VhiTwoHL5MRMJ4uf5kB18hjGINwZy+j/8hjXC8bIAibDyI/8pftve3vbwFM4UBDo2e9oQCDmDNcx9QJgYACnLAuCccrnsmwnBOxFr3mw27erqyteGzmmKn2CeIgNQZAx6Dt3+P+VB5d+ixjOhReiZG8dgVTV8vhdMC0Hui8VTwto8z911Op1ObbFY+FF30OIxkWBWYI6LxcKJ1czcPQ8GBxcnM7V2u+3xH+y2vb+/t93d3SWLLbQPju9DjjK48BErxW7dxeJxYxQ2dX348MFzpLbbbRuNRnZ6empZlnnMFsaLNdTIpcL/+X4eb51H/FdNKnIlrkNJ0aEuEgXzuM7PRto//1fhwbGecDuWSiWnC1iO4E6vVCp+ZOL19bW7WOGyh5BX9zUYM9oABgOmjHyqmDcwaihUsHCC5pDHD+5euFIxz7g3z3MP2MdpVXCLcdwpslpkWWYfP3609+/f+25U1L+xsWEnJyeeRJstKzp3PB88RxwzBtpWGuA55+cwlmrleumiNBeBT7PPrRZM0xryo9ZkjimGsru1tWX1et03kWD+8G4c62xmvuN/sVj4iTyz2cwODw+tVqstWcPhys+yzIUpLJ3gsRy2gnYiTo7HBbwfghX9w7qDWz/LMvd6Yf2wpa3f79vx8bGZPSp+4/HY2u227/zGcaqcixgeK2QJwLvhCkYObh4npl2VdZHgNzMfD5WrkcKyLoVBivJEM1tam7jP7PN8qErrWAcwDH3zzTd+zOz+/v4SbwQ9QemF12kwGCyFc8DSjQ2ryJdaLj/m/8VGPKSzQnw91gNc6HCjo4DXoyCGHxgFsfatVmuJhsFrNzc3/TMMEwg3gZcMfJXDvsrlsl1dXVmWZR4iYGYuBxAOyftozJ5c/qyIXlxc2OnpqStYrNgDP0VGH8UXTAMRH8N8F5W/KQ
Y1hYaje9FoXYgqcLQzURtgyu92u3ZwcGBHR0d+EhSsmjhqDO4pAEZo2Xd3d8508S6OPRqPx241Ve0bjALxKWbm5nkw2uFw6BYDEBcvCABhHJvHG1iy7OnkHwbkk8nELi4uloKccRIP6sEYcCxIZEVhxpCaO52riBBRn35eJy1ei1oiWDlCURdc9HzUR6Zj/EEbhptmPB679RRAD7vZr66u7OLiwuOPIfRubm6cCSL5Mgs81APGDWEMcIgk0Ogb3FLQ6AGQUQ82imAtIuF6nudLFgLEKCHpM294wbs4NyofWPHq1Sv73e9+5yADfQcjhHVCmZnOYSTQFcTqPGKsWKGCtQ3tXica1nUXKVy4jz/jd41ThGLJQh+0ir/5fO5hSnCbMuAbDAY2m81sMBi4goxUf/f399btdq3X67l1E9dZsJk9uc0ZQKJ92EgF3q0Cmb1d4JMAtbgO+oQb3+wphy+ssQgZA1+v1Wp2dnZml5eXfixku912CzHLDMSJA3hgDQEQwZChliWzz2NFUzSXomc2PkTGn5cs2o4IZOI+7h/mTUGQhrFkWeaWxDdv3vg+E9AAZ124vLz0vMyj0WjJQo7z7DFvWfaU+gybhvh0yGaz6RupkIYJa4PBOEKqYLlEPDN4Y5ZlHmMKrGH2dGwrx1JjfLAuoDC1223P6Q7++n/+z/9xmYO1B94GzxWUPN5wDS8arLK3t7d2fX1t7969s7Ozs6WwQbNlEIr+sDGA13hkyMEYRTgwVVZaUBU9R/eggQpWIiCkzJSfgbDlATB7yhcGLQcxUwjeB9Hins3NTWc6AJB5nrs1iC0LHGMFQcngFBMIqxS707FBZT6fu6YFQI33IBYE7cNmkO3tbbc6sZbF1o/z83MbDoe2v79v33//vTPL8/NzH9PhcOjMMLICqbBWAmJCwWd24xcxWNXkUyDuJYqGOkQAR/utCybSEnWOzJbHGEyu2+16PkkwHwhFhKB8+PDBptOpff31177xCfUhxgib6rLs8YQpZlzsDgeogHseISGoD/n1OGYagBYbUjhnLwt+HsvJZOKun1Kp5Dkx0T9sUoRVA9ehTLGlY2dnxz58+GA3Nzd2dvb/Ye/PmhvLkjNd2DfACSRmjhGRkZmVVapWS6aWtVn//z+gvpFaJtNRlaoqM2PiiJEYSAD7XNAex7uda5Oh099XgQu4GY0ksIc1+HJ/fVi+rqzf7/tzdZ5SSjiCT52vKAz185RxHGXCJlAKfKT4WNd8BLEpXo4yulKp2PHxsX3//ff2448/2tnZmQMsZBWKdGdnx71OV1dX9o//+I9Wr9fdU4r8Ja8aftB1A4CEuBb+013E6v2nn3hXNffPbK3A2XCIYwGCR1erp537HBONtwnvEkbc0dGRA1Et2Ua0QQ2+PM+t3+/baDSym5sb+/jxY+GgjJQih1KyNs6T6jCdz9Qa+NYU9YkawWX4IK5zNa70t+6gbzQadnx87DoZvQW/krqCY8tsnbKhB5HAs+Rv1mo1T9XQEDrtBF8gH+FvfuPAAoDu7DydzEZuv1agAGCjM0gJYDzwwjMetBUPMHtjqKTRbDYdiCpfwbvsMRiNRlav1x2nmJmXUfv5559tOBw+K6sVjQs1JHQ+y2RMvL/MIRnpv3SSlL48lfvyUmguIm9lRhodAYHuCt3b23OvDgIDawHGI+dU3dC8T61nHSgFZIvFwu7v790DmmWZe72whPSEE+7X3E/dLa2VCfT4SO4l4R/rD+tsuVzaH//4R5tOp/bb3/7W/tt/+28OdHu9no3HY7cQqVVGO1/yRNNv5iqGRlPhp9QzIkVvzSZQDDExD6kFVAZ+yvhbn0W/FdxnWeb1cM3MDSnmGSPs/PzcLi8vXWCpt0l5HIsdw8jM/D1smhqNRnZ1dWVm5htXogJTRb+3t+epIVmWed6TepZ0DBG2i8XCd6be39/byclJYcMgOX46D9Vq1Yu6m63LCP366682HA7t5ubGNznSv7Jwu46/9q2MB6
KxG+9V8PS1QvP/3xQjT6nvoSiblS/j90pcpyX3SFcivw6Dy+xp/LQe6Zs3b7zYN2OLgX9/f2+9Xs/L2lBTFQ+o7nTGcWC23sikUSuVrZVKpVA8HOXNBhjmGr2gYAVdQx70/f29h4xvb2+9XZrXiidK05pizqF+NxgMCqkqOpdxzqCYdhHnv0xvRtoE4yr2K4LNuGZVFylpn/U7jrDltCPkLdepUUMeKAa3pulRmxp5Rok9DBz1MCJ/AZikwGidck33A2wCQBUL0E7lcUAwmAM+ptSepvzlee6GD06PSqViNzc37jXmOakDXiqVim+8mkwmbsjpWhoMBn5gSkwr03FWfo7yJ+K+FPb4Wn59NcQfH5QShCkGSzFkyjNS9h6zdT09FB95G5wbqx5OSE+C0EEEAMCIZkVgxQRRGFe9E/RHQw95nnuYnlIogAhoNpt5mQcdGwoME/6H8RHWf/7zn+3x8dH+5m/+xn744QcHNrT57u7O8jy329vbQt03tWiiUlKwnjI44hzH+dR7leG5dtM8UNHbGa28OLdmz/N0+Yz74jik3qkVJhBUGCpYvYPBwIvna+09NXj03akTpVDuZms+xouP1yIW7Vc+Vp6Pc8czIAUEhFjN1oX9Ke6ON4m2cx/jwhhUq09ltljXbJqij9r3lPcoKjwdNwVmKVCm7WLsygySb0Vf047Iu2XPUeAT1zsKUo1U9Xoy7ihuZI1GfOAhxp5KK4TUFWTC48vl0iaTif3888/WaDTs8PDQ9xfQXvKuzdZ5crpZUNcBJwPRTu0zvIqTAxBKvixRLDavsMFENxGarWU7z9a82MlkYre3t/anP/3JNwvG8oBRFkdDuQzAcn2c89fm/luQylyz57pGqQzQaKRWnUhm5kdGU6+Tsk3KE/BjPEd+Z2fHBoOBbyBV+cka0Bq5tVqtUK8ZPa2OJ+QkZf7AGEQD2BiolYW4h4gDUQCqAtAPxRGkwCDHAdScHjWdTj1FDJ7W9C7Gm99sVlQ8NJ/P7erqyj5+/Gij0cgdZdwTjSadX9qciljRT8WIZfIoRV/tQS172Eu5MKlG6zNTCzUqUXV/UwKi2WzaycmJ74QnpEioG6ufZzJIeHF2d3f91BAsK/JYsfQRpljLKFcYCoakT9y3s7PjNdUYH4RdnucFy55NT41Gw8fg7u7Oer2eHR8f2/v37wuKYrlcureKQv6qvFNep9QYR+smpaAjqOMznWMFqzp/m0CMefRAmK29iXGhxbFQD3iksrEh0X1vb89arZYLUi1R1mg0PMyCsFAjC77SHcnsKoV/aB//a+UKTRVQcGm2ViJswANUVCoVLzWlO5+Vx9VI0pA9m7/YHYtwBnirl4nPd3aezpb+8uWL5zaqUlFjAVKjMfJwNEbK7mdcdc7i35tGKaWvYCDFn3Gd6r30FQDW6/XMzLz0TLPZ9PlnzjFOSGmqVJ7K6rEpBJmLDASI0g48shoROz8/N7O1AYQXSo0TDc1TYodn8E5O2QE04l2iHWog8q7Dw0P79ddfrdvten1JAAWkEQYFp4zh/f29TSYTG41G9unTJ78PGa1jHmWrGsL8nTIoFBhE4ytlUH9LinxqVsQA8fsYJk7hAPT57u6unZyc2Pn5uZ2fn1u9XveSUmbrFA/kyNXVlW/G1MjU999/7/U9dd8HG6DU+N3d3XVHguYVI5O1XKS2Q6O8mmsKlgDsYkwBUBuNhuMY3dCtOAisoiew1Wo1a7fbBcMJhwG8H/ljsVjY9fW1nZ6eeroYckDxC1TmtIlzr7o1hUtS975ELwLUVO6APlw7nRKIKWBUBmZTwKlSqfiuNjymAAAmkHwoduOR5M9ztKzT/v6+l2gys8LkIQSjQKEdavWw2zWCCrwP5C2h3Hku12dZ5qc36EaryWRiX758sbdv39rbt28dDAM4Hh8f7ebmxsbjsX38+NFDtSQzRwbSuYgU5+C1/6MQjIDgpXd9C4rggzml3Qj7GD576X9ILXZIDRWzp010Jy
cnviNTC9dj/aJ04ScVSCx0QCabkdSCpx3skNbPtf9qtef5ejMSXi6MKzwBZmslS14W4V0E+uPjo11fXzvghhd5L0IqphogUzCwyHcajUbPNvKkAFj0+MYQabwn8mhc32Xv2RRKgZUUWFVPSRlIj3KZ/09OTuzNmzf2/v17B6QoZQ2bs0kDBckRzdGxoPOonmottacbszjFRz30qpgBlQpo+BtATI619ksNpciD8/ncTk9PPfql0THdYFKpVNyYA8gCaPC6Up6NmtZRcce55HPd7KVtVIUe5W78e5OcAnEtRbkb+5lalynScmd4P5XYh0KuO3KNsmMqgwCteZ4XiuEzz8hKoltaTrJSqfgGJcAxshMiTE8UAh0BH4NpVG4BXLkeo8ysuH+A8dO0HI2eKi9QWYWUFd6nqQ6np6ee/gAons1mdnl5mcR4KmfUqErhRJ3zyCOpv8vo1RB/BCKvARl9cdniUYtQGVMtRH4jgChgS94FuaJYV5PJxHOeAJI8A8vfbM3suN0ZaMIGw+GwACjM1t7R6XRqZuvTR+gfz9L8PfKsNK8PBmRXPjtgyWH58OGDtdtt++GHH8xs7bFiTAmvDYdDBxCqRFJjzL0pAZEKE74kXGHGFEUg8K2pjAdT4WGu0//jc/g7Gl18jnKr1+v+P3OGt4fNRfP53CaTScHryXVxUfMceKjf77vyZO51ox7XIdA1J1s3eJiZh17zPC8k9fPbbA0I9QCAarVqHz9+tN3dp8L8rDn1gJEXDb8ARMiPqtfr1mq1fCOAViiIc1c2R68ZylAE7GV8Wsbbf23S/kVj2ex5HliKX77GqwZPvHnzxh4fH93rAlBk/vAeTadTV/psyuv3+3Z8fPzseE+z9YYiwAMeUAqC0/7d3V03WJrNZqEiBGABOR5TrpRPqWaSZZkfdUq4lnAq6wbAyRql0oYCapQ/1Vsoyk/fICobXF5ePjsw5SWei0aEhrYjwHttHjeFIjZA9rx03Wv9VQcRsmg0GnkUlXxNxpGqPMvl0o6Pj52HJ5OJrVYrr5uulUwwFjQtD7m2Wq08DYDTHw8ODtxbir4HgJqto2DgED2wR8P7tBm5iJeUUpQAWg2T67hqhAuwzdjSzzzPvb04KBhvahf/x3/8h/3hD3+wP/zhD55va1aUnfrsVDifz5Wvdf4Vp0TZXkavhvgVNevD+S6+MPXyaClB6omKVhbXkuR8cnLiII9yEVm2LopOqOXk5MR30jEBKEwUqJ4igoUCk3AvNUzpDwJ8sVjY6empWzqacM1nLCgsFd1RNxqNbGfnqbAuwnJvb89++eUXq9Vq9sMPPzjD66aWyWRid3d3dnd3Z9fX114SIrrio0UDwSwaHk2F5l8CmVHxvfScb00RaJdZ9v9fjKnU/XzPzn0NF2GAwGdsMgK0ctSeCjlAqG5+Q1jBG4RiKpWK7zxtt9uFduOd4jM87rRfPVNq+WuInvUCH/7rv/6rvXnzxs7Pzx20Rg+EthkBPxqNzOzJuzwajTzFRUtN6RhHuRH5DopCMlIUpPyOYatNMa7Mnq9JXbd8Fvut6zvydewbcrPdbntpHmo6rlarQs6pmXnZmWaz6fIMpfvp0yc7Pj4uGCXkmRIpwuC/u7uznZ0dL6c2Go3s119/9dSo8XhsnU7HN6Kw0QT5reFPLfUDyEDpw3uAUQ3zk193cnLia4A2UhoLvmc8j46OChukNO3KzOz6+tru7u58zHWO4LMyuaygmHlUPkhdr3O6KXyb8vaqMaVevhj5UFJAg1xCz3Lakpl5HjuyNR4zSrkogCJRIPKQP3/+7BHaSqXiucw3NzdeIkp5HqcQIBLDGl5nLanRb7b2guKQ0BzVPM8dKHMtfYf/tYoEGESP9KVur44lgJa8ao4kBmS2Wq2CMfXdd9/Zn/70Jz8Rjcox0cuvbTRblx6MMjvOIfOq+pS19xK9ClBj6CgujJfAqJLeE5WFLmBdfHG3JqcvROuEUg4AuKOjI8/PZKfd2d
nZMxCswi3LMgd7k8mkAGjx1DYaDcvz3D59+uTn58LsMCMMSGgAYTqbzeyXX36xbrdrnU6nUD/yw4cPNp/P7Xe/+52DEiYW5p9MJr7jGcUec1BTlk7KmIhzoqT3KYhPzavOX7z/W1NUBtEwUn5Viv1UwZG6T3n/4ODAa+xdXFy4d9xsbTljHZPvvLe3Z71ez2azmYdNyXuqVCoe+h+Pxy6E8jz3PDuscQw5raFHm3u9ni2XSy/+v1gs7Mcff/RIAIYeOVbsYtWwlJn58au/+93v7OzszFNamP9er+dpN2bmed/UpUQYY1z9+uuvdnl56XUK41xEYyDOaTSMXuLVFBhVebZJxlUqshH5ToEVn2NsqEGcAgL8xovN6VGTycTq9bqZmSt0Sp2RWpVlmR0dHXn1CTw/X7588Y1OeDoHg4Ef5DCfz/2UKeQcxe1PT0+t3W4XAAzeT0pYwduj0cgNJuVNNbBwbOD1UoBM3ezvv//eQQcpAjgzALKsAdpEe1i3y+XSBoOBff782UFNLNCvHiT4DNLvVaHr3JXJqah3NwWkmhVBN/8rP0fAg76KBqmCGPTqYrHwcnvT6dQL6LP7HplVr9dtMBjYP/3TP9nBwYEb7qvVyrrdrt3c3LiToNfrWbVadX3OPA2HQ1sul54+GAEWpSL5/+7uzrrdrjvPzKwAWPlbDSiMQvgTGc8aQn6CRwaDgddbh4gQa7qWPsfMHBxnWeaeWg7ZqFardnl56ZGEwWBQmCf4FIqGcEwhUvkaUw+UvoZnv/qo05eEZbxOFUmq0bFhZaEqBoHJI7mYnBPdrIRwYhclYfrz83NP/ucEKArm8yzeRV08UggoqEuOIIDw9PTUrXUtBq1CRq2l5fLpdIZOp2MXFxc+wdVq1a6vr+3Lly/2008/WbPZtC9fvthgMLDT01NfkHjXCMMuFgsbDAZeT/ClyY+elGhgpEBonKeX5m6TFLsSfXvJ+kuNRcyxiXObuo+F2Gq17M2bN57bxAao2Wzmm4rIYSZsg3JE8FAQejgcuqIj/EieG2FTSubgTUUAAhzN1kc49vt9z7/meDv4X8v9LJdLP0lFc5cQxMfHx64IlH/4rYdd8Ews/N3dXT+J5/b21sbjsX3+/NlDSi/NE30pm4Oo+F/yykR+Zo5fs+b/WqRtVwXx0vpTPtf+cx/glc/wxjcaDet2u/bDDz84YNP0pTx/qhbCxj8iQrVazS4vL63dbtv5+bkDs/F47G1mtzzrgxOZzNY7kznQIubsI9vwlPIzGo18J3eUT6SQIMN5r3qglsulNRoN3wimClSjYAACxoK6p9SS5PTBxWJh/X7fWq2W3d/fO3COfGaWrnSSkrE6r3qN8kbqum9Nuo5S+ic6ovi8DPBwLb85pXEymXgEkioOWmGCCNFisbBWq2WtVssODg7s9vbWer2eff782f72b//W+RHeYpMQjihSqNhcjffcbH1kLrr+8PDQN3vqxiwiSTijNDoFz2s6IWMRw/qr1crTbNiDQz4pGMHM3Puv1QE4thoifYGN4v1+3/EFKQ44WnQedB75G4oyK4UfdK7BIq/x7n+pDmrqc/277LqXAE58jr5P8yxwa5utT5YAOGKdcHwioSby8Bhcwuu6W5OJB1TQFhQ0Cvzy8tIL/xJCwFJSKx6vAsKPEABlsgj3Hhwc2KdPn+zjx4/23XffWafTsU+fPtnt7a2dnZ15SGC1ejqZhxIZg8HAjyFLjWHKU5qao6igI9OlmC8FAsqA2yZQBOBmxXQSBVdl+TG6yHSh6TMJcxN+QTHj8RkOh/bDDz+4UaSnQiGwdKd+lmUexjJ7OkEseoywqgnFsFGJvnAdaQRXV1eW57mfHQ5gwdpmQ0iWZQ5UqWXJpio95UTXFxsDo5eBtUuVDUJk4/HYKpWK54Tp2KsS07mIBkGZcNTPUqA3ZVx/zdr5VlTmDIipNmqQxPWq61QNExQoPACPsQmEawBuFOU3e1LieG3UMGEzB4BjPp97Gk
qWZR6619I8Ck6zbF3bkrZPJhNPsdKTebgvencA0foMTUGJ1VqQAfAvTgBAqpl5fqzmdJOec3Z2Zp8/fy5sxok8mpKLcW7L+FjnM/UcXe+bQhGIqEyNUdkykA5pqBzPNXOKBxxZRJ7/crm0ZrPpu9Sn06mndGDQw4MAupOTE9erOMT6/b7d398XKkIw1oS39/f3vQoQn6t3EkDKuChohffgRwXn9BMeJFqr6QzRcGXcb25uHGeQN616j/upNmNm9qc//clrc5c5A7R9vA8qkz1RV6bwRBl9VR3U6GlIMVR8cby3TKGkQE18X7VatV6v58wTay7yTrWIK5WKl/IxM/8NU3CP5o9GIT+ZTGyxWFiv1yswHxYVVg6C1mwNHPifHa+ADBbFn//8Z7u5ubHT01ObzWb2+fNnq9Vq9rvf/c5Wq5Wf60zI4P7+3kajUaFeJL9TwFHnKfWdjnMqpBg9q/H+KIg3xYovo9g/9XKo9Y7ASo2dWdFLEAVDpVKxs7Mzu7i48E13R0dH1mw2fa50YxO1HgkXYZSwiYo8aM0BJV+JsJYqWd3Rye88fzq95De/+Y2nqWhIjbp9agzqOvr8+bNNp1MP+6tXtVp9qmc6mUys1+tZnud+BKRGJggdsfGm3+/bf/7nf/qGLwA9bY6eplQKSbTK4zzF6/UefQZtTM35tyLaF8ehDKBwLddEL1R8LiCs2Wy6Fx4vEKX0mDcz82OkzZ7kqJ6u9Ouvv9rbt289Nxrwx1jrjmW88KqMyTHVDXUAgTzPPcXg6urK0wDUC4Uc5j7GBy8n3wMK6DvjxJq7u7vzHd6sVWQ1TgVyakm7wat8eXlpnz59snq9bjc3N6UeopShFQ3oeG00MiLoi8/dBIpRlagzUhHWeI9ZsWTZ4eGhnZ+f28XFhR0fH7v+I5qY57kDN81L5f5ms2nz+dwuLi5c7hAV1fHEMwmP1et1B6kYRjjA+LtSqTjf6HzrRm3+15A/vBX1LJ5hbQdeXjYnwgc4wEjRwQjj1EB1uhF1IzLA5r/pdGp3d3dWqVTs9vbW20qOt/Yh8mGZkZGa9xRPvEZf5UGNjVJ3tDamzBNR1qAIRONAIATevHnjlge5FwDH1Wrlu/gfHh4cSKLUUcAxsV+9DuQOHRwc+HXqXeh2u74bn36wK5CQEsnMef50kgrW9ePjo11cXLgnCsup0WjY27dv7fj42JbLpQ2HQ2f26XTqydlsKMnzp7xDFqXWgNOx1HFMGQURiKVCUXq/CowUOIhCeFNIlTyWLBQFZOTfFL/G7yLIMVvv3ITf2u2282OeP3n52YEKwOQ+EtYp6cQaAyQoSMPTmue5nZ6e2qdPn2w8HruHXueN9dFqtXzO1aDSPNOUwaJH5xFlIAQFePjll1/s7du39u7dO883BayQe6jFr6vVqn333Xd2c3Pj3gPmQb3AqfmK81Fmset3Om8pg075YBMIfo1rS/ugn+t46fep/qL04ZHz83N78+aNez6JVsEf8B8b8nQT3+npqdVqNRsOh3Z8fFwIO/J+jRRwpCg1H7XdUc5o/4mM0S4N+9JGeFrz/5CVFEGv1+sup7nn//yf/+MRMQCtbsgBdAAKiBZkWeYRhTzP7eLiwr58+VKQq1DKYaCfRSDKPJXNYZRZ8X2bQqm1FfUPlMpj1DGq1+t2fn7uef3kAQPCdFOxmblRrAdKoPN3d3ft//l//h87OzsrnPKk7UD3I/eIvpqZAzfK6pEeuFwufc8LzwCEEjXAo6q10VmPurGU8SGaEWv9LpdPJ59dXV0VNohTyUXlP4aetlvLw/Heo6Mje/PmjX358uWZwzGC0ZTcTRkkKXmUwhRl9FU5qDw0IuJ4TWpBRUbT+1Oeu/hsckpOTk7MbJ0Mzy7MDx8++LnnChTMzN3+gE0+h6lQthzbuFgsvIbqZDJ5dj4uypTnqIU0GAzcUoORYHIStLHc9/b2rNPp2Hw+L+R/VCpPRbN5DqE3Nm99/PjRc0oiyIrjnFLoEY
SpwRGZqUyh67zxt1q/mwZWdUHEMTAr1o+LeXxQBOY8V78zM89Xfnh48NJPOjd4jg4PD92Tqhv9tNQUyp0keXZg4pXF24VXlLwrwpPkC2pbq9VqYRcyPK0KLoILgAVrSPNiOS7vzZs39ubNm4KA5VheM3Nv8u3trefSsnNfAazWYVWjuAxQpuYyXq9zFGVUCjhsAqkTgP/5re3X7/ibz1Nzqs9WWcnc6viQm4fSZSMoPAUf7e/vW6fTKez+5zQmjCOUIrw0HA7dgAE4RE+ieun1VBw1knRDCEYbAIT9AeTVIt9Xq5Wfx04e//fff++5e2pQMj5Kl5eXrm8wMt+9e2d/+ctfXMnjbWYeVL7oZ/oe/T7ypcrWGPXR6zaB4rrUtaa6QvlUKV7DWFQq63Pu1duOAaEpItPp1B4eHjw3GNmiRjn7OjSaxPjTBnVq4aHsdrs2Go0Kp57BjxgxRMfM1o4s2qjrVeW9rkm8ovytp04eHBxYv9/3lAIw0PHxsadf0QfSqrQ/WsYKzNFsNt1TrECW+UzNr36Xks+R4vhG47uMvnoXfyonocwijFTm7YjX6PeE/nZ3d+3x8dFarVZBqSP8ut2uu7PN1mUYWMAAAKwX2k7IBkWsoJWdzprvZFasxwZoVauKxHktg6JlUvhNW2Fk3cGq404eFAnZHAuIl0MXefSqpIRESrnrfSkrNjXPEZxyTwxJfkt6LRQWjST4JYaYU0YZwE4/V8/L1dWVlyJjjLSyAjtG1ZJmJyZKV4Uj5U9QrsPh0HdYA/jG47Hzvpn5CWvs3If/9HjAPM/9nepJhUc1FKTlTqrVp4LYbFpgXbDBig0ohJroT7vd9uL8g8HA0x8ocxR5R2VOmTFWZpQxlzpn0XCO922SojcrGtIpAxNKRUFSAN5snfOs8uX29taOjo4Ku4lRpmwWJZcaTzybQPkB4AHyNG8Ob5aZudy8vr62Tqfjm1b1VB9ku/YNj2s04JC/zDffA7jx6JOaQh70crm009NT37lttnZo6PhilOE8oIoM/WVDDXsMPn/+/CxMnWXrPELmNWVEpYAZcxZBaSrK8K1J+TBGRdF9Sqo7VDaiS/iePM/Hx0cbDAaFfR5qSHEN1G637S9/+YtXqSCvGl6gJi9VcpDHgF0isnmee54p+fPaR+Y2y7LChi3Wha4nLcIPUS1IHTw41HBU4ZkdDof2+fNna7VaXpbtp59+8r7Aa1mWuQOBlBR1guDsQN9QxjIVuVHjgvliDDSFIDq2uFYpGmKvydyv9qDGRaMUG6gdKfNcRa9U6tkgfjZZkIsKYIvASsGCCjgtfaJhWIqlE37HU4ClohaP5tQxqVm2LshvZh6+JQwEYFGG1typer1eUPpm5idVwGiVSsVubm7s9vbWhsOh76jVcdcxTSmrCNQgBSp6T5m3JRoYUcCkQrHfirQ/KS9GVOaqkGLoKfKm8rWGzMnN0/qSZvYsZxheVsUDf+q71bo3Mz8DHauYvGQOsSC/abVaeZ1JrQGpihvhoptFFJzyN8pBx0q9r6vV+thT+kfqTbfb9eupEckmBzwQ5Iyl+KbMQIryIiUgo8dG72deI79vEu/q31H+prwYytup5yhQUuOBED+eQyoqoMxQ6JSZwmHAWd4YJBgwqiCzLHPPP3yA8T8ej+3h4cGOj4+dF+gn4Jl2mK2jVeS0qkzjOkK9GiJlXXEN39Gvx8dH7xtKG0CCQyTPc99HoKWC+O7f/u3fHGhHfRf5lbX+UgSsTNGnHAubZlwpeFF9HHk46hbu1WfwQxi+0WjYmzdvnm2M0xQ5PITco1Go5XJZqO0Lz5NLrDINw4TjmylPdnt7a51Ox3kLsKl6BgMwyklyS1Uu8bniJjy8rKd+v2+3t7f2n//5nzabzTzlkLJWGJXq5MCxgQxXpwrrkLzzLMvs6urK9vb2vCYsVMbH8TPFYBFrRLyg+ug1ehGgphRzGdhJfa4AJtXh+J7YaAaeXcWNRsNPeyLBGcsny56K8s
NQDHaWZTYej31XNcIZr5CW5nl8fHRgqWeem62tJN1co+Gx4XBoJycntr+/bzc3N55/iBWvIQGeR7khM/O2Y4UtFgvPY61UKtbtdu3XX391gR+t0bJx1s9TSk2vj56aKGTKeCE1d5tAKQWgoFCvo89qEZqlLXqztUBiLthJikdzMpkUjtxFaSMsaYeOG+BOT4miDfAbpc+Wy6eTo6bTqZcEuby89HAN96lBpMYZRDgMwKyWP5sAoyFCSZbpdFo4qtfMCqejUPItz5+8/+SknpycuCcY41HnK5WSofIlyhl2WyvoZNwjH/O5fq/zuQmEsoqAMwXMdf2lPBwK1PlOr51MJs4byFrqS8IvRHHwBulOYrzfo9HIQ+YKGM3W4wyvkBaioUSNMGFI8R1RAQWa8CX7AKKhhdwFHBN5Qp7u7e3Zzc2N73DWPQNHR0eesnV4eOiGpkbwAKTUNEaWw/+MNf3XtuncpeYoFa2MFJ0Gm0BxjUUAGjGCgnZ14ih4J4KTZZkb28w995Fewj4U5BvGM8AUo4t60/v7+56WlefrQ0w0FYPi/pw+OZ1ObTAYuMcSL3oEzeThx/HRg3zgJY0E6Hfka+NIazab9tvf/tam06ll2ToNUH+0PjayPBp01BgGnFKBZWdnx4bD4TPdpPdGB4/q0og/dP71XuRC1LUp+qpNUspcvCxlKelii4sudiAC2WhlmpmfZkLxfbxT8/ncz3HmRATdOQ/ojGVsaA9H9x0eHtrh4aHnwKlVY2YOPhCM+p0ukDx/SpIHWJDboaA2z3MXtFhFWbYuD8SOwUplfRoVgGe1Wnk+H4BCTwNKzZN6UvXzFDilXylhp4wan1VmzW8KqRCPVl7q2sifcbxS4F0BLPmfLPrhcOgpHnovvAP/kDOnO/0xnng2ShuDDKv44ODAj3gkrWA8HhdqlUJq8VcqFQ93cjAAQJqEfA25QvRvb2/Pvnz54scL01YEM4p7f3/fcw7p43A4tH6/73ldetqL8loKqMb5iLIjKr0yvlYAyPrbJNIxjwrGrKggFMyZpXNuoywmrQNZqN/DX/f39wXvucpHxh5DHqXZ6XS8HA7v5Qf+BRDjTee4RaJXaszzHmQnud0KZPB0KQgFYDcaDQewrJcsy+z29tY+fvxo//iP/1gAxfxdq9W83/pOxsFsfe76xcWF/elPf7L7+3tPVYmyJo4h86O/U7yamvt4TTRcviVFg0n5OK7HMr7WfmoN9MFg4JtDSXODF3U/CIYJm+TQw+qNn81m1u/3C22bz+eFKJTKBOTm3t6e7wPBQL+5uXHvKF5c2q91hZG3EABZI6ur1dMBFxh9VKM4PT21g4MD6/V6dnZ2Zp1OpzBuVFqh/bxfN3vB42bmG8JJc2RN4jzh/hRvKU8rHtLP6FPK2FI89Rp9VZkpKBXujUAndiyC1pQXIOUtgcFJ0gesYsXgoaIAbr/f9/AkDJtl6yNDyfPUdlIwnBwoBK32syx3iIEnp4S25nlesKRjmJHfnIZBjlRkLMANDHp3d+fh/xiejFZ4CvzruEcgHoVEnHsFB9EzE5l3U7xQZumThsyKofOXgDm/ywASz8biNDPfod9ut33Ba5hQrzUzN6Q4hEI99xoKRCHjKcLSR6hSBoprNO8uWusIfoBxrHKh82xmhd3Tq9VT7tZgMPAQ/mAw8M85de3k5MQWi4WNRiPLsrW1z3pi7SK0o7DTsHBK6cV1mJq/KFPivPF//OxbU2xzylhS0jWp8rSsP6vVquBlGo1GVqvVCgXG4aNer+dgVncer1brzUnIXDYNoZhUdpqtvetEj1arlW+WAmjymXpUo/Kj4kqWZYW8UXIEOZ2q0WgUdAk0GAxsMpnY3/zN3xRkMA4PDDv6CDhmbWgKFofAEP2K+oz5i79jGFjnPs5x9IJFpb8p4DTlEUsZkimwnuqDAitNFyLNZLVaecge4Ko75+P651r4iudgKGlkSb3/mhZFTqeWq8SAwmMKf9BefuAlUmZiJO
vm5qZwomClUrFOp+OlJiuVpw1NsVTgcDh0PKH5sPAkkTwO0eB+UtL29/ft+PjYD31RXcE8pAyt+HmMYKXwhP58DUj9aoAaF1lUANr4+HnsWLwmKpn4Oec8f/782UuFjMdjB4Kr1cparZbn5mERmK1DR7xTFzo7Uzk5gaPLEJB4sngWTKyKW0Gxhr8Ik+rZ0PSLEBr5VywGvLucDIFrH6sc17zuYozjllLEkQl0Yem8RtAW5zX1O4LiqJS+FUVeVEs7pUS4JwJ+BUMRtCqhYPEWEh43Wx+DhyFzf39v1WrVlTS8ps+GB5W3lLfVEIulpQg54sVUPqStmuqgwg6QoRsO4D2ELG07OTlxkECpLDYWqFeLcUAgs852d3ft9PTUy5rouOvcRQMqJXvKeCCC05RBEhXnt6aUl0lBVlQE8doUONexwlvE3APkiESph5S5Ig0EpcZ71YsF8KSEjcoW+Jw+kKYCD+tmJ4w5aktiSMGzmieNpwZ5a2b2448/Wp7nfiwqa4bnHh4e2tu3b83MvJLE7e2tvX371vI893Awz2ftsImEqEGWZV4dYHd318bjsfcpzqMqZIBESlnH/6NnP877phhVZs+PxkzJ1xS4LsMWzDcgCr5l0yhGBQCVw0TgxdVq5SfysaseOUq9afQVuc0pMEa7kHF4znVDlVkxHUy9tvFzardi2ANcWZNZltnx8bE7zXBWsa+BMWQ9akqLvos1Rb54tVr1sluDwcBTxmgTGAidkZKVKb2q/Ky8UIYb1LP+msx9EaDGkEaK+coQc+yQ3h87GUkXMV6W6XRqZ2dn9vDw4KdF9ft9F2Jm6wK6KmQQZvp+BR2aD4ElhXeWXYOEe9hpipWP94CzxCGKApuZ1y5FEHLyBSFYPcGF55JsT/4LG6RWq6dQP3VRo4US5yClnPUaFdzRmlUhoUoyMmZ83qZQNHqi1zj22ex53q56bvgs3gtA1zwilH2WZb5LuNVquYWKkKEUlC5uNRIU/Otc4gVVwal9pn0IbxUIhDFjiJv34vE0M69dquCgXq87uDBbhznN1kKevrP2OOIUhdHr9fzwiwjMVa5Ezxl9i3JI568shFg25/Hdm0CsywjUo2EV+xvBQGpdqyHJvE0mE1f6HNcZN9OR929mnscJ30VvFUAxGhw6H/yogleZjDJX3uZ/VcJK7C0Yj8cum/lhbAi/Pj4+HZtKP96+fes5irovgTEgv5uULNp5enrqB7rgSGD8ItiMpPMTIwNmxU2sapyk5NEmyN6U7kmBT35HQB77iIHAQQpq+IINuB5v6M3NjZ84pml4vBOv62Qy8ZrMqofjHNA2nlOpVNy41jGPDoAIWmMajTq9AIiNRsOP8cXrSboKOfzk7NMX5DBRBdXV9GV/f9/34/BONlBVKk/F+dUjTRRZ5ys1typbUzIohVEiX/xfAdSyBVAm6HlhSmhG616fqx3W9zKJw+HQdnZ2rNPpWKPRcDCA0MQKgakJ2eDhxDrR83P5HOZYrZ5KOehJIiwOlDznTVOcP8/zAmMvl0s/RlK9r+Rb7e/vuxWkpX0YG/qlCdd3d3f26dMnBz2E48ififOi41omHNXgiOAzkjJqZKwypt0EiuNS5jnjWu1/6roU2Nf+wj/v3r0rlEDRmpB5nruSRTBxDQoVfkDJaRkq7VsEICr8ABVm5nmEGjJlQ6B6NpfLpW/QUh6N50fD53rEoJaxog3kbJFDjpEHyFmtVp5TBhiP8/Ka0ZUynFKeG72fcVJFtElpKVD0NOlnZWAl5ssrKahn3tlJz7xTjQFlxW94FnmJrDJbK0bN3VNPTuyL2fMTdniP2fqwC94BXyGjcVzAyyqn6QPriHt5z3Q6dTA9HA6tXq97vVRtD/IencJZ5uS3wmdsLmFcdD2ogcGzU8ZCyghR/tT/IyBQ0BTH+FtRBNURIKqRBdEX7ok4YG9vz3q9ntVqtYIH32xdJ5TnwAukV+EoIueStuzs7Pj+FcaOXHiVO7ohWXUhxg
i8hnzGGaXyRTcD6ryi/09OTqzRaDxLt+L5VAPiWdHBAMimregVrcCC4ZXnT6f94SAzMy95iKc1ZUSksGCKd3VOdQ7BXDoGXyN3X/WgpgSlKtxo/XAfv+OER0UfFUkcHLwwlP1AoeGyZ0MHzMVkwFyqtIfDYcEyQVEzkWxsUete646ame/wRyCj/KfTqQvE8XjsniYAMx4zJoj30lcW2mg0cuvm9vbW+v2+nZycFE6Turq6KliPZVaMKuCUlZ0CYmUel1RoJjVvm0Qpg+i1z/mtysPs+dGokI4XaXFs4AABAABJREFU5Uhub2+t3W4X8kkxWvReDKE8X3udFDjijdQ0FW0P70RJmlkh9KllVQ4PD61SWR/Pq2FVohTv3r1znmVcULaaitLv9204HNqbN298EwC1LxFEGIlZlnnEg7HFYGSXqubv6XxEUq9WVHbaZq7V+YnCdpOMqTJKCXz9XD9LeVy5RvvL5+y+XywWnr9MpEoVIHKJ6BIyFZmoHk41tCNI0X6oktZ3IJvN1ge0sKs4z/NC8X014jF89vb2fB3wbE0dqFQqXlmCE/zM1ocVpMaeqBVglueSTkaoViNren80riLQjJRS9HHuoh7dJHAKxX6YPfeoReMy9R0yBKCl3nXlI3LZiU5qfWb0+vn5eSEdijYDNlWeqMGrcl8rjrDRqlqtep1frXuuUTB0gdZwpZQl/WUt8G7WoTrWzIrOPnCE8gbrVNfDzs6ODQYDX+9m5kadmfleHz5XMB7lSYwOMI7RYFaeLYtWviaDXwSoqXACD4d0IZd5LbRBkeICjsrn4eHB800pVD8cDl1Qca5ulmV+Di1WRJatC5OjsLW0EyEeJqlerzuzIgSVaRRkaj4qE4G7vFKpWLvd9nt1gwl5UeQxcXLDeDy2xWJhnU7HNyuwo+7Tp0/2+fNnP0GCo/viBJcBrTjGcU4jE0VrKYKA+JxNpbIFo+OhFn68LyqF1GLV7/Cg9Ho951HGTi3saESZmZc3I7eJ57IGFODB1xz0gIBaLpduyFCWhE0f8KD2C6HJ8aq8T/ldQeHu7q7d3t5atfpUdH8wGNj9/b2XVKP9eJpQ6IwPXmaseTaSxTzxlAHB+tJxV6H3GuhMKfO4djbFk/qS8Nax0XUJH5fdp3OQ57nvcEdBk4tmZg64CCfmee67feFHjAzmRRW31t6NXm54CvCoQOHTp092cHBgFxcXNp1OrVKpeMk05RMAMn+rV5j380xNZWD8dAMjhCFIe4jAXV5e2vn5uW/O1WoAOB8oAo8RGvPwlZfVy638qoZvnD81PPSZmwJMIXhQwZlSlLsq01JyVb2p5JFGo3+5XPo8Aqy4T40UDCcFvfAyzh6tzYy8Yy75Tf9UnhLNhK/4YYO21kPXaBSEQaf1p+Hd6OxT3QEuAqewLjA8NYoxn88LG7zYK4DH/+7uzmvG6klokM6NOgmgyPPqVU7hFNZuChMqfVWhfl0gusC04ZH5ouWulBKiqecBKPM8913DCAmzJ4Y6ODhwryMeJxgNdzuTriFVGBABDbNpCDaGT1gIeG+1rVQS6PV69uOPP3pbALS0ZbFYWKvV8hASwp/Td/I8t36/b3d3d15KC8FHnT48Tikqm5eUdRst8hQAjWC3THiWGSDfkhRwR4DCd8zvSztQWQORl3Xc4DfynxaLhW+4Y67hKZQhli7vJbySArDKjwADilJreHR3d9ePBOR7DfmoMaX1T6vVqh8BiadKN/jh5apWn07BokJAq9VyDxw7W1mzrKt2u+1G2Gg08k03Nzc3NhgMvH2pcWd8FYjyO/Im8xuBZrT0lfTzTeNfs7ThH9dmyqDUa82KoGe1Wnm4GyWvnvrd3d1CHprZk9ecmpKaN2+2VrC8k2s0LUQ9+uqNN1sDA0KuKHwiZ9HDqYA8pp5oPh/PoP9EELif6i20nzQwQM/19bWdn58n01jUw7xYLKzX6z07DS3lIIhzkeLreL/+rXOekm/fmlQfRJ2QWofRMF
V9i1zGyNYz6YlKEsKHlyeTSUFu4r3UmuIRTCELCZGz5vQQHjUIVRdgrBNJ0goQmiuaGgszK5SLxJjScLxGKWgP+fyaaogO0hqvitfYrIqcxkk3nU7t06dP9vHjR7u8vLTLy0sfhyhrtO+MY8pZYLZ2xOh30ZBOyepIrxbqj4tLG50a8JTwjJZRShGkACudoqgshZXr9brlee6bNbDuUc7qPWWwlFl0V59aVCp0EIrsyjczZ36YXgeeieZcas2ZUobVnJD5fO6eM46p5GSr8/Nzm06nNp/Prdfr+Y4/rEQNb0ShqHMXx94svds39Yx4bcr7VKbwvzXFvuliiOOmljzfR36M3uX4Hk0xwZOOl+Xh4cEGg4HzLfyA4cH9WMQoXt2hDy/qeeN4iiCe8/j46IIrHgqgCpZ74k5h1gF9wrofjUYOcvEQNJtNXz8q3FEG6k2h3be3tzaZTOz+/r6wk1bngL+jtynKHg0t6Xfar+iZ0pAd7WMsNoG0vTp3EaAoT0feVYMz8jNzO5lM/KAHM3OegmcoeYNxhIGuMhLDzGy9WQoDCf6HMMB0TunDYrFwLzz8ohth4C8IRY7hZWZeI5ji65q/ildfQYBZMbxfqVSs3+872KDWJBGrZrNZUMrUb725ubGbm5vCekzpx1SYU+dH+TiCz9QaiM/ZBErJ0Gjca/+iXknJAnax41mH4FWMEv1hdzupKWZWOEo6ynJ0rx5CYVaU+3mee2offK1pTaQr8S5C+KwL/k7pIi1LSXsUdCKDs+wp4gXg5juzJ16u1WpuZGkdY9YvJ6eBe/r9vuX5uoyX9ivK1DIPf7xPZVLEF6lo5Uv0KkCNFnzK06ETGYVpmdKPlmHKg4EVQy2+k5MTu76+9p3RdJaC31Fp46FUYEjbCM0oOKV/AEGAJmEmck4Wi4Vb1ovFwvb39+3XX3+1yWTiJU7IDdExwfP16dMn3/n5/v17n/z9/X3PD+HMco43fXx8tLu7O/c4xTmIuXkwio6/jrkqhzjH+myddx0jfXeZ4PyWFL2iKf5KLThVJMov+oxUH9k4hDJDGGDJ1mo1+/Lli+8q1kgEPIjhAsDjXYAEvJb1er2QtwngZCcx3iEz81JsKGjmkE2CtEMLPXMdyjmOx5cvXzxnFUGO4CfNoNls+rvxxl1dXVm/37fBYGDX19d2dXXlfSuTGwrQ4NWo/Pgd51Etfn0e86mbbKKi3BSirRHYqMzUPr0kbxkHvOYod+ojki5SrVYLdR6JBJgVdyuXjRf5raPRyJW0tgeFHeWQhtlVUSPf8OzQdni63+97DitOC2Q76Qt6bCmRDt6humGxWNjR0ZHf/+XLF6vX615uS/O2J5OJ3d7e2l/+8hcH/WX6UecwKnz4r2xelQdSAPdrPFF/DYrGYUqvpwypMrCtwIdKKNQHNVsDVMrbIbs49MHMvKKDylgMLG0vhj+gj3A53wMYaRP4Qw0d2kwb0fcKoKl0gtzTI3SR45oOAE/hUIPnV6tVoR4qjjzaCn+RBjadTgu7+bWu/GQysaurK08zVHlMn1LGsfKs5qan5k+f9V8xrrJN8RpsaUtb2tKWtrSlLW1pS2Zm397s2tKWtrSlLW1pS1va0paEtgB1S1va0pa2tKUtbWlLG0VbgLqlLW1pS1va0pa2tKWNoi1A3dKWtrSlLW1pS1va0kbRFqBuaUtb2tKWtrSlLW1po2gLULe0pS1taUtb2tKWtrRRtAWoW9rSlra0pS1taUtb2ijaAtQtbWlLW9rSlra0pS1tFG0B6pa2tKUtbWlLW9rSljaKtgB1S1va0pa2tKUtbWlLG0VbgLqlLW1pS1va0pa2tKWNoi1A3dKWtrSlLW1pS1va0kbRFqBuaUtb2tKWtrSlLW1po2gLULe0pS1taUtb2tKWtrRRtAWoW9rSlra0pS1taUtb2ijaAtQtbWlLW9rSlra0pS1tFG0B6pa2tKUtbWlLW9rSljaKtgB1S1va0pa2tKUtbWlLG0VbgL
qlLW1pS1va0pa2tKWNoi1A3dKWtrSlLW1pS1va0kbRFqBuaUtb2tKWtrSlLW1po2gLULe0pS1taUtb2tKWtrRRtAWoW9rSlra0pS1taUtb2ijaAtQtbWlLW9rSlra0pS1tFG0B6pa2tKUtbWlLW9rSljaKtgB1S1va0pa2tKUtbWlLG0VbgLqlLW1pS1va0pa2tKWNoi1A3dKWtrSlLW1pS1va0kbRzktfHh4e5qvVyvI8tyzLLMsy/zvPczMzy7LMzMz/5+9K5Tn2zfPcqtWqrVYry7LMVquVVSoVW61WhWfxfaVSefYefZe2pVKp2HK5LFyXuqeszdo32pZ6Btfo+/UZOjbxGtqp7eZv2q/jEe8vawvPTb07ti3OR/w79l9J+xfnhbnM89xms1n27Oa/MtVqNe+cthe+5DPl0zhXZpYcs9jneI+OBc+Iayg+U6/X98T1xvopa18Z/73EG6m1Ef9nbUU+0zHQa5bLZfJ5+hnrXJ8Xx03HUnle+ZnPq9Vq4bqy/mjb4txtAu8eHBzkZs95UMeGcUBmlPGujkuUaXHNR7mnMkDXDRTlZZzLarVqi8Xi2TvL9Ej8LsrION/KF7GNZetM2xK/izrOzAo6RXlf50HHUNuXkh3a5jJ5kJKxZXJX2zCfz78p7x4cHOSqq5SX4jzzN+MB/8SxMLNn86w6j/uq1art7OxYnud2cHBgh4eHZmZ2dnZmk8nEHh8fC/JksVjY/v6+1Wo1m81mZrbm1/l8bpPJxLIss0ajYQcHB1apVGx3d9d2d3ctz3Pb2dmxx8dHm8/n1mg0LM9zu7+/t729Pdvb27PFYmGLxcJqtZpVKhWbz+c2Ho/NzGyxWNhqtbLVamWPj482m81sPp9bnue2XC5tsVh4H+E/fr8mz3U845qNvB3ng/917uKa1HtSvK9zHuW3vgO5ZWY2nU5L+fZFgJoCYylAmQJhsTE0WAc63hsZTxk33pOaFG0T3/M8nqnKlHv17xRYrlarhfelBj4K/bKFRr8YI/rFe3WsIjPFsYoTr9fFe1JCMwVEU4pC700ZLIypMuqmkI5X6nOzpzGvVquFz5gfBQCp9aBzx3NVkaVAUCRd7K/1o0zQ811UCjovKSCaUqQpnlMgrt9FUBA/j9fzfNoZebgM3Ksc0DaqINa50vcogIsCNM7RJlCZEtK1F/lO+6iKm/ug1FynQKA+M94fZZy+S98RjZQU/0WFGO9HdkdQmFpjL4G+svUVdU5ZX7RNKV7RMdRx0XWg86Jj8tLa1zmMMlbbvSlUZhyYPZdZEahEfRONYeXxyDPw6e7urq1WK5tMJnZ4eGgPDw+2WCxsd3fXjo6OrFqt2sPDg8t7QO1yubS9vT1bLpd2cHBgOzs7trPzBI/q9brt7u7a3t5e4d15ntve3l7BEKPNu7u7VqvV/D2LxcLq9bqtVitbLpc2n89tsVgUrp/NZt5vAHBKhulYKabRtRqvj3iDv/U3455yzsV1xbvL9IjiqhQA1va8RF8NUGPjImgpG4AIdtSaj52OzBsVYKTU4lWK3kqeUzaJ2p8yxR7HBeKZCmbjvWXAkXaUWYoRgDAm2k5l4jJPQlRC6sFOMUrZXJYBrq8BWn9NigaB/g0vLhYLnzOzYp8jX0UFg/GSUuARIKSUv0YK9P4yfk4ZGXyu98c55b4osFJgN/KffpYC6nGNc330qsb1F58BqXdYvSqpa1NrLM5F7G/qOVGGbQJFpZxSGsq3UbmnxilGC/itc8s7zezZ2knN+Ut8Gscz5UmJskufrx5x7RukvKJjE8FsnOuok7Qt8Vm6Hl+S2S8pfm1TChSkdEwczygXeB7ettcU/bcg5YGydZpqe9QxUa7wt44DPLS7u2s7OztWq9Ws3W7bzs6OHRwcWK1Wc8C5s7PjgHI2m9lyuXRA2mq1bG9vz3Z2duzh4cEBa6VS8WegN3Z3d/375XJptVrNHh8fzczs4ODA19xyuXTAOp/P3fN6cHBgq9XKdnZ2bL
lc2nK5tMfHRzs4ODAz87lN9TfqKR3n1HiqDogGe5lXPyXX9XdslxLjy/v1txp5r+GFFwFqqoPxpanQZKrxZQwX32FmSWEYn8ngppRp6l16jQIHvod5XxuHlCKObYht5TsFxvpM2gHDlwHD+L8qlDh+ChJSAFbTLFJzFcc23q8MvEkWPKSeBcZbQapZkc9iqMmsCDZVOL5kOUbjJ6VcAW4vhQz12Xh2XvLM6vtUsJUB0ZSy02el+hYFXEpwIZTjd9HTE8GHzptS5PdUm7iO3wooIh+oVa9yII79t6YowMuAYJRHKWCqY6LAIALWCEy1Lbo2YluizI0eH3222fM5jusg8nj8THlPefUlWa8yLQUmlbcZg9g2pTIdlXJuxHnQ9qTCsC/1IfXM2PdvRbq+Yj+Uh/hMnVVlsgY+1TUdn8t3Ozs77tFstVpWr9cdSPI3900mEzN7AlJ7e3vWbDYtyzI7ODiw/f1996yq7N3Z2XGvLO/tdrtWrVa936PRyIbDoe3t7Vm9Xrf5fG4PDw+eGrBarRyEYmDe3987iD08PLTlclmY37JIqY5D/L5MZqoujPNRZkCaFQ0KnqfpZmVtqFarhejEf5VeRGWpxRIXcEoopJRGXMQ8KyqR+P4IkqCUAPsaYR3Bnk5iVKpl/YnPj8IvClwmKnVvmSCOi5PPsLbKxgUhUSbIzNaWWVQUcS51XnZ2dgpzHkFSvP9bU1SUUUlCCgD1M57B/2VKM15XNgbx3lRkICpbfZ6GOqOBEb2DsR/6fp1TPksBCr0vPj/lbUwpW32GtineG8N8L92r7U5dp0A+eqhTc6zzsSn8m1pTZbLoJWM+yk/lJV0TUa7GtkRej8ZVnAvlVVWyqkOQY7xD5RVt0rCptiuG2SPoS4Gj1Of0RQGl8kxK7quxGtdJBJSvjX0ZuNTnl/FmlOlxDr4FKWjh/6iT45jwG7CqFKORZuuUO9Wt8BM8tb+/73Op3lP4ilB6lmV2fn5u+/v79vDwYDs7O+51xYsKH87nc08fIOp2enpqef4U5tc+Pzw8eB8eHx891P/w8OBgFeDGNWbmIf/ValVIQ3h8fHwWQVG5DZXJSvgnjluUxaoPoixRHtTnpDzaus6jnErJqJfo1RB/auHF8CWdLxOWUYjFhZkKywEwokKOg1nG/JFUuatQ4voImmlXnIDUdXFS4jWvgRL6FhPydWzKGCA+L17/NSA7Cs9IKfCcEjSbRCkAGT9PhRp0gabm1Ow5X8S5SYW1IriK/KRjmwJi8fuydabvgqcUMNCfOI9QGWCNc18mG/Q5+i4d05SHX9uf8ippX/RvJZUR0UiLHjyUTAoMfmuKYxTHGoryJ16fkplRbjHXGGlxPqOxHRVNCsACLNi4oiB1f3/fFb0Cgclk4nO2s7Nj8/ncptNpYTxQ/DrHkTdje6JC1DHWNseoSGps9Z7UM+hzGR+ldKA+J3quIV2PSlGufWtKed75HXUe16cwhZkV8EQqGmZmBXnAD+H3/f19q1Qqdnh4aLVazcPmd3d3dnR0ZBcXFz6uh4eHfg/eUwWH9XrdZrOZjcdjOzw8tG636+8iZJ/nTxHQRqPhbXl4eLDZbGaHh4fuQa3X67a3t2cPDw9WqVTs8fHRAe7R0ZH3czweW7VatdlsVnBQpXBO2TrUsY/36tiqoRjBaYr/47xxb5Qd2gbVif8VzPBqXJuHR8YrEwJxAPRzBWPaiRgyjO9RQJDqVGxHKnypgCAlKFRAxOu0T/yvg26WDqXGMVEFrOMTGSAleJQ5IsDQ73j+SxQVjlo6+u44h1GwlIGTTSDtV2qM4/wpRf5OhaDi/JlZYUzLQtj6/BSg1PWQMoZS7y5TXBGYRaWYahN/p/gwjldsd1k7yoQkfQTUpARbjA7ovGp0wex52kBqbhDIL3nAvjVFPo3yL2XQm60BWxnP8WyVX/H6yAdxzrSNasTQPjxV7HgGgB4eHlqlUvGNKuxkxmtUr9etUqm4sh4Oh+7pol8PDw
92f3/v79f+aD9TawWKuibFc9rHSCmAWsZHMSqYUvpxnMuMs/icyOOptn4rKtPRKXkUv1dejgAsFRHTdQK12+0CP8F/ZuZ5ovAs1xwcHNhsNrOjoyOr1WqWZVkhHahWq7mnVdsRjVwzs2azaWbm72Ed8K7Hx0dbLBY2mUz8Wa1Wy59JVYHZbOYpCGZP/ESbdMx0jCImiP3QcWYuMCh1raSMCJ6h709F5CJfx+/5Lj47RV8V4k8taBqf2iSif2uDUzsgtaEpd3HKS6gC4CVFmlrEZWDWzAr5ErEf+rwo1FJgVNuSAjd6TeodUQilQrCquKInLgUeYjUC+qwArAwspxRj6p5NoagkUt79MsUW5wJK/Q9F0F7mDTGzpPcvCpvIR7FfWfY8nKbAVtdByjuWeof2W9dqaq3E8NJLgC8lO7S/5Hul+D2lsLkuJaxTc8Q46TOjYN8kigomyrOo5KNhmuK7FJCJ/BbHhecrryhQBiAwfwCBarVq9Xrdjo6O/Hs8p7VazZU1Cnc8Htt8Prd6ve583W63bTKZuOeqUqnYw8OD1Wo1m06nHnZlc4nqguhJ1/6+FLFQ72cKIEYgxP1RRqbkSIwkvCT3Iz+iZ1M6KfLDtyQ1GOPaNyvigQjIGZfoWCDaAZjUZ8a5AACqMZTnTyH4/f193wzV6XTMzHzPx2w2s+PjY5tOpwVwyrtpW61W83tGo5Hl+ZPXlHbp72q1atPp1L2o3W7Xdnd3HbBOp1M7Ojqy5XLp+a/I3el0avv7+9bv922xWPjaArimDNEUv8YxLtMjWZY9KwfH9SmZHfUEcxN5P2UcarsidkzRqx7UFNjRFyo61usV6OkCigJC36Oep9gG7qEkkFlaoLw2GfG7+J7oseF9KYWmkx/Hi9+6YF/qF/+rcnqNUtZo9Lak5iOC4HitMlfsT7wmLoBNUfYpr3/qc76jH2ULJiUI9PrIY3pfKiSiVAaUy8AZAlDv0e9SHtj4rsh7kadTIDY1Hi9dFz9LWcxRWacAFN/FsYZi9ELHHJmR4ve4TjeFojzQz1JjlPIKRsWgYcIyI7zsGVF+ASKr1ap7Ss3Mjo6OrNlsWq1Ws3q97rl/eJDy/KlGJQCVzSbdbteBAmBkd3fXms2mzedzu76+tslk4s/BC8UObLN1Hh87r3V8oBT/Rd0SAWns/0ten5ShH8f3paiN8m78TgGctk2//9YUxyZ6PNGjZTrIrKjT6RefoY9ToGd/f7+QN/rw8GAHBwfWbret3W5bo9Gw1WrlpaJms5mH40k3gXiX5ozSH3JJDw4OHKAx75qnSiUB6qEiU6vVqoNoxmA+n/u7WQfD4dCfMZvNbDKZ2GQy8SgCwDUleyNGiXwTv4/OC9aCUjQ+FFfwjMj7KYNX//4anv3qMlMR4HztgMSGpEDiS0AovituNorPiYK0TFCkfqtSK1Okel3ZBMT7oNQYRuaBkVPAvwxYReCiwiwKBr1PxzH2OSqoVKg4ZQxsgqA0KwrHqHTKgLnZ83JE0Et8Fhemjr0aC7ENKf4pC61oWJu8PhXcZusC0LwD4RnTVZTPo+JThRJBe9ma1PFWims98k1qHaTGXT2feN00vPcSEND51nlKjUHZGv5WFMGp2cs1TTUFQnk8tSZj3/lMeb+MBynds7OzY0dHR/79ycmJHRwc2NHRkbXbbTs4OHAAyyYWbSObWbLsKV8PD+pisbD7+3vfuHJ6eup8P5lMbDqdeqkglPvDw4NNp1P3OlUqT/l9KHPlzxQfR1DKdWX8H9e7jpHOG/+rkZSSMfG6qE91PUb5FcHEt6KoP+J3UdbpOEa5nOqv5krGZ3Dd0dGRnZycWLVatbOzM2s0GtbpdLxUVJZlHmIfjUa+gcnsKZJzf3/vY/r4+Og8wz3wVq1W8/ZgFMErj4+PbsC1221fR9ETzGapg4MDGwwGboR1u11rtVo2Ho9tNpvZbDaz+/t7m8/nNhgMrN/v22Qy8QhCXN/I/JeiTykMx72p+dPPUz
jrJWflS+98Tea+6kEtc8OmGCtlmev1KWAQwe1rnTIrrw2o18fJieAyFRpNURQIKYXK32Xe1Oj+ToEmHdOoJFJtiqGU+M6vAUH8Hze4qcJOCQyuUYCU8pZ/S9L+068oyMsEnZ7kEReiznUqXKiknkju0bFPCYMUT6PMKZmieX54k6Klb7auRYeiz/O8AGJT/JEyZBRU6Djomoo8FZVw/ByK74x9V9Lwn45hSt7o9zoP+o44F2XP+RYUwWlKdurfZfyaek4Mp+rzXwJejFmtVrNms2n7+/t2cHBg9Xrd9vf3HSC0Wi0P5atXS3nZ7CnHTo0PPLKr1cpqtZpNJhO7u7uzw8NDazQahejZw8OD5XnunixKCb19+9YqlYqNRiMbDAZ2d3dni8XCHh8fbblceuH2PF+HGON6MFvLwFQ6zksnc0U+jrK0zCiIPJjSjSkDUNfot6bY/siTqi/Nnhv6qmv1Oq7VfuqzkYf7+/t+8lOn03Fw2mg0HBxybbVatdFoZNPp1Or1ur93b2/PRqORVSoVr1OKzMzz3Dc30T7kMP0njD+dTi3LMvfcwku7u7uebqB9rNfrXk+VDZyNRsMajYYbWrPZzE5PT63f79vNzY31+32bzWaeF8tPCmxGnBaN/jIcUQZkdQ70c8UQL0Urue41vn0VoKqyjspMX6KovQzsRWUVv9PGc22qbIUqnThIEdylQIj+rQsn9i8lSGI/ysB0FE7xM22vPj8u2JS3KSW8yoAkpIwYXfV8r2MW50LBXuSFTfGaKumCjKDLrDhOOk/q6dD6dsp7ZsUdxEqRt2MkIaawKK/rXJAPTd4UypiEeY7pq1QqNhwOHaxC7ErlhBQ9mo9+qJdXE+nLlGnkFVXSZYBKxziWKkuBWv0/rg0FvXFOUyd+ab9SPFE2D9+atN8pJZGSSVEe6HWptZ6atzj3mrtrZh6erNfr1m63/e9Go+Fzy3fIiv39fT815+joyEOpKFYFDCh32ri3t2ftdttGo5F9+PDBgSbglJ3ai8WicMzu/v6+vX//3n788Ud7eHiw4XBoo9HIxuOx3dzc2Hw+d+8ToENlXSw9FWVESqZEvaFyW+dNZUh8Xhml1mBqnX1riuunbDz1WpWPL4EhdUqVOV2Qh4BLPPjwoVaUyLLs2clQj4+P7p1nI5+2me9JDVD9B5+YmVenwPNJ1IfnTKdTN94A2Mhos6Ls5vlaOqvT6djJyYnd399br9fzI1TxrC4WiwJoVd6OfKR9UONf5yaFs+J8p7CJ/v7/yp+vbpJSJaQLRb14KS+MdkAHJ54drdeoEIiLl8GL12g7dBDKwKU+L4I5HdzYNgUq0eqIXpoycJvqc3yvUlS6Ok56P/NRxjgKSuPc6vNY3DB2VNqReSOgiH9/S4rgqAzYAETNiqER9RrG+eTeCAAif+o7U+EWpcjTCC4S/pfLZUHBA/hQ2q1Wy0EC4ayHhwcPTQFQlU/0eD99ngpGTROIIDYqbigKO4Qz86Ae+wiQlO8iH6cUWGrctE5wXJdx7enfmwJQU46AlLCPyie1DlWGquc/FQGIxPPVQ0X+HD+dTscBZqPR8F36Ozs7NhwOrVKp2JcvX9zLqqBwPB7bw8ODdbtdq9Vq/l7WIzuvP3z4YLPZzFqtlhdGb7VaXrqHXFbSAqbTqY1GIy/Afnx8bKPRyEajkb1588bG47F7pIbDoQ0GA89h1T0HqbGPYxV5mPmL16nDIEYBXgIAKZ0W3/mSU+ivTSngnNLzCuoUR3CvPqsME+hza7WatVotazabtru76zKPTUxR9psVHUCE9PM8L4TyaQ/ysV6vO/ijbZSbwruKfHt4eLCbmxs7PDz0CMNyubRut1uQpzwny57SAJDf6qio1+tmZg5Q8zy34XBo/X7f+/XlyxcbjUY2mUys3+/baDTyMlWq/3UcogxgfpRfU/wYZbUaGipbovzV53wN335VmalUQ6OQi/ekQFUEdHHBpZ5XBu54TgRSZQCAz2JbotBJCe
sITlLAh8/LBH1qXGL/VfGndlGrAIzKSMcnKh9IrXoNl6k3JaXYtQ0RnKfavwmUAof6HaTAiP9TgjAFxqHIyzxH22G25vvI5zpu5OqxaxfPEs8EtKpn6vT01Gv56XnTWZbZaDRycLu7u2sPDw8OFDTUw/sQmghdwqGPj49++grehlRIjvFOgcKogPmdqiQRxyYKNRWAZbIo1bYITFPC+ltTCqzo3zrGCvRVLqgTgHsiSND3lBmcfAdIbbfbdnx8bHt7e1ar1fzkm/39fWs2m36SDxtGdnd37fj42I95nEwm/h74b7FY2Hg8dq8SPM/nlUrFfv/739vu7q7XjeRc88Vi4SWAdDf0ZDLxMOh4PLZ6vW5v3ryx1WrlNVfv7+/t48eP1m63vawVG7UI60YZr+MTHS2snSify0CtzpnyYFwvKd5I8fK3ppfAvIIfM0vyKhT1zEsGATISo2h3d9fa7bYtFgu7u7uzdrvt32FsLxYLm81m1uv13OuP3CVlinlfLpfOt1SVaDab/j2lopCH6gnlxCicDOPx2E5OTgpHreZ5XogkaPkpzeEm9M94PT4+2tHRkVcCWK2eNoANBgObz+f29u1bTwUYDAY2Ho+94kUKT5Q59PT/MsNB5yJSmSNCAe5L9FVHnTKQKGoWo3qYUmg4BS5VIEagFjsRB1I/KwMEsc2xLbFdcXK4T0FNBKCpCUoB3FTbXprE2M/UfWWKNtVvFZypMS0zBsramXpfCmRvCsV5Tc1zaixfEor6WQSZ0bvPdbpGzJ7nwiqvYTHv7e15vil1+lDihFCPj49tNpu5oQFAnc/nbkFzkgnCj40sFEEH/Gpb6MN8PnewS8HpPH+q0zedTgue0Vg0XcdEAUkcx5e88Ppdak7iHETPY0rw6vOglDH3LSnyUUrhp8B0yiBAgUegH8eEeY9jilG0u7trBwcHnuMHSGRHPsqU93KNhhopPUU+KEYy4VQt1j+ZTGw8HttgMLDvvvvOzMy9ohhTjBORA/oJSOl2u14FYDQamdlTrl+z2bSHhwc7Ojqyvb09Gw6Hnss6GAycvzHQdMd0meyPkRM+j/OTmkud85fkerw3rodNodQ48X9M2UsZQ1FepvRK1FUYNaSbHB4e2sHBgc+t2ROPTyYTGw6HZmae15xlmdcsjXqYdJDV6qmQPnmipFXBS2xy6vf77tnPsqejU0ejkfP4YDBwMKpy0cw895RUBDPzdzMms9nMdYqWz1qtVn6IADw7Ho/t8vLS7u7u7MOHDzYcDj2iFmWzzh1tUtyg4x7nVvFEGQ7TNZGqvlRGrwLUCECiFyi1UFLoXJ+V6pwyRZnlFNujHdT3xR2TXKfPjKHzVDvi7+gZKvM4qGJMjV9KSWg/tG3KRHEOUmOeUtARGGgINiquOO94T9RbrRSVYWT4b0kRJCrpHMa5KPNu8n38LI5x2VjGdunzEbIAUjaSAAQIsRJSBRAAaCuVis1mM7u6urLBYGDL5dJqtZp1Oh0PnyIYCa+S24oHijk+ODiwxWJhjUbD8jz38Odq9XRM4Hw+d28TXgX1bGrflXfLQJVep3wUgRLPj/MQ+V7HXd+TWo+pZ28KRdkUAav2JfZN+/I1/SpTVuSd7u/vexFz9TYBRuEx9cRXq1XnHW0nPAjopZ94q0ajkXu3lsulffjwwQ4ODjyVZbVa2Wg0sv39fc8TZ73E/u/v79vbt289/5T6lfV63VqtlrXbbc8VnM1mdnl5abPZzHdLU6NVN8roOKX4Kc7LSzoyGhQpeQ2h25Dd/L8ppOuJMaCNKR0Df0WZoWs8ys6oc5VHDw4OPC+Z1I7xeGyPj4+e4gQI5NQmPuPZpDhh8MBjg8HA81gBeFn2lFZCKsnBwYEbVoTqyUE9ODiw4+Nj94TSHq29/vDw4PmtEBv7qJqBc0KNpkql4lE30mIoowWQrVar9vnzZ7u7uyvka+t46xylwKmOe9ncRxkV51vluvJ+GX11iF8bEF+cCvWlOl2GxGPHYs
dTwjm2TcN8GraM10agoNenFlF8ZnxOSvDE9+l4pO6PYDGG9+P1UbCVjaH+j9KgT2bPFXhMW+C3MrTOY4qBN410wcXPdP5TFBVOBKFcE8FC2TpQ5cb1WtNXqyLgmSLXzsxcCJuZl/epVqt2e3trw+HQptOprVZPOUqEYQnLA0Z598nJSeEM6uVy6QL+8PCwYBDOZjN7fHx0Qd9qtTwkipdMPU6qWFShKl/FtIgUqFIeT4GvaCTG9ZsyCqIM0We8Jiz/WqRrrCwdRBXMS7IxJaegFMiPPIxRo/VMCTtiNKEA4SOAK94nzelTLyfPYsMU4cl6vW5nZ2fW6/Usy54KpMPvHIGaZZnzLmHXdrudlIlmT/msZ2dnHlnAI3Z4eFg4SSjLMhsOh55uQOSi3++b2XO+jOOmY8v1ep22K44/853SQxCAvMzD9S0p6jnVOXG8zIrHj8dxMEvruqh7AHhawozd/JVKxUPghM3ZUMSxucgrbX+v1/NUKXJTG42G/4/TYDQaedUJQKdGvmJ0p1KpeJrKeDy24+NjP9YXHYKMVYNfc1zNzOU0+aV4bHWsAdPn5+cFh8be3p59+vTJI2gqS3Vcy1IwlK+jsRbnq0xXIrv+r0P8qRdGAJS6h0aVAb8UMEiBU/0u9TcUgZOSDkJKAKty4ieifP0+KtdUW/XzaA3rwioL5fHeuHh5nnqkdUz0GdputWBjm1PzG7/Td7wkNDbNA6WApoxnyzzsKaWRGgt9Rmr+9B49yUvnAsWNFUyxZnKPEHgAVaxz8uyGw6HN53M7OTnxTSTM+WQy8RNV8EhhiU8mE9vb27PFYuFlgzgtJc9z9yzgUaVwNeMJMEGoQngD6FtKaOn8qFFWxltlnpOUYouka1bnL8UHm0BRScfvomfZbL32ypQ9FCNQKYXEd1pzWsEoc83GJ06JQrkPh0P/7uDgwD32eH/wdpI6oqDz5OTEFouFb/7odDrWarXc86QnBWVZ5ruuOSI1Ajz9++DgwC4uLjzMyzsODg7cA4Y3DoMOkAq4oR063kpl4DJltDLWyOjoBNG5SOnfsgjZt6II3ONvlYkpIJ76OyVTlWcx5kk/oa4ockkPfmCDUp7nbnTrWFLlhGgAc6L/A8xIRyFdRT2hhPPhS2Q/ewEwjlhH+/v7HmmgTcwreoE2397eesRid3fXDg8PPWfWzPyZRDKOjo7cA0tfHx8f7fPnz8+cJ4p/UjhBjTMF0FE/prBRjIpHmVNGX+VBjQsunuaQWow0ShkxKqD4HAUTZWAggjbtZGoQyhA9k1AWdkwJuJTXJ3pweG58dhwHneyUQIpKOhXS1z6klL8qZAUK8TlxbFPjH0uFKKjQZ70EFP6aFOccXoyLIs5B5EsWId/xmf6vc5CqUqHjHOc9z3MPmZJ7pGH8w8NDm81m7jkFNP7lL3+xSqVi8/nczs7OrNPpeL6o2TrPtd1u+3vZONBsNj2pH+FICJci7Mwvz3l8fPS2UFAaniT8Rdj2/v6+1DAzK1YAAQSlPIWvkYYG43qNvK4Gcbzuv/LOvxaVKXGzdK5tlIHxnpcUC99zjUYM1MtFKgi1GTGadBz39vbs7OzM8vxplzHP0p3zeOGV/8kJJXUEj/xsNrObmxtfH+12u+CVZ3MgoVcAQcoQAii02207Ojqy6XRqHz588NQZgDYAuFKpeKH/w8NDW61WSd5OzYPKawWzZZE0PG4vGcxRruicbwIpsEp5jlWPRR2tNWlVz0YMEXWemfnxunggNVqGjKMdyMH5fO7hd+Xf1Wrl0avr62vr9Xpe6N/sCfj98ssvdnd3Z7VarTDfGC+0E9mWZes8bj7XSBpGkZYJBNBqFJjIxcPDg93d3bnsZlMYFV3MzPcm5Hle2CDL+JBKM51OC3I4ygP9uwxrxVrBKjP0Wr7TZ/5fAdQIYGIDtENQBCna+DKlpUIztQBTSkifp8BKryu7JgIG7on36f0q4HRy1HsWlUMEwSnwXEaqxBX8ReskAugUOE
/NTer9KS+Stj+2Ra/R35tCEWhHoyF68ZVP+D+OrQqyMktTDZfYFnhI34PgIjyFsMJKnkwmbsWzI/nz5882Go08Jw9Lm1DlZDKx+/t7q1arvplqtVpZs9kshKum06l1Oh3fMIJipl3wIH9zkg+AdH9/3y4uLmw8HvuxfDqW1JhMAac4RnHcdV5ShmsEXbrW43WpOcag4N2boujNirIuZQS/tJbjGi2T0fHzlBLRfFPCl6SAAAgxlHgueYCr1VOuaLPZtMPDQ7u7u/MC+m/evLHj42M7PDz02ryE+tnMQfhzOp36Jhg9FpK2EBkgfDqfzz13UHnEbF06DXD7+9//vlBZAPDcbretVqvZYDB4BiLH4/EzOayGls5VSjbrnKT0oc5FCqTFa17TJX8tSq3buOZT9YpZ39G4T4GX2Hc8pprqwSZQTYnScnmr1cpPQdNqDVq2bDwe24cPH6zX69nNzY199913nge9u7tr5+fnboCZWSFiRIgeHsMjz2ErbDYt4yGcEKonFouFRxl49mg0sn6/b/1+305PT30fQUy1AUTSV5waef7klY06UscZUpwDxeujAy8lh5Un4vNS9FUh/qjcUy7c1P+KtlPKJ3ZE/4+MHgckvgcrQEEn745tjIJfBXP0ekaPqQKQCN5Vuet4qSVF6DUCXZ4TTzeJbdD2a78iY+l4pBZ6qr8p8BDTEPQdqfkpe9dfm8oEnQp8zctM8aGC0NR469jqvKf4WUlBFPepxcwZzAhMdqYSvsmyzH73u9/Z9fW1H//Y6/WsVqtZo9Hwncmr1crevXvnVnutVvNNA1QEILRvZu4xrVQqhfqonGTCmL17985Wq5UNh0OvGoAQBZCamacD6K7RuH6igaheUf08jp9StOBTRgVCOiVzzNIy7VsRbdQqCUpRbpQZTynwo4pE32X2HLhmWeY7o3d3d61Wq3mokCMikWc3NzfWbDatXq+7gmU3NYbT3t6enZ6eenHxx8dHz/tUr9ZkMnFDjvqq5Drv7+/bcDgsHDWJ54n1M5/P7fb21j1HFD4nr9BsXS0DD5ver54rwrias0iZosjT0aMf0wCi/C6TDyndF+c8ztUmyN24hsr+TpU41P6VyYToENAUlKOjI2s0Gp6zz4lNDw8PBe8gQE2PHc2yzGVrnuee3/nDDz84D52fn9tgMLDBYOCbRyGAH8/QslF4KJUeHh6s2WwW1ihpAXhg2UCFbNOIhe7yn0wmdnt7a4PBwNrttr1//94NS8U1WZZ5lI0jgRuNhg0GAx+XiDWi7leZxFjp9+rIok8RpPJbnW8v0VeXmUoJtPidKlwYLbr0tWOpTpaBiih8uVevi2ArBaiiV1O9qmZFq0X/18Wg7Y73Yr3EyWGyVYFrn/hfAU8M0WtfUn2LoCclxOLzU0IzCgmuTwnB6LXaFCWvpGAoApJUeyPo5LOUdyTOYepa/Q0xp2qo4KVSD6ZZMaFfC/c3m02/js0oKHiO+FPvwsPDgx0eHhZOndKcPjxevJ/yVGZm5+fnhdqnq9XKw6Qk8OO1RYAPBoPCKT0IpTJeiTynYxXHMMXjOtbRS6PCMXVNnO9vSS8ZlXyfMpzi33jWzV7PNU+td+VJeOng4MDL7XAfnlQI4EnofTabufeIc8vx7oxGI89tJqSPx3W1WhW8RnhImbdWq+VAUze04FUbjUbW7Xbdm6o53vCEbkpBqSswACDHsbm+vvZ1yRhEPo2ykflIyZ3UWlCeVbkfwcOmUDTSo1Fklk7Ri+tXDdiUnoLQy8xrrVZzw15L65EvmufrjXt6DVEffS+A8eLiwszMer2e9ft9m06nNpvN3Jin3nSlUnFgqh59+E/XIsY9BhaVMZDr8DoGGhtYtbwafQaQ393d2c3Njc1mM/f2Km7QNdHpdLw9HCesnlTGW+dGebFMx8cIbjTgUvjjNfpqgJqy1uICigspFdaIwFQXPZ2JRcRTzJ3qtLYppQQjaOb6qLwQUuoBxdumz+D9JOgThtI+ad
mKCN711J4yIKnekLiIy8Km0fKJIVClOCf6O0UKNnT8VZhsAtE2jIXYx7jQ1EqP1h3XIkBSyj4CIqU4b3HcKLdTq9Ws3W6bmdn9/b3X8sN7Op1OHUyarflyZ2fHa0aSX4TngHUEr3ECEB4EypJgWJEbCL/jYVqtVp7fx0YrTjyhCDvtopTLarXyXCg8ZTo/KUAaeUvHqsxYisaG3huf/dr62SSK/YgGVrwmAu0o17iP75XX9XvepUZ5q9Vyha7rHdlGagoA0szcaFmtVh5qNXviW068gQC+HEFKLl29XvfcVQ4C0NN2tI96JjohVXLu8KTSP8CzHn+pY0P/z87O7Orqyk+yIi0GsBx5MzXmOk/8HcP2UAq0aT+jPlVQs0kUMYLZ85Sql9ad6r+Ufo/33t/f23A4tPPzcy/zpJVOzMzBpNlahvDdZDKxer3uHlaAK15N8jvh49Fo5DJP1xNzRlrMfD73HP1Go+GAmTJ/WfZU85fqKchYM3ODjmiWjqV6fuv1uh0cHNjt7a3NZjP705/+ZEdHR3ZxceHpNrRRyw7qaW7qVU7hvMjjOvaq9yM2e82xVgZ2oRcBqi7WKOhT1zLhipz1nuixKGu4Coz4vy7qlCJLDaAK7pTgiAKZ+/lMva5Y1VwDA9AGBajcSz5MrVaz6XTqO/XIC2SCU0pHvU4pIRiZRwUallwERRHEpxRaSnAoI74GdjeFImiOPAjgSi2cCFbVmIhjFg2EMqtTAS38gvVdr9ddyRJOVe/M/v6+l0kh7ApQBHReX1/b4+Oj3dzcuNWMoKVI/93dnZcggUcJi9HP6XTqyg+PA+3WKAE/AAozc2/Gzs6ONRoNG4/HpYpb13dchynjVgWfyhhdgymlr++P0SDNfdsEin2Nazeuz5QC0dBpCgjxnT4jyhCqOujRi7VazdM+8G6q15/n4rXs9/u2Wq3cu468VJ4FkN7d3XmOH30aj8eW57mfWoUCJyd6tVr55jxkM7yhh1AoiFCZTn/pE/fxHnJeiQqw4UZzwnlGNNZ1bqLOijI+zmUKzCpvKG2KzI1GuPKU2cs55vH6lByG9Dp277fbbWu3215OabFYeBkocAlyq9lsFsApDgL4jqgP592PRiM7Ojqyer1u4/HYbm9v/VhTs6fSZ3o6FJtM8cIeHR25t79afarB2mw27ebmxszMU1coyk9kDANKjSnlLf6Hd+/v7wsnuOkmxjzPrdfr+SmEjUbDhsOhA1wOXolYKiWDo2MhhU308yi7yiJlKXo1BzU2QJVD6iWx1qY+o4wxdVHrgKgXM76Xd5cNqA5GCiD4AIhnNPYdd32e5+41UpDK5OsxZQo4CB1Qk4x7tSQKQlQ9D2o1psBoyvqISkuBZBzv1DjEa1QwKGBPAWKlTRKWsY9x/LguKvDI68pjUcim+p/yJCuA5TqEQ5ZlvpMebyN8BG8BNpmH5XJp9Xrdj+Eze+Kz09NTV9rwLRbydDr1Gn+NRsNPOAH44lF9fHy0wWDgBdrzPHdwAJjnxBQ9XYpTVrLsqfwPCmNnZ8c9Zgjx1HzxOxq3qbUd10gq5JmSNzoH3Jvii29NsW/8TTtjyTIo8p2OicrBGEKOclWBHF4XgCg8kefrkOnJyYmZmYfryQEkV5TrAXQY59zD+3gHOaesBRQr93DkKWthuVwX61deINeVQyt4n+7iRjYTktUxozQV7zOzQk1Uag9H2cA4puaiTA5HkKtzUQZKN8l7GuVl5D2MT/gBSslR/ZwxiEAf3ckmO96PQTWfzwunluX5uuweu9qRkWAN+BC+Ojw89GjUaDSyu7s7M1vnjOZ57jzPJkJNyQIsYsCTl7+zs2MXFxeFgvu8s9frWbPZfLaLnzE2Mzf6er2e/eUvf7Fer2d7e3vWarWs1Wp5mg1e3izL3AHC3+1221MYiFLEkpQpuaC58WU8ru2NWOy/4gx4EaAyYRoihsq8obEx2nAd4Je+Q8HHcKpZ8dzuSCkBEK1VCC8o71BLg75q+E
QtPKyTPM9doGLlYNUfHBy4ZZJlmYcGEOqz2czq9bozuFmxzlpZn8oW80tWSbw3WrU6jwq81Aujz2J+IqNtmhcqtjGOTeSTeF0KiKeAb3xmfIaSfq67LSksrR4kQqZmT5sAbm9v7eTkxLIs8138o9HIN6NgGQMQR6ORH/k4GAzs9vbWms2mvX//3rIs85I+hLOoG7i/v2/dbtd3UZuZDYdDB5ir1crev39vrVbL+4cXdTAYuGVerVb9WEE2wahHOI67WbpCSFzLkdf185d4kHfFncSpedoUUnmka0/5UPugAEEBU7wmJXMjgI850QpwUbyHh4d2f39fkDE4KTQ/uVKpWLvd9hOaMPjZaQ3fA7zjBijkbp7nDgJYOxhVlLDS+ylqPhgMbDQauaHGutrd3fWC/Z1Ox/mUjVnwy3K5Ppntu+++8xOnqF5AuD8aRGqsohsij0elnVL6KecA320K70ZDPyX7UnoqJU+VR1NACD5g7HZ3d63VatlsNvNUFMCm2fqUMryY8BP8SVsJuzNv8/ncxuOxH+4AP5Dzj57R2r4YWhjjGJT0Hb5iDen6RA4Ph0Pf1KQRVtbXZDKxz58/ux44Pj62er1u3W7Xzs7OrNlsupMjy9anFEK1Ws1arZadnp7a4+Oj1y/mPSnMoEZAxIOaGsT8Rj0aeTymLaToq8pMRWCkjYSZIoih0TxHP081KoY+9P1lTJ96b8ra0HcyWeph1EkkD4NrYSDc5WwIWK1WVqvV/HoEoe5Y5VQU8gbzPPdj9QC4ugM6yzL3pEZwnhJ8qf7FuStTZPGa1HP0/XEOyoDwpghLFkSKb6EycMJCQ/hoX8tCTfzP2KRAlIINNZBWq5WHOzliFCMIr9Hu7q7V63U/9pHi+wge2skJIbpxACucXKf5fO4F1KfTqZfVgd+n06nd3d3ZZDIp9Ac+/+GHHwq5UmqosbZoO+0cj8cFYayKuoy3XjMaINaL8p+GRXkOBgF8oREL/f9bU5SDGp5Ora+UAkiNo/JylCtxHuBPngkP4flk/vkhTMm6GQwGHg0g947d0JzOhPGDZ03z4NSI0Xmibephvb+/9wLo5D9ru1k/epoOcp82aF4eBwpoIXbA0GKxsHq9bpPJxI/5pd1RrtJmlQsKoCN/RlnCvKSu1Ws2BaTGNkQwYpbeSMX/kEZN9VkxpQcnFoTHfjqdFo7Q1ROYMGTgVT3CNm5upq339/duEJHaQhuYG903o6dL0R+dfzUycUoQpuc9pGYhp7Ms841S/X7fvnz5Yr1ez3Z3d+3v/u7v7Pz83Nrttke0aF/Uf/SP6hzj8dirH3z+/Lk0CqBtVrmgxhPXpfCFymPGJHqHU/RVAFUFg3Y2NrDMKkotqoi0U4tU25FSUCmrMgWm+JxB0ueplcV1hA4UqBBiNVufrJPneeF8arO1Z8HMvMQK78Q65wQeQCjKm3ZoMn9qrF4Cp9GgiOA0MpbeXzb+qfFTgZEyJL41pcZHBYleE8dHjRgVVvG3jolauC8ZWJACSEKalDdptVqugEkB2N/ft2azaUdHR3Z5eWnV6tO5y3g6r6+v7fLy0jdZwZOfP3+24+Njy7LMut2urVYr+/d//3f3Gp2dnTk/3t/f24cPH9wIQ+AhGOfzuTWbzQIgjoYenow8fyo9hGIHrDIHZs/Dk8qranyWKW8zK4y73hvBZ1zLGqGJcu5bU1mKAxTlod4XFYjydTQKeBaU8iozTjrPetLS7e2tTSYTOz4+LtSCbLfbHiliPY1GIz+f/O7uriB3SRugndpH3XRXqVTc06O8Qx1enAjk7JFmpU4E3gGAmE6nNplMXFHruKohVqlUvJTb/f29tVotz/vTslM6RykF/FJYXscgGhFl+nQTwKlZuc43K6brlYFT7bPygBpNEM/AoOb9q9XTYQqcHIXxzHyzcW4ymbhH1WxdMYU2oKs5cYnDJABWqs/NzKtUEHWi4grv0FrWKufw2OLRz/Pcn0X9X4yi2WxmV1dXXpWg3W
5bq9Wy4+NjP2RFN25pRA68ol5L9ggQfet0OnZ1deXjiKxkPlS/sSZTvJfCc9Fw/tpo61floEZAqIpAKXot4mQoY9HIuKBTYJdnp6gs9IGg1Vwt3dwEc+NVAWBqriiu+yzLPG+JTQKUWuGc6MlkYqPRyMzWlhxEiIg8JgoEawiAoysVBKmVpX3SPuocxPpycRxjiEl/qyVqVgRzqefoXHJ9bN+3pAju1QsVPWaRd1KCtGwtxIXH37oAdSHrexlXlKZ6ITFg6vW6X4vX3sy8jim7p8k1pR5qv9/3pP7BYGDNZtPOzs5suVx6Dhbg8/Hx0f7zP//TBWG32/XC/Xg9tX06VvANwq/T6djh4aH1+/3CUX5aHYC1pOMWwz1l4FSNJJ0PnQflW/WIxLUUZdmmAFRd0yqvUjLUzJ59F8cz/lbjSkmfSyFzSktlWeZHmJqt64HW63Xb29vzjXAAOmRbrVbzUk7L5dIuLy9tsVg4gNX5gPdpu4bN7+7uHBirl4qTy0hpOT4+9siWbjLBcwY/KA+xrpDtlcq65JSOCXm1HI26WCxsMBgUjmtNyYvoCHjJUx9lqt5XZuxuilMg1a/U+uTzl8BNvD/ey2/kFeF6jV6iQ9kNT8hbNxGhm9Gd0+nUTw/r9Xq+YRSeBHyiS5lbHE96rGie544Z1KGjlWWoWsG8sl7G47HL7p2dHfv48aOnD+zv7/shF2w25Bk4uui3Gus6P/A/RhsbuUjlShnAajCo8yZlEHOvruuIFb6G/ktlpnhpygMVFT4NUfATmTL1dwS28TP9X0FUHEi+V2EEc/JswCcu9lgnT/M2CJVqMjTC+ubmxi0llD+eBwQ8Vgces/F47NfX63UbjUbeBxYArv2UYNJ+xe/LFn50v9PPlFfRzJ4p97Jnq8dnU4Rl5JX4mX4eBSsLMIZIzJ4baVHRpxRIBKcIEPgIRUopKerpEfZHEB4eHtpkMrFq9em4xoeHB/vll1+s0WhYrVaz77//3kHE0dGRexLY4XlxceFAEaD7888/e3kpzk+Hd2kLyll3+QM0syxzgUZOtZn5CVf7+/vW6/Wcn1EUep45ay+C1MjjzFUEm3Ge+T8lQNUDomAiNW/fihQ4lfVTFV6UzWqERkMttQb0Pj6HL9nFj9xarVYOLDGMdHMIG+UoxaT8zjPZYIp3k/Yge82sUBeSslKj0cgjAyqnNd8fAIEHjba1221fB2brjVnz+bxQq5L1jIdI20dfSPfa29vzMlixlF3KG83fOEHi5ykwV8b/fK8G1qaQtju1pqLeLzOcdF2mjDCdA0pK8Qy8iRo+13C8GrLklAIGORHv6OjI7u7uHMeMRiPP09eNXrqBGrwAD+Gt7Xa7Bbmjm1vNzA0priGqdXd3V4i6NptN63a7lue5G2ykBTJ2tI1UF9rDZ9zDmqvX63ZxcWH39/fW7/d9zKJxlTKeoGiUKaWckF9Lr4b4oZRVF617Vewpqz8qd32eLmpVHmXtYiK4N2W56vM154/JVqFTqVSs1Wq5EoXZAAqcQ44FY/ZUC43dzyok8UDpzuzlculhoDx/ygdkgwBAAisQRtKdhkx0FFgoKv08ZZ2mUgaisNN5KAtXKz+kLPxN8UK9Bta1P9HDwd+pz6Go/ONaUKtV50S9twgzQuhmT8Cu2Wz6uecoe4ycnZ0dOz4+9jmCZwgLrVYr965WKhW3tEejkX38+NE3f2TZU77q2dmZHR0dFZQJhhv9XK3Wu5VVKGpZFd2NynogpJ/nuSsIvACR75RvysBjBJJl0Zcod176X0HypgDUaPDFdZcyVlNgIIIXvS4lH5gX0pm0wL2Zeb1FVcw8G9mIIgTUPj4+2u3trRcRJ2zJxkDAHQqdPui54gBSjTxNJhOXmchqTg5ShwRKn5QYvGmAjlar5c/X1JVotKuHPc+fwrB6upZunI28rHOR4r2UR0nlBe80W4e541ym5Nxfm1iD6tgwK/bzNRmrvJ3CH/yN3sZAgQ+UBzFShsOh8w
CyjffAY+wT0SjW7e2th9jZCEieKDoXL2X0qKoHnuhpvV73PSrKu4vFwsbjsVcfGAwGdnV15WWycGJRbzrPcz+xDaCKIUe/4UVd65pCCDjN89y63a5v2kZXsKGVeUjpsZSDTOcpOsQ0Mv0axoO+yoOqCkBBZhTqKcEZKVr4el8EuwpiNQSdErQxdMs1CqL4PO5MrdfrhcLQgFTCV1hk7969syzL7MuXL/7eRqNhx8fHnjc4mUyc2Qn1M261Ws3PfQaEaFkeQmrUqQQUaH6KWXm6QxzvqJR1vNSKTynBMoCXmm8d95gq8C2pDJyU/UY5qycvXpcay6iEVLFFgQufEvZB2aEkVcBqLhPAgLxmPbUEQ4h6fOxMbbVavuEKA+rm5sa+fPnidQPzPPdw0v7+vl1cXLjBRnv5oaRQlmUOijk+krGJuWa0kyLsWm+QsUgB+JTi4n8UkdJLvB+FYOTvlEL8lqRjE9tZ5tHgszJ+jZTiXZ6vhj9gslJ5qvTAwQ7wHfl9zB2F+QldouzM1jmcZuY7kbkOgw0ZjfGDk0CPrARY1mo1/5xNS7qbmtqpaoRQmUA9waw91Rk61gATZLKZOR+TCpOqBZsacwAL/UjpwNTv+NzXHBLfitRjF/kQiqAmyulowOo1qtvxnDLngLdKpVKoy4zjCR4lgkPONNVH2GBEXjWpI+hvcj81PS8CN+2/6gGAsUZYaedwOPRqFR8/fnSnF+lSlOrTU9zMzB1t8dhWACpRKwxNpTzPPRpMxKFWq1m9Xrfr6+tnc5YyupQ3YzqgynUFs9yn7XiJvgqgpoSgNhRKeUP1Wp3MlOJJvS9lRSpDl4U2lKkJybAbU4EqZUrm87krUd0JulqtPBkZ6wIG6na71ul03MLRROv7+3s7OzsrWFfkilQqlcJRfhTx39nZ8XOmNXeKv6MFSv+jgEqF3NWjp96KCK7iPOs1MeSogLSMJ74VRQVv9hyw0pcI1KPHieep5ajf67hpUrkSoAornZy3er3uln273fZTUFarlXuhEFCEOtXD3+l0XIECIhG0HEWK8FssFtbv970v0+nUS1NxD88ml0kFX5atw/mE1MbjsU2nU+t0Ou5R0DVGcv9kMvG2Hx4e2ng8fpb6Ew1OJZ0LPHrRuIhzxbN1LqK3ahOVvFlxw0g0TBXM8z/fx3UfAQ4U+R7gBH8S8lYvPxEdza+/ubkplIoyMweK5Jqyae729tZub2+fFTaHz3RzKMobsEpeIZ5d3oEDQT2y0+nUgUWWZYUje3WO9/f3bTQaFcaMTTSMrx4MwL27u7vW7/cL3lgdH50PHe8yIyoqcp2TyKs6r+os2gRS4zICN71GSWWueq/5Lo6DygcMeqpFEFmCj6ihi5HFZ2xY0nJ+GExES/M8d8/neDz2ig2sS/Qn88BGQmQt7URmZ1nmzgLaD4Cs1+t2dXVll5eXtlqt/PhScIUaZurI0Drtcd2DF9ThoVE71Uc403CU1Go1PyAjzpFS5E1+a5twWui18IbKoDL6qk1SZcAy/q8DpYOiiykuSq5NWfLRk5ACG7oTV9vNJEbhgoeM/BWEnBaiJi+jVqu5mx+rh7PKz87O7Pe//73t7+97rceHhwe3cAaDgfcNkAsTABBJzh4Oh9btdt07pRUBlDmjUqL/GvbRsY1AnzFSL5cKx7K5pB+812x9NKbSS7tT/9oU25syrJgHDYeYFfPCIDWIytZFXJgpL4AaarybcKF6mFqtls3ncz9tpFqt+gaQLMs8x3O5XNr9/b2Nx2O7urpyIQk/U3NvtVo5OG232/bTTz95HpIKrvl8bnd3d9ZoNHxtIAgR7KrwKXXCelOh02g0fIzzPPfSKdS/xBORyj1NKV3GH8MoBU65jvmN86/XRxm2KRT5JbYZ+pqQqf6tCjU1XrwPJchJZhz0gHIlVw8PKgoIT73ZUzH7+/t7B67V6lNNXLw7Dw8PBS/XarXyED1GFfVNARIY/oPBwHmw0WgUNhUSgtXNIihjQA
pjl+d5IaTZarUKG1WJWmTZuvwfjoW7uzvfMMX6UflhVpSz+s7IbxEIcG1qHiNQY+w2wbiK8jW1pqODI9XuMlCrBih59zruqpswrq6vr63RaHgZsix72uw3m81c/2M0M+cYYdQ/HQwGvgEOrytGPFUqsiwr7PKHZ6HZbGY3Nzdex5oxyfPc/vCHP9jNzY2dnZ3Z+/fvHWsAolXPqn6NhiuyEb3AGuU7UgTMitiCSC/GnB6nrXMWeVflk8rR1Py/pB9folc9qCr04ucMjjZOrcB4rXa4zOPBYCD09N3cp+EYTayHFJzyfJQneUiESjk9gTB/rVaz4+NjZ75Go1GwfDgP/b//9/9uZuYnkqgLnsm/vb31U3uwThB2mn8CcxCqzfO8UK9Ny2jo+KXGLjINn8cxjAovGgNKyqDMbazTumnKXueehRH7rd7nOKapcYieUw3VqZDQa/Rvng8v1et1563d3V2vAqE1SdktzK57cu+00DgG0c7OjnW7XVfuhH9Q2g8PD/b27VtrtVousKi7F0Nn9IWj/h4fH10pYBih/AmZIfB0/aEIms2mDQYDby8gVcPDvDN6UFTG8G7GNcqNlCLU+dPPowdgE5S8WVEJMcYpMAqp8QQo0nFLAXV9fgRGeA3x3qOwUHyadqIbkzhC0szs/PzcvVJ4GlkzzB9KnDmj38jJ5XLpnlgAIgadesR4DoBZw+14wNikEkP5GKeDwcC9VRiP6uHVAwNms5nn7T0+PtrR0ZE7KXSMo7zUTTKpOdTwsM6NzpHKlU2Tuxo1TfEdlDIgYwg5JY917rIss8PDQ+dRvJRavWG1WnnetMpoSjLRFs2pR671+33r9/s2GAwKUS1+eFecY9XHyMPJZGKPj482Go3s/v7eTk5OPFr7n//5n/bw8GA//vijnZ6eOp9R2UXzsrWONCkLrEV1ELDGAKxE1sA6rVbL9zfAz/1+341SnHcaaU3JjJThGwF0xIUpHn6JvrpQf8pLkfqOAdKwIJOX6lRsZMpKjJ3QZ8WwCO9FGOzs7BRK9eAZxTMEKNjb27Nut+uJzOwA/P77751RhsOhLZdL63a7Hra6vLz0XXWVytMRp5zu02g0HLiyu9XMPC/17u7Oj4wkv4nFled5obBwBKEpS1zHTP9W5a3jBEWBqO/R8Y8CsSyEvgkUwQsU+SvV3jKhH401BbhR0KbGCNCZ57lb7xRMhk8QlP1+3yaTiZ2dnTl/kf6BZa+lTfBuotwxrrDC8axywgheJkrymNkzwUSpKDyfhMDoM7tKVYFDGrbV3duML+uBqEEcezUC1FiNskYpelrg+5RXMRVB2CTeTYHRMo8w/yN7Yz90PMpkavTY4blBESKDsiyzZrPpfGe2Vszs/iWShNIcDAa+SdRsXTid0KqmxZCHSkF00gEwgugjR5hmWeY5dnme+/G68KcW/o+nPQFi8MbiOVutVv4MBQqMIWDo8fHRc7sB8aqEdWxT/BXlEroMuaKexlREKD7jW5MahnEdp5wnkcejfIX0GTxfIylERJFl5DUz9zgEzNbpd6SK8HyK4jOvGFdaPF/zWA8ODrxyD98D5sAParxrDd7d3V2bTCb2l7/8xYbDoZ2dnVm73fZUmSxbe2Px2KsTTnNOK5WKp08hZ1VeoG/4zfORu8pz1PU1W6eqpXQ+Y8a88a4Yso+OnjJ9+xJ9VYg/BUpSCwXmSQnReD0WURlgKHtHBLZ4VOKmH4AedUphPKwuPJ0kUBOaf3h48OMau91uYTfrYDCwwWBgb9++dcZ78+ZNIYyVZU/5eXidsG4Qumbmp58sFgt7+/ath6Y4lWR3d9fG47F7HQiHwqQvAcmozFICIzXWqTlMCRIWqP6vnptNEZip/sbFBClPR7ASQU30iESDIFqQuh7YLERhZC25Q9kyLHqqO5Dr+fDwYM1m08zMvamDwcBBKrlz0+nUvQjwvZn55hYdA90EYGaFMBgKncL8BwcH1uv17Oeff/aIADlSPI81wfG+CD
6AhJn5btbZbObnstMuHesyJafeT5UtCgyi1zwq8yhM+TwC3m9J6lFOKe4oh3U9x3Cc3p8CttyPR7FSqfgGFOYf4+X+/t6Gw6EXtKeqhPINcpTqDtxPaTQ2NQ2Hw2ebofBuIVMgrgNkkmKEJ4g1ValUCqepEV2Yz+d2fn5u9Xrd5557zawQlVB5DphGhsf9DOQsaiUAPGuMtcoN/btMNqXAZ9SHcb5TwO5bUNTP9EcdJGVAvswxoN8rn5qZNZtN9wwC3IiMar4lIDR6spFXGCP8TxrA/f29G1cARfQz/M5zaCPAFUNfjY7lcmmDwcAmk4n98ssv9v79e+t2u2a2zq1XvhmNRq4j+E7XB7zEeiOiTBSE/lYqFccxAPtqteoOj06nUzhYhXfE8U/JnJSjRq9/CT++Ri8CVF1UDJiGkJQZy0AQg6qN1kmIgFQVj96rng4dOAZTFeL+/r4rZASfmbk3iZ2mFPg9OjryCc7zp80lFxcXrkBbrZYtFgtXyIvFwr2kzWbTPQgwOEIPsEt5FJKtDw8P7fT01MeWMCh9w/riXGGYvGxslOJ3/C6bn6j4aRPjqSHGKDB5xiYBUygFZKDIU/FaNZ5i6EnvS6UBRAuS6zU0iUcda7parfpZz5VKxTccYfU+Pj7aly9fnFcBtIS3aPdwOLTRaGTL5dLOzs4sz9c5U1jPCHK8DNVq1RqNhu+4RsjxzKurq0IhZwQYFSiIAGiRc7MnI4xdooTzAeBXV1eu0NXrFwG95hDGOSiz7Ms8MHpPBHAKajeBUkZRlLep9Ra9wi8ZW1D8Dh6DR9moR8kozqsn904dAxSrZ4MFvJrnuedYw3MoQEKqtH8wGPhhJtVqtVB2T0v17e7uFtKmyKODj/CAXV9fW57n1mg0PNwPcMUjBtDQklQa0tcd29pmeDvLMt/sgnFWprBT4/+aQyfyaplu3BTSfiiG0HQ8jYbE9kd9E3EGThvST9jfofVNzcyNo7jjnn0gOIDYLHp3d2d5/pTucXd3555/nqtGEuCPduMkg48wzGOaFSWrRqORtVotOzs7s2azaZPJxCuemK3TA8AB7MaPwFE3UGNgKTDlfwgnxnQ6tfPzc+d51jPAl7+jg8DsucxQmRp5Na4Frkt5XFP0ag6qeuIU7Khy1mRwvT4yFxQZM3YMipZmHGxtC5YTuzlR4oQ1syyzk5MTD00dHh4WzogGECyXT6fhmJkvAtpB3lWv17PxeGzv3r1zAQWQyLLMhTz/k+OhwJRwGGCakCzMgocAb8Pe3p57WJUhysYzAkqdz1TJiQjizKywAFMgVefkNev3r01xw5byaOxH/Fs9oxEYpO6JVmXkaf5XcNput/1ccniR0k3wKAn+q9XKjo+PCxY9Spm5JtTYbDadt/r9vr+TdTEejwteJLN1+R1OQzs8PHSPLl4kyqCxBljnCHCUtipsrVEJkKa0D+uT/K+UgYRCUJ6OeV4RAETvTPRYRU9iSrBuCqUUvdnzlJ3YdlWaCvojPyqvAzbxxqOQAYJZlnld20ql4jVNzcyNb05zwusPSIRvtX6kHhG5Wq0LqZutjRszc4+l9gvgrSew4fkiv3lnZ8ePP8Uw0k2DvAt+Rfbf3t56jp56r7VCC7oGkPX4+GjtdtsPJ9B8Rp0znVM+U3msMoVnKyiO8vUlY+VbEOtW226WBjJqVEZ8UeYIAOCg1/lM8451U51WO6lUKq47aetqtbJer2fVatXr88KHzE2WZc8+i3KD+QOQ6rWVSqVQGYUoAc4y3QRFVGuxWHj6V5ZlXlVA80xxPsCXeZ77kemNRsPnRMcbfsaTOpvNnL+Ojo68lnGn07HLy8vC/VEf6j6BVFQ94oHIt1wTsUikV0P8kXk0Z+FrhHpcmClm1evUYxCBA4PMdQwAFjmnhhDuofRTu912QQYYRTABUhEUp6ennhv6008/uXcJrytH9JEbkufF83T5ofg+ApM8Q1Xq6nWCuQnLknbAJioAgG5O0XGNzKKLWx
khKr2UQaHWIeMSAcFLHspNoKiwFWDHvihY0XE1S+c/mT0vZaQWPr+VP1D85ER3Oh1rtVoO+szM7u7uXDnOZjNrt9ueL6rWcbPZdMWOop9MJh6uRxCSJ0Wb2HAHX6KoEXR7e3u+kQkwslqtfAOhepWUR+BxPFz0FzDZaDQ8nxCjDY8qG2jUk8bYRkMiFdbmdzQYUiWseHbqswgCvyWlgId6nF6TkVwf5asa88rTSnjXkavIMzNzYwLQalZcW9VqtVBfdzKZFO5hfLV6w2AwKOx+1lCt1iwlVUqrr2Ao0R4MIOaf2tRsEMzz3KMERC6IGNB+UgIwJLlOARLGLp83Gg3PAyQyoIZUyoGgzh3GkXni+SldF/l800AqFOUnP2rYRvASsUQK1Jg9RUKRnQA0jFzNN6WcHbqUFCQ2PRHhrNVqdnFxYcPh0HkRw4q0AOSTGg06R4qJ2EMCiKTfyEZO86N2NVVXWHO6mY8yZuqdxZuKIwKAqdEMM3NcxFjTRmS2biQHOJ+fn9v9/b3V63WPaDD+OhfMT0r+RANE76EN0P+1B5WHqvchhtlTL4mgJyp/tQr5TFF+CkRExaSfac4poZeDgwPrdrtuUWC1wAxY2Zpnulo9JUA3Gg3fMKVWGKGkmBc6mUxsPB679X90dORFnGEEGGo+n7tnjFBDtVr1MIUCBhYj3gosdFUuGg7Q8Ul5TMqEQbwvKu1o8fK+aC1tipKP4NPMno1BXGipRRgt0BQY4reGUzUsCyDTkDwJ9awDcjJbrZYLFjxRq9XKj8adz+c2Go0894oQPcJ5OBx6pQnaoRUj4EM16o6OjlwgA4gJN6GQtVSbepMIWynP4AUAVGixdvK+WQvUFGbMo4ciyoJoYEReNytWmNDrUhGACAI3hcoMez4zS9eU5N7Iq/BjfIeODYCQcmbwauQhACthTQzrw8NDu7+/L1RnICeVDaCz2cz5QQv468lk6BfCj8hz3WyHPsHbTyqAmfnGKtKj8jz3KALeXzPzkm5qfJI3S0lAM/Naqcvl+uhqeLZafSpJyHGW/X6/cMRmypjS3zoXL32n4xJl8aYYVxGIQtEJkjI0Y59Vb+lvAA4gCq+5piVpfr/ZWjdTeznLMs8zxZk1nU4LEU8NywNOVReCHwCTOJ+Ud5HrtB++7Pf79ubNGzfQWSOx2gCOM/qD0aMpXnmeuyeUFEPFW7qjX3kGMKzVDNBRh4eHdnx87DWvU44dna8oq6OeTfFJxHdl9CpAjWAwpSTitanvNNzBNerB4zN9flx0em20COr1up+c02g0bDab2enpqU8mu9d0pxsgEOsEgXl7e2vn5+fOVIQ46/W6DYdDd7+bmeeVXl5eepkodjZrXzmtgnf2ej1ncD2RhZ2hmnRvZs7o1OnTGoK8IyphVU7RClfvkZIyTyrsynMVDOs8b5KShyIwj4o8hoHVi6F91bHVRanpBCoceI+GIjudjofP8ZTCe2dnZ2ZmzocIJRQ6xhKgkAgB3gPSWWg3fKIbupbLpV1fXz/zDs1mMzs7O3Pvj+YwIWgRfiTXs4a4Js/XuYGqoDHqOLzi+vq6UOOVDWGMH2Ouik6NIJ1XVVwpwyPlOY+GyCYod6WouKNRqbwXx0i/j3JYPRs6luq5Qe4wl/AT87+3t+fniz8+PnqFCfL9zMzDpaSHoPhXq6eDSfBm4l0FMCLT4HEzK+yQBqziyT89PfUUKIAHSt3sqV4r7yV9Sms8mpmDj5iOAhiljNTnz5/tzZs3DgJoL6AcXaSHBagnVPVfSmeq3FAlr/ekQFv0lH9rig4AbbMC7Oj0ShliKUCETOJgk06nY51Ox3lvPp/b9fV1wSufZZl9+vTJ5R08fHZ2ZlmWeXjb7Inf7u/vC55T3quYAy8pBnzcm6ObjYhC5PlTVEGPlubwCbO1DMWoenh4cC8ubT88PCzUHsahRltVHwGySREwK5bcZIxZ6xp1IpUL3k05CdTw0Ehr5NGUo0Dn9yX6qjqoir75LE
VR8EdG5V5V3iostOOalxMtFwiL9+DgwJrNppfVqVardnFxYZ1Ox0OLbGYiL6/ZbDoz4kYnj4qyI9QGI3+U+zTnZDqdWqVSse+++86BLoCB72FUNpkgENVrgdXNewAkAAYWIczOu2KqgyrhVDgzej1hSlXwEeRGCz5l4asg2gTSxRF5L4J5VfhQBJ3xueqlizlkOgZsjjo8PLSzszM7PT2109NTDz8ypgA0wCoRgWq1aufn5152and3105OTtyjhOBjlyneVJ7Fb/qNVwmQoHX31PomfIrHC35m1/5yubRGo1EA4YAJ1iSJ/3hSO52Ol8iq1Wp2dnZmtVrNPnz4UMhnZJwVgEXwGUl5MxqxOi86Z1FebRqpVygF1s3SBmH8LGWgRW8K1yFHu92u/60bSDFIDg8P3Xtlto7swNMYOLu7u74TGc86XkwFpirztTwQRruZeYqWyjp4E8eDplEhe4lKwcNaukc9bWbm15mZpwMsl0vfUEKeH2CBFBiOq6QNlBpSRc54qzyNMjMaIzqHek2UW2U6+a9JavCbpXlNKYaAX1rj6rHe2dnxcndsQmKOzczz8SmNh1wjp5pd7ujqyWTiEUxko9bBVQMOfgcQ81uNC42sRicPumM4HPr6IJKKoUUda9KgWCdsTgQc83zGDeMPvgRnEJ1QDy1AW4/HBrw2m03r9XruXdb0qyiPdd6j0QvFa8tSVlL0ag5qBJApLwa/taGxMXpNZFQFDlFgpgAWg8tmk0ajYaenp558zBGj1WrVc1UQsBRYbjQaludPdfMWi4Xv2ut0OnZ8fGw7O081Utk4wrtZBDA9JUbUxT+bzez29tYtcnZ4ppQiu6CzLPM8VcaRPvD8X3/91fMS2WGo45PyjOgYphhHF1YZIIhMpxZQnMtN8UbpWET+gtTTFL1PUbim7uXzlABmHbDpSc88xgJHYCAksOzJoyOXmZw7M/PQFpbt/v6+K37AKaF/Benwk1aMwCtFbVQ2SuFdpV9Y0KQXdDodF7Kku2RZ5u3nJCoMKUqzUAnj5OTEQ66Xl5fW6/U8/4tx1DlJWeVxfrguUrxelWhcK5vi/Y8pVGWg5aXvyrwYZumoGGF65Gmz2bRWq+WGynw+LxxxilxbLBZ2cnLiBg/z/PDwYP1+340z8lEJ7Ws4P25KgS+Rf0SV2OCE7J1MJl7uKio71hg/o9HIbm5u/Dm0UTeimJnzKrmrX7588SM0zayg5OEl8sqpRUzIOZV6pU4DnvmS0Q+phy7O96bwbcrB8RI4jTgi5VRQw1OjpaQ/YSxohQe8+sjeq6srT9vb3d21z58/+6bRLMtcXuK5vL+/d8PfbH1QA88zM895Ru+bPffGEwnT9Cr2s3AMteaIkq6C8wnePTo68pPZ8IaqNxrQyxoEcBNxQzYrOGX9gUn0cA7WbL1ef6YD41yWzXWcxyhz9PqX6KsK9evLIuCJ1zNR0WJPAR0oBXTiZ9pBwGm327WTkxMvnE+CvlmxxARCjVDTwcGBffr0qWARHRwc2I8//lgoxUNu08XFhefgnZ2dFUqkEErQ9qL0qYfW7XadSbU0ieaJAKBhcEK4gJD9/X07OTlxbwFeB62/lgpX6+84V3EjSWTA16ybTbHcy0gXggI1/c7sOa9FoJ7i+Qho+Zt38X+tVrPT01N78+aNHR8fe5FzBKuZuTcJC1nz//D6Ay65fjgcOu/e3997aSrajEKDl5UvCPsjwLR0DxvytL4qngWS+vUZZusUBg0f9Xo9z/nmGcwB3o8sewpxtdttN+rM1ulAKYMYZaTCsIzfIxBIAVyVb5ui6M2e55FHI9TMXuXR1P2MR/yc1CQtK0U4vFar2cePHx10LZdLD32zw5/8UrP1RoizszP3pD4+Plqv13M5nOe55/hpTisGnPIuwFZz6QjLcgY7m1nzfJ2eQ+4en3Hy02KxsE6n456qKO/US8tx11zb6/Ws0Wg4UFmtnvJgWY+NRsOurq4K4J9nRz6MHtSUk0FD4X
H+uWeTKDoDlFeZvxSAgXSczJ47PDAGarWanZycOKAj7H19fe0pF3g30cXX19d+/dnZWaFEH/qeuVePPjIMecb7IOQGehu5bWYF7z3/w9fgATNz/mG9AE4pS3V/f295ntvd3Z3t7e35xm/eCZbBMcDaIF0Qec04qYefVBbkPfteqHCgeoW51HkuixC8hB9SDqMUffUmKVW80SpSZoyLXYFBFKIREKmVWAaqEBp4briPunm9Xs+F7Wg0sul0am/fvvVE6Eql4jVMLy4uvIiz7ljVfIxut5sUCBoWwqVPnithXUDFarUu4zMYDNx9D4OxUMzWVpieJ310dORC7/j42HP8sPZ0h2EcX7U8X/LMvJQLpHOs/78U2v7WpN7RyJtmRc9nKicmNQYpIK99j6kr9Xq9sOjZeAcoRAlrWKbb7boSxXJvt9vW6/U832h3d9fev39vg8HAZrOZeyTzPHeFj9cpy7KCQUP74Xczc4sfYUSKSZ7nnmtKPyjnA9+ZmRtdGFcchsF6QwHwfK1UoTlVrFk1cKOMUKNKKeWRietAgUHKa7Up3n+zYnRDK2oosFZZmTJOo1EWST1EnIaEUqdeaKfT8SL3yOf5fG7D4dBTSjTXFIBJKonmBmIoYQBpWJZ5JYSpnh+zp3xS3s+pO4TnAT/II3ZOm60L+69WK7u4uPC0E9KwKEWl/MkaQG62Wi2ParDONGxarVbt7OzMIxt3d3c2nU7t7u4uGerVsY+6LvKlznGZfNWw+rekMmdWSpeYPU8BiLI6gljmhTQ+DBozc888aX6k7mFYvH371m5ubuzLly92enpq4/HYI5yU90N+wUfwFs/A4CEF0Ow50MJpxFoEkGp1CVL/kMvw+8HBgZ2cnPh7SXuiFCFg9eHhoXCSGhGBSqXie2TAQGZWAKqMN1Fh1i98t7+/73nUjDUbv1MYQ0nlT5THcf4Ve7xELwLUuDh0ElILi7wHbWQMabxkOcVFxoQq01Ln6+zszDqdjuf1dTodazQavmN0f3/f3r17Zzc3N4VTJZh8jiTF0iIfRGvwIaToD1YPQABmxmWO0NPwDp4FcgvxUGn1AK5DmJIPAzNNJhP7/vvvrdfr2Xw+L5Tnoa1qzaiQi/MXPTOpOU8J09S1eCxQpi8J0W9Bsd0pXubvyGd6nSp9HVsFPNEI29vbs5OTEzs5ObHj42MHi3hfWCuah4Tx9fj4aJ8/f/ZyUpomgiGE9Y2S1HwqLGUOmkBAkueKdwyvJqDw4ODAC7FrCJeKE+opJc1kPp97+FYVPWNwf3//LHUlyzKv/Xt0dOQh5Z2dHfd+KG+p9yVSWUrGS6As5UH8Gmv+r0Xwno6B2fPSbrG9KO0o/FN8y/dZljk/dDod++GHH/zI5+FwaH/4wx8c3KFQ4TcUaJ7nztN3d3cFbxPKmhJobPxAUWNcq4dQjSg8QchovLq0g9xnjBztbwxrcg9tQGHP5/PCZkNAhYLrWq3mJ19hTGLkERLFM/vdd995nVjtTzS2tL8x/SsCvGiwKemcf2tSR0dcj6l1VuaQioYojh/AGp5U5gpeAPyNx+NCtGo+n1un03FP4dnZma1WKz9UAgMeDyntjWXN8KbiaYWPY/gcXa4Gfp7nvuFJPby8l/Qu3SCt+ILc/ul0apeXl5ZlmXW7XccWWtWl2+26rNYI1mw2cyCv3lStp6p7errdbiHlITVXmqqin6XkVMqB+RK9epJUaoGkvA8KLlWxpwBPBFDR8xE9BGbro+ZqtZqH9t++fWuNRsPDNdVq1f75n//ZGo2G/eY3v/FnwTAwxtHRkb1588bfgSA1s0J+HhMMIAVoorxRzFzDBqssyxx4MDYIL7Mnj9Xd3Z0zpNk62Zo8Fyyo/f19Oz4+tix72m04HA7t8+fPnov6yy+/WK/Xcy+VjmFkpuhNSVk5qXCpCokoZOO8bYqgjPwVhb960vQzvSY+r0zAmhUPjGAzHaXGsNTfv39fyCG6vLz0dBWzJwPoT3/6k41GIz
+9ifJneGeyLLNer2e3t7fu0YEvEdZ4aGk3nnw2e9RqNQeWhHEODg5sPB67UsdDNR6Prd/v29HR0TOP6f7+vl1dXTnwJRpAXzDWqDvIegDAEJplrXz8+NFLwajBGo2ElBdRlWGcp/h3lEkpefYtSXlTw2sRUNNu9aC95Lkye77BCsMCpYRiAnSen5/7c3X3PXUl8Twx32bmNaNpP55EgCiyiv/5THchZ1lWqP1LFEq9t3hjK5WK14acTqeeg0h6FM/AE2ZmrqgXi4VvcKL+rzoKALEYe6TBADi4rlKpeA74w8ODAwY2aqnjIDWP+hneYOX5Mh6gb5tA2iZ1TqU8vBry5l4F7NHI0mgSnj3kCQaxglgzKwBJQB41qPP8KVfz9vbWBoOBgzQ9Mpf2A1CRbwrqMMgh3SitqVyE9cnpJk2AtCmALhsKAZTIdJ43m818AxN11EnRYt3wTI2gafu0vWZFg5ccbaLR9/f3Nh6PvY1R/6VwQpTRkU/LDOwUvbpJKrUYYgNVwKtAjYBIKWURKlPCHBBKk7qNTDxepkrlaSf08fGxu8nJcWInHwxBTh+ues3HU6AH41KjEsbXkjvkl9De6XTqniHyVgHG9/f3hXwVmAqPBIKPdrD7GfC6t7c+3u36+tqur68L3lkEmc5ZnD8VFDreKdClwjMVQopKXZnxW5P2WcNsKQWRsubKAD6/U2kBADKUWLPZtG636/lO7IYmPMgRumbmQI7wY7vd9jB6rVbzsPh4PLbhcFiYFwQTIaDxeOyGzv7+voezWq2WDYdDGw6HZvbEgz/99JNXptAavvSHkm3UDNQNAyihfr/vwGU6nXp+FKDCbB19aLfbvjEAIY8XAHD88eNHu7u7K4AUHeNUipHOm861yieVUUqa87oJVCZfdT2nhH/KKItjFBX/zs7TISLHx8fuGSTsjedFjQE9IlfnGGVM5RS8LtRoRB4TYud5ClCj/EitVZ5JOyiFtVw+Ff1vNpu+NtiQkue5H65Cas3Dw4MD00qlYl++fPF9CMhmgAVeK8YLsGO2BkfqFePQjLu7O7u9vX2WUx3516wYAdO5Zf5VH740z9+Sorw0e+6MguinrsUUMFV+p/QRRjtGDwDOzDziBACkTcwbnvRK5SnVbzweW5Y9pXEgl9TjqMeLKjjlWWwANLNCvjOb7BQc0m/4gXKDYA4iojjDDg8P3du5s/NUP3U0Gvkmbp6tm1QByDgkAO04IcAzjJfWkIWvWa/gEdKwiNYyPxo5BEelwGtK73L/a3z76iapyGj6klR+TEpp6OQoE5eBUxQf12NR1Ot1++677+zNmzf25s0br4OnJzL85je/8dxT3XGJFY/Hkl2XXIMyBDwwyVreh/CjFuvH2sabEJWdAsb7+3u3/HTX4HQ6tTzPfaMLjK0AS4tU60aGxWLhHgtyEtWbF4VeVG46jymrPKZplDFUBMLfmqIy4DNI+5FS5jpWen8qxK+EFdrtdu3i4sJOTk7s/Pzcw+4YWmp9M2f1et2tXgpLPzw82NXVla1WT8fgkYeHV5RznCeTiQ2HQ88d1M0neAaopcvxjwg1BKrWgqQvWZZ54j2f6Q5XNoGxOdFs7ZlbrVYeuUB4qyLGkj87O3OP1P39vQ0GA8+1Sin3OOZlSjCl8FI8onO7aZRS1PH7yIcR1KWexfgfHh5aq9Xy0mfImgjSUKA8U3OozcxDoXmeu6cH44oKDYT01UNeBrK1/cwv7VAFN5/PC6UBKW+mIINygQoMj4+PvbpLlmVel/fTp0/WbrfdyQHvHh0decpLlImasoAhSNvu7+/t119/9SiC2fMqKPxomlGKosLXsdoEuRuNi+gM0fbHtax6imvi+kXvcjS0VnaAN3k2QK/b7ZrZ+jhnTffAKaSnMLHnQ9OoAHZmVpBlGN9EyQB6GCvz+dxTo3AgYCwBmlerlXv1R6ORPT4+2s3NTQGY4tU3M++3bqgCI/E/uIQ62w8PD3Z7e+v9BHhTlUP1P/1ERx
wfH9twOLQvX764kalzFOUzc6XzzjvUyGKuUo4vpVc9qPp3VM6xYZFSoEdd+/pMVWoqYCuVilvJp6endnx8bJ1Ox3ecsrDZqUeIkFqLMDCnIqhlMhqNXOjAeAcHB9ZqtdxaOzg4cCsYhcsz1RvKWGjRXQAFTIF1z/2EjxDaKrgZAyw63oWVt7e359b97373O7u9vbX/+I//KFiTuuA1rMm4pzacRIMjKm1V5PpsvtsEQWlWDj4igImKX70WERTFkJsaUWbrU5vI5QRwshmEDVIIMQV5CFgMIVX2Zma9Xs+99bwfb5IKaELmWPUYXRBrxqwotDnNTMM/8B+8ykZE3U3NaWvz+dyVeK/X85q/rA34TfMJsdYxzvb39+38/Nz29va8jJpGNqCUkatzqAI3XhPnVp/7mrD8FvQS8I79jPeUGWTq+cQjBa+iZJkbfqOYePdsNnMvPcBLT57CSxrDhJoiFcO7PB8e4zuVL/wGFMLrHNBitq4wQfrW3t6ep1dpSTetRY2D4ebmxvr9vpmZtdttBw+AVbzArGWV061WyzqdjqfXDAYDX486T3EudU6j/IzymGsjyN0UuZuSm2ZFcB0dJKrro04xW6dnNJtNPyKaE/d0XOFTUk7YU8L7kUG7u7vuwILntcIAAFVlFU4lBYJm5rqZNvIewB7AFTmoB1Oopx9QWa1WHaDiYeXZ9AMMoGkgvG82m9n9/b3X2ebe0Whkt7e3hf0QvFdB9e7urqdRsEHyj3/8o3+vcxnnk3akZHXEgl/Lr6/moEZGikwVmY/fEWiWKfT4XewcYXzKSbXbbXvz5o3n+DHA5FyoN2o8Htv9/b3N53M/U/bx8dGLLRNegkmxchC+GkLicxQwzIGlxk5Wjh+j5iMLjx/aoNYPCv7Tp0+FjVNYQwhas7WAzbLM3r9/b2/fvvW6k9Pp1DdgvQYW4zwqgI3KTC175YE4lyiVTaAYCk4peiguOPhCP4ugnf8V0MKr3W7X80ex+DV0CiAEvBIqyrLMa5qyy3I4HNrj46P1+/2CMgQUABJIIcE6h1cBFhRi5kQ0PJuceFOpVDy0D+n4YWQpOMXapn8I3uXyqUg63i7CUbSfo1jhY835IqcVATsYDDzCEHmwbA75n3mJkQTtV5lxvQlUpuRTOWSpe6GUJwtjiJQhDCpCeaxlDHkOEkHBAbzIRdZUE03fuL29dZ6mHbqjX9cU8ofnAZbhOww0rWoQI14ocnI/ySvluYRAkcu6YxvPkuZxN5vNguzg5CicGbS5Wn06hvjXX3+1Xq9nnz59ciOL0L+mNKTmlr6lQKqCA/1Jgd5vSdEZRT8xPMyeh/dV/+j98ABGBDynR3wig+ARyvRhQOBFxKuOpx2DDEOetiKP2CuA8QJxndZ/Zt8J7YaIdiq4BRcgj3Ao4dzCUAIjPDw8eOUfQCt91vbwPNIIiaYSRWDtkeOK15fygTjIsiyz29tbm06ndnNzY3/4wx+84oCu82gwpXg2ZRTr93z+Er3qQdWBL3PZ6/+REaMy57l8r4zMYtYQNadCtNtt3/GLcmdg2bmHy77RaNhisXBg+fPPP9v79+/dVY6HldxUQCs7qCnLYLbOT2OxsFmF2qbkZ9A/PA6451UAaUhM0w8oyA7wwAriOEwWH2kMZuZ5NpERZrOZ/fzzz96WGFaKCr5MOGiblbmi5Q6VeXO+FaUWDJ9HkKLXRxCasg5TYNXMXGl1Oh07Pz+3brdrb9688TSU0Wjku32vr69tPB77yUzwPcKMnfOr1cpDmCholCxKlFwqLYD+8PDgnipyQ83MUwo6nY6H/bWEFEIUcEt4CuMHYQxPIHgxDvGoUQQb5aQg4vDw0PNZVXlzfGa9Xrf/8T/+h41GI/vjH/9onz59KniBUeZqBEcjQr1y0VCJHiwFv5tGKQ+E8l4KbOtnEeiZrcePWpKU4TOzQjUSvKwYEESABoOBHzxC2BJ5NJvN7ObmxrIss8Fg4JUeNLcuVmgAKBJyZI3gea
KEkJbD4RmAZP7HEENOA240csEYxZ3z5ONyUhvRAdqX5+vqLchGNs+o/Dcze//+vW9IXC6X9uHDBxsOh4XKBy/NG9fod2Vg9GtCpX8Ngq+UdI7N1hUHUuA6OsK4fmdnx7rdrh0fH/vmYE0RUqDKu46OjrzcF3w1Go0KgBMnEfIU3sfrSIQSvsMbj+cRsBiBGM+mP1qpB57GGEHnswlvb+/p9KjBYOD50zxfjy3F+FI5yDhpVQoIgEmUq9/ve/k0eLxWq7nOuLu7s/F4bO/evbPT01O7vLy0f/7nf/Z9L6rvlTd13jXyEY0s6P8nIX79HZlJ/4/AUxsbrf7UIlRLQK3fZrNpx8fH9vvf/97Oz88LuRUwGBtOsiwrhHjMzK0vFCEWQbPZ9E1GZua1ymiDWv2Eu2gzdcGoIKA795ho9TZBKHPGCQYlPLVcLu309NS9s1qjVZOcsQTzPC8kWqvVhzWV8sLE+aGP0TsaAZqCAqUUk35Liu3GAIp91WtfMqL0MyUVlJzEc35+bp1Ox05PT21n5+kUG9I9AHosWJRvpfJUkxGvDTuWNUVEC/M3Gg3fjEFZFYw1eB+e4G8UNhsH9/b27Obmxk5PTwvlUzDkKP0EMNVSPowhY7NcLq3X6/laJ+SpQJGT3hQYsF7NngRwq9Wy//k//6ctFgv79OmTA56Y/qL8GZWa8kBU+nptNEI2hX+1T6l1Gnk0tR7NXuZdvO6U+OLaq6sra7VahU1wpHfwLIxwcuaQcfBjt9u1y8tLr/tMeoqW6TNbp8TgiCBHTjfWoTjJ4dTwv25CIceU0k6kGxBNQhbjGKA9AA4Mt+FwaIPBwJ0h0XhnUwzRMo2OobNOTk58Djudjv/9888/u3da5WxKFunaivOtvFAmj78FRV5V46/MYWD2PEUsjgFefj0NihrKeZ57Hr7ZOt0OXZxlmecpA75ubm7cCMfpAy8qz7PZFUMI+ai1WGkrKQXwGlGkSqVSKBmpoXrmHcMIA+vw8NAjvjje1GtPxGoymRSiWWAUvMeMD3hFN7iamafeTCYTa7fb7szI86cDiX73u9/Z7u6ujcdj+5d/+Re7urpyg7NsfssMae0z9DU8++omKV0k6t2MDKVMF6+JjVHmK1MuWMGUPjk7O/Nj+HRHKP8jgGAmyodMJhPfEaxhJMAhwgnXO2HX0WjkbUPhIwS5D7e7tlktKhZLlmUubAGMtHkymTiQrNVqnisFs6xWK7u6urJ6ve7eBN4Fc5Prt1gs7OzszGutXV1duUWY8n6XgUwATbSQaH80WPTnNZf9X4uUd6Oyj4snZbmrkEx9r1Yhnhlq8757987rRmJta41PjtpVBazWOgZYtfp0fjkFlymjs1o9bfoj7xRvp25EAQiQ18T8ayoKAnM8HtvJyYnv2CTXFM8BedCAU9Yu4Jd1hnAfDod+NF+9Xveae+rpYnMf3o0sy/w0OMAClQ2Ojo58w4FSCqgpYNb/49p7yav6rakMtOg61tBemeDXe/QaeO7o6MjOzs7cKEGesllK60GyaYMqELxDlTrzfHl5af1+3zflsbMdAx8jho1MOzs7DkDIiyX9RIm0FDMrhPMBAXzHmeR49VkDemIOf6vOqFQq1m63XQdgVAJGeH6r1XKQStkgZIB68piDi4sLm81m9vnzZ5cJym9lDiAoZaTEd2wC/6baHjFCBN4ppxfX8bvRaLhu1vQ39F+er1MzuAcnDuAMXu73+1av163Vatn9/b2DSoynLMvc04jRjged98ewOO/UPFaMFSqo0G6I+5D9VP4BbOJkAqDC65eXlzYcDu3du3deakq9tkRaOVmN75B5yAMMNdah1mvlh7mgvNfbt2/t559/9mhvyvBI8a3Oc8SFr9FXnSTFy9SDUWa1cU2Z0jd7fjJV9BiA9NlF9tvf/tbOzs78+ZTCIYSPJbNarfzMZRQ5dfMODg7s7du3Dvgo5YO3SBOSdVc8FrZuPmJhsClLS45QjxJhRQgfIYiSJz9pf3
/fk5JTHj5CTQoaeG/0xtZqNXv37p2NRiMv5o9iUI8yiykCTJ3rON/qhaAvKUNlE0JNZuUbt5TPYp81FBq/51nRGDMzD8nDY5RZigYO4K3T6RTGj7C22foksSzLPPeS/OrBYGC3t7fuvcczQL1F8gT1fdxPewBoGDr9ft9Wq5W//+HhwS1/vLd4HnRDFfmmePUBNWbmOX+0W4V8BEsIcl0bhOh47/X1tf3rv/6rjUajZ3KjzEqP6yimqCipMbJJFIGHpttEORz5Nt4b5ev+/r6dnp56GFtzJXUs+Hu1Wnke9Lt377xUWbVa9Y1EeDD7/b7t7OzYjz/+6OXwVAbiFVOvKVVJ4CfAAQYY6wgAyG8Fvep5HY/HnsdNzh2eYPqljoa44WY4HBbWIuOLA4KjVvHiRv4CzJ6dnXmE7G/+5m8sz3OvyqEyWEFqmX7UOUw5GL41peR/5MOU0RX7r/+jl6lf2mg0/HAGdQ6Rr6m77dWhhMf03bt3Ba87aReALjZ34iwAKPKjfJLSLavVer8IMo9IGOBWa6Oqk+Pg4MA9pYBy+AD5qEeSstlJo3hZlvleBDaSq5cfXqXGOkCWMWOuqNlKv9++fWtXV1dupGqULjp7dH6jIyfO8Wsy91UPaiqEkFoYMeSm99BABTiRSWOImETnH3/80d6+fWunp6ceRsTCwToHPLLbvtls2vv3721nZ8fu7u7s6urKjo+P3VoiTwOLSXfhky9Fm7Mss9FoZHmeF9z6eoJInFiYRL1OmnOKB0Pd/ygHzcMlNxavKgKNMVMG5f/f/va3Ds45poxcnAg6dT5TgiLOd8qbqIxWZrRsAkV+TLU18rter+BAFRkKn6L8eKTM1iCZcOnh4aHd3t66N4YTnPCOk0t6eXnp/KO7UT9+/GhZlrkXC887QJU0E97N+xWsKp+SE8V3rCm8sXiUzMxLWdEfipIjyM3MFQH83Gw2CwAU70TMk9X1o/UHLy4uzOzJa/Dhw4fC5hoFuKmNJWbFUjfKwykDWkHttybls5e8Ua+tSwWZyttsbDo9PbW9vT0v7fXdd98V+FrlBfIEb6huAGE8b25u7MOHD3Z+fu4lAG9vb91Ihhcoe4aMB6Ai1zGKACDwKBEHZCW8oAcI4K3SOqnsE6CiBetG5xvvLc9vNpu+BhSAmq3XkzovNHSr/E7If7Va+UErpKzoOKfmOM5z/K3fbwLvqscuBaTjGtNrI59zLTIKz7Z67EmXODo6KnguzZ7GhsMSWAvIWU0Tmc1m9uXLF1utVl5BhPkkdQ6ZxLrUDdb6uXpTlSqViq8BokEqv7TtpHbx2eHhYcEhQXidtmk1CTMrAFWtDQvW4LlHR0ceISBFEV0Gr7IOd3Z2/ETE09PTQglAHe+4prR/0YGVujZFr3pQI6BMgRj1VuhCToGcCGyi0K1UnnY0N5tNu7i4sIuLCzs9PXVAR2jFzAohTU4SIc8NxYWw0V3DhAtxdfMZXijqn5qtNw0AEvhNGkC/3/ewaEyGpg28g3Ny8bSyc1v7z716VBrWPtYTydIIZN1lSF3Kn3/+2Q4PD63dbrtHQ8P9uohSFmxUVGbP8510keF93BQPqlmxLylQrr/NikZWtOTV8NJnmz0pt3a7be/evXMLXEEChgf3k36i+Z6carKzs2Nv3ryxfr9vvV7PheD19bWHvJVP4QvWAaQGg3praXul8pTzqoajFvbPssx3bKOk8TQBROFV3QnN8+7v733cATaEjVgbgA48v3guaD9t6nQ6dnFx4YIxglGVUdq/OIc6j0oKxDaFYsg+ghA1OrS/KaWv1+DVabfb1u12bX9/30PWHOkJz2nlBjybCgbN1gpxOBzahw8f7OLiwjqdTsGjCPBD2bPxleiQgkhACG01W1eRwHGgedHwoXqtlFhvOBI0HQrPE/oEA4oxY/ONhjO5Bz7XqJrOB7KEE9OyLLPLy0v3BnK4RZlTQNsf/4/pWql+fwuKuozPtM3aR3XY6PfKy4A0dt1zfZ
Y9ldS7vr52+WK2LqvHQQzUcAb86Ya2+Xxunz9/tvl8bj/++KM7hXhPs9n0aBLyivlG1sInZmv9gSHFZ+htUvrYxc/a0es5FAIeBQzjwGCzIXyKQaa8RK4uXk7NxyWFgCgV3l1ST5DB8DcG4OHhof3mN7+xf/iHf7DZbOaOFpwNOv+6xyAlVyOmeIleBKjqzdOHlSFlHaS4cKLSeImZAV8AS4jJYODYqcYk4GmMgvzg4MBGo5EDXDwueHOwHtg9T1kKBNdisfDEat5ZqVTs+PjYLi4uCqUkGCPc82bmR5oi+B8fHwvlMXiHgh924pELo1YazyGXEOHMIkDos5mm2+0WNpqgeBTEp6xyxi8CgJTS3zQFH5V1VObR419mTEXPY+RzctROT0/t4uLCE/YxLhBu5Hrmee41+ChoTqgQwTIYDFxo7O/v+5G68BEboxBWeHFimFFD6vQnBVq4ZjqdOkggL2oymdjp6al1Op0CKNHi1TqGKHyKZANoaYu2EeELUNHya7Sz0+nYTz/9ZJ8/f7Z+v++e4gi4dX50DuOclaUIbBJFAz72LWUklc1rVBZmTx6Vd+/eudwC6MGvKB3GGblnZoVcaqqeDIdD+/nnnz3lajQauTd+d3fX6ykii/GeqnwjFUSNe/UK6e55Vex4eGg3KSrcD4hkYw1gG4CMF58KE3ipACnqdYZ0zKfTaaHcoc6h7to+PT218/Nz+/jxo7Xbbc9N5PkqYwEH0fgwe17POnqmviVFXaHtUwwRjcSytYox22g0vLKDRheJ4CAz4AsM/vPzc3cwcYQpBtd8Prfr62tbLBb2ww8/2OHhoR/1DL/DMxDGCv1A7qbKrMGPUVayPtjEp5iKPnCtykrNgTYzb/t4PPb6u+oZVsyAkajVhngPIJPNVRhfvFNlZLfbtW6362kWeHrVcaUyNqbMvYT9yuirTpKKQDUFaGA0vtO8iMiE/B+VPp0i/PTDDz944j0MwrnfKH4GGIFDuSgA3HA4tE+fPtlwOPQwUqfTscfHRxsMBl4uinNwEVCAhMlkYqPRyJmNupHUt4SBsT7wmNF3tabVA0VYM8vWJ/MgsBhnDS2o91fPZddxB/Tw/n/7t3/zI9yw2OMcKIPxmTISwiVawPHdERx9a4rgIyoCFeq6kOP18RkQz9jd3bVWq2VnZ2cedsGjzppRYabWLd5QNgMCvuAFds6TYsLmI8Ltej6yer1QqBhTzBHGHO1WAMJmLHjYzLweKeE0hBbvUKXJ88gNY7cr4EIVvG5MQHiTX6ieE7yu3W7X3r175x5jcsR1Hvhb5y5leCg/q/fmawXmX4uiIlePeAQjul5V4fGdrlkMfwAYucNxU5R6gzRnbzQaucfz+vq64P3mPo6fBuh2u91C2hH5p4BjUg7gPe0384WBZmbOFxh3CjwIZfK5mbmzAQBAu1h35AkS2Vitnsq3sSdBd3oztpr/zzjzPWOJDqC9f/d3f2eDwcB+/fXXQmpNNEbKUlb0uk0kBc78H4FLjAzwW9errkVC7fAcutrMHCtEQ7lSWYfzHx8ffYMVtFo91aSmYgVRA+Ysz/PCfJuZGzHaRpxSkK5V1h+yEg8mdXsVN2iuf5Zlnputxvpq9ZQiRvQMvuQeKmpUq1VrtVp+ghb8pDKYZyqfqdeaeQNws/cgyzJrNpv2d3/3d/aHP/zBhsPhs/6ndG6kCGhfoq8CqGULI4KXVKM0L4WB0PwbsyJDc4zZ7373O2u3214CBQu62WzaeDy2fr/vFjuCdbVaOUggZ6Pf79vnz58dRGgSte6K556HhwffKYf1pSEdFgi7Xs3W+Uvkk2Id4TbnflW6Gl7iHWbmp7pkWeagAGUKU7HAVquVHR8fFwAPu6a/++47u7q6coUOANLQRGoOmAc1NJSRInNFRtsUJR+tNkj5U/ukwhXDJ3ocld/5m/D+8fFxwXBg/LD6GXfm9vHx0T5//mzff/+9gztCU4PBwIEA/Hp/f1840lTTVHgfwBNFrD
nQqiD4To+rJHoAkCAsRi4g92painpqdf0QUoXvzdYRAt15TbI9pWMYMxXMgPrvvvvOj6zU/EOdU1VQOr/aVp3TuGlrkxS/KoxUeFR5WvseHQbcy3gSYsdIhvcwwOM7GeeDgwPPz9/d3bU///nPnoesURg94U/TAfCU6mZPDplQL5MaU2rUAFK1z+qVVwCBA4HrULSkTlUqFT+tTcFknj/l9NVqNT8gAmMLXtT9CmbFI15xsESQr23qdrvu8bq8vPQ5SkVqymSp8rN6rDaFUjol1TeVrfxWuc2YwRvITQX3yC912MBPyCOzp/Jg8OpyufRavdGhAk+p/KbiQ4xI8Kz4HOQn/MIz1WsJUEU/0z6Mfa1cUq/X/cTAZrPpG6wBukQEut2uryEO/UHea7RVnSvk2+JlVhnKvciAWq1mf//3f+8pDanoDPMbnV4prBDvS9GrOagpha4KV5WkWvGqxKJHI1pKqvzxcGpeCbmWCCnOyeX4Ui0HQacBrsvl0o6Pj+2nn34quPu5fzab2WAw8GL/FKiNoX8EuNk6T8TMPC8VYM2iwTuA5RJBKZ6yePSe2fMablqigja8efOmkHNIuQoWJaW5AKdv3771ckC8W+dIlYx6YpSJUiDNbA0+ogX1rYk+KG+qQog8mbLsYp90nAg/dbtd90ICuNToIszI+8jRpFi+WuQfPnzwk85Icfn48aP1+30HCCgkBR+EYwjlsPEEEKpKk8R4xijPcwfHXG+2TjXB2EOgUoRfPcTci8LGajezwtno6sUiEZ98VcaAcSYUdnh46OFRIiVKKVCWAqQRNCi430QCFCkpcEkBb/gyemB13vHuU3if1BNyLhWcmlnhXG82Vrx588Z2dnZ8h77Kb0CveriQ0Shp0gn0RB5dhwoCAKCQGmNZlrkeIM8UMLGzs+OyG+cBeXxE2Rg/nBVaNopn3N3deRksxpoxok5v9FqpjsM7tre3Z61Wy96+fWu9Xs96vV7BkxUBaZmi17/jPG8CMa7RsaXyNxpUcf6RZzif8HRSeol5I+JJuBk9ymY4vPNm5tGiwWDgxi4np3U6HZ97DdFnWeZyDwNPI1dgCpwTKi8B1GrcYKzAb6QsUvJKDUKwguofDHnSUeDVarXqB6DQZk7VhNdJfSGKqzpK0wP4HLBLu3QvAU45PeVP5z/qzegEiLxRRl91kpSGNlIANC4OvV69cdq42AGU6+npqZ8YgSJEmeokU+OO3c54BWCG2WxmV1dXdnBwYKenp35CFDkgTOrj46Pnc+ClQjFzj4JpGJr8DwBzvV73xH1CXqPRyHZ2djynFsbScBjKRL1lZubeWZS0etm0SL+OKYzDEW54Fer1ur1//97u7u7s48eP7tWLyqDst75LLXf9XoHrJpCGbqMwV1IeVOBZtnD0edSPOzg48FN5CDvCswgEXSsA2eiNzPPcwzjtdtvu7+/ty5cvdn19bQ8PDzYejwsbA1W4AwzJ54PvWH8IbzxGeuwjgph7CHmaWWEXKd7dxWLhm8HgUwQkgncwGNiXL1/s5OTEr6dYNiBFU2LwBhwfH5uZeUiWyhnNZtMVR6vV8vqW9N9svXEhesN1nuN3Kss2hV7ivegwUBmtm3kiH8cxAujf3d3Z7u7T+dvIOvUi4clEuZ+cnHiZP7w7pF1dX1+7HMK7pHUVdc71f4xu7sGgQ3GqjlHQp7wNuNRNMApi8Ore3d05oGWDXrfb9TFAztKPwWDgax2dhNJG3tdqNY8+UN6P+wEKOFUwPPV4TJWvKp9SnnOVFdGQ/taU8oxp29VhEHWXkuY+4ni6uroq1IMmH1/lKMa1elsxijBkVqunMpPoYjZFXV9fe1lJcudVVgDgFDzTFnKbkWHcp5uylIc1/I83ktzparXq6wlsoTIf4w4co3VW0Xk4NvDCah1Y1grRE9a4RmppoxqGAPC7uzv7/PmzRxqokhC9pDqv2nc1UlJzH+mrQvxRicddjZqzEO+P3rcy7xTWd6PRsHa7bS
cnJz4ReF00B4pngOrpMAnQTPbf//3f23Q6tc+fP7sABrQ9Pj7a3d2dTwRMSy4ggBBPLJ4mM3NgjCCC2bGiYTB2aVOLNR6VhrCLwgrrhfA95/HCPDF0ixCEAX/55Re7u7vzXCzKICmzKMgp83Qzd1FIxnlk/jdF0Zf1BYrgWz9PLSAdJ/0fhfP999+7N4jd5rojnWdiER8fH7vwQBhSdeHg4KBQx9HM3CjSkBL9w1sPf/HDzmgUJb8x6DBkKIrPD2CQdcE1R0dHhVQEhCW8CiDe2dnxE4qOj48LIWTSUEh1wIvBGmEHKxtPAL0HBwf2ww8/2MePH73WcdwsFY2uaMTpvEfaBAUPRZ5TwJXqV8pjrPJYn0dpvLdv37ohgxeHUKYa0MinPM/9tD6MaXhqf3+/kBLV6/WsWq26LFVPDOAC+afeG2QzOZ9xDwPXMufwmhrsZuZ9wsMUQ7EYOFTCGA6HrszJV2y32w7A2cS3WCzsy5cvVq/X3aMK8K3X656yonV/NQ1hb2/PywD++c9/fsabUadqvyPfpryr35pSoFP/T/VD+6n/m5nLgaOjI8/XJ6WNFI6Y1sFYA/LwzsNf8OTu7q7r7Pv7e4+4shGTI6orlYob3Rq6x0HGmrm7u7Nut1vYZZ9lmesB+o/3lOdoWP/29tYODg7s6urKRqORvX371o15MAF8ia4hIsrmb9aOngTHJvGbmxtrNBp2cXFRKI+p7cJww4Os80Tktd/v2/HxsfX7fbu7u3s2pwpWIw/8V/n01RB/BCPqBlbvqoIdbYwupFS6gOZ71Go1a7VafoaxvhurZbFY2O3trZ8gheJTRYWLvNPpOIMhHHu9nt3d3dmnT58sz3PfAEWoix3VhBHUK4u1AWF5kDO3v79v/X7fvVN5nrtlPRgM7PT0tHAMHowBAOYzmFbPGFYGwWLCUmQR4NVF6XMs7N3dnR+hFhlIvZ5lShpF9ZLBonO+aRQFYxSW6slOWXopq9DMPHR3cnJS2FzUbDZd6KH88zx3w4kjRDF4UNDqddnd3bVGo+F5w/ClghV4B2uZTYDwuu58xRtBwX02+aki12P9AAear8r6G41G1uv17ODgwE5OTvwY4bjJBt4gR4/1St8hwPByubSrqys7OztzD+7d3Z3t7e1Zs9m0k5MTD/WzAzsF5sr4lOtiKod6sTeBygR7So6q4R+NTLPnhme9XreTkxNXmsiesvxTs/VBEjEUryF88v7wJCFzAQyAAzMrhMvVGwVAZR3QBtrOOokyq1qt+hnjWjj/4eHBRqORDYdD63a7dnt7a61Wy+U8/ExpIkAxG3N1/wQeX2RtTCnDK/zlyxc/9ZDn614EACwAA8NT5109jNHbyFqOAGATDKyUIajrK/Jq2XUQQIujbu/v758dL4vc0feSR8/3AFR0JV7K3d2nk56gdrvt6QB4X3WPiKZ+ED1l8/He3p4Nh0OvS6p5+QBaZKo6BOr1ukehWq2W9ft9Ozk58YpElcp6gyo1W1kz6lE1Wx/Vfnl56RiIH/Xiwu+AUrMirmPsiW7A6/Dyb37zG9vf37c///nPrzq2mNuUgfU1PPtVHtQURYGoL0UhpxiSeyPwJVeCnDOULwPPIofpsHgZXL5HIargw0LBk8Nu5Y8fPzqDsqsfYAEja55a9KwpKNdwMha47qTWUCaeTjN7lvtBGwEjWt6FfmMdko+Dos7zdZL/999/7564drttNzc39vDwYO12u7CLsEyhp4ReFDRlXslNoLI+RU8Ti1Y9FQqwdJGpEZRlmY8zoNJsvWuTEiUIvOl06ukZGoYEIKBUDw8PvdQZ9SQ1Z1n7QGrB0dGReyVRlJTyQcFH6x/hi2HDySIAA8pEIZQqlUoh34ijgPFiYsRBrF0AKZY9pazUKOP5nJima2W1Wnlx82q1aj/99JP9y7/8S2FTVZnnSUPc0dBIKdNN4d8Uz8b1qLyQMr70Gu4DfGFQEGZWHie9o1ar2Xw+t8lkYq1Wy3PoSR1SWa
D583id8MRSYxXDnvx4jQDwfuZbjXK+j2WcACg6BhiDeLZYo4CbLMs8dYvKAoSFMR4bjYaDXDzE2k829JmtN8Pw3qOjI69drPmGpB7gdTs5ObFut2u1Ws3TDXhHWX5mii/KeOZbkeqFCFbMivpDI7Hcq89AF2L0np6e2q+//mqj0ahQtD/qXzYfwQ/wCdgB+aufVSoVr0tNNAp5p+1TXax8howk3E50Nhpeyq/weJ7nhQgABpRGhBk7wK4ajuw9AZyiU0gNxCFRrVYLx0hrvWGAu+7W1/cqGK5UKvb99997qmA0lFLznJpbNbpeohcBalTIkRGj8IzAUy1efUYUnCx+SkPwXgZ2tVp5uHMymdjx8bF7YTRMqpYoOVOqmAFw5Kd+/vzZj+xDEfIMVXhMevxchTE5glmWuTeLBYTAoxyKKk0NbU0mExeIbKJRsIRwVIWPlcm7sHoY806nY/v7+/a3f/u3dn9/b/f393Z7e1uY55dAZ5y3KID4XpXgppDypRoZ6oVS40CvTz3DbL349vaezic+Pz93YIW3CK89xgYWMuFw0kbUowjY6/V6biT9/PPPtrOz48AQqxvBAkAl5IWHvNPpuDJFWCPYlHf19BxSYgj9cPAEaTEI4Hq9XthFipLhf7xyZlYQsijtarVqvV7v2a5a1gYABmCfZZn34/Pnz5bnuZ2enrqXWg1D5ov76GvMPU4ZVpviPTVLG4cxrB9BgAIbNaJ1HPQ8cxQz12mUhJOi8JjqJhNVbAA7bdt4PC6AB+qh6gZOsyc5zjqB97VkFeOgu/N5pwLAqFsoy6ZGz87Ojg2HQ/dyIcMBw+QQEukCpOAU0FQSXYOsSeUdyhai0FnbrC1kN7v5kcW0n3Q1TZ1TflC9q/p5E0i98BGERBkavW58Hn+TW99ut+3XX391nsIpwBiZrY/axWHTaDTMrBjtxcuO84BILMYMxjPEJic1wBl7ACB9xlPJswGNzNNwOHT5Cs9gBMEX6gBgLSBn2VCqa0IBJHyt61UPKMLDy25/jZTt7j7VTT85OXEgqh5j9VZ///339i//8i+FvTFKUS6lNmB9Lf9+1SapSBHIRKSsngvNldQwtoIAhCFeIBgL9/RoNLJff/3VPazcozlyZlbwWDLos9nMLi8vrd/ve43Gm5sb+/z5s/V6PRdOgET6glWDwGDzEwxotj7CEkGKYFIaj8e+C5TcGPqO5Q1DdDodb8fj46NdXl7a999/Xwhlqke31+tZs9l0RaKhDyYfgI63709/+pPPh1rtutCjQEwxUgoQ8L5NoZTA0wUSP9NFxf9mxZxflJLmfcIrurlEDZf5fG6tVsuT+lHM8JfutGejBgKDDX2ECBE4rA1CYGr9s+mD0A/KF4GPQMGDqqAaY4e8UNJG2NxHhIL+sYmEun3dbteyLCsYhYTcEIiMqxbZNlvvGgW06s5v8mn/9//+39Zut63dbtvHjx+fhbR1TlO8G72L0CZ4oKBoVJkVo04RlEVZzG9kLmtbN0ZMp9NCWRmUsob+VDZynW5g0fYpL6OI9V7WBNExUpg07AgwVoAHbwJGVK7xXgWvGEEAUjxd8JluRgXkstk2yzLP66MAv65VrlU5q2NmZr4fQXUCzhIOMCDiwfpFr+j8p5w6ys/oHb1nU6gsZSzVlxhZRaczF/osHVt9jt5PXunp6WkBaKrcQc4CFkkRUfmEnlfwlWWZn7wE6NKKJawL8AgGOFSpVLw8pm4WRd5pWhV9JxUBcA22wXOKnNUawUQ+iFTg9CPaRl9ZzwBZsyedAP5SJxx9Wy6XXlWGo6zjPOseJb5XzKfg9zXHwKshfgUyKeGu30emiddBmqPBD1ZEt9u109NTOzo6ssvLy0LyLx7Q0Whk1WrVS9uwaxr3NQOC5cHEslmKwv240jUHS0v0YDHs7Oy44GISAcN4jnQxYfksl0u7u7vzgrbkJBLWgRlxuetGFVIdOGZsOBwWjkxjh6nWD4yCDeEMaLm7u3Nvm56QoR
ZvZJiXPKsqIMuE0rcm5UcNs6RCafF6KAVmAWV4hVQ58o69vT03Tvb29jx0urOzY4PBwBX6zs6OtVotN054DyHB1WrloJBw6tHRkZ/qwaYNeMNs7YmJHjBtI/yFEI6eeACGJvabPeUQ3tzcFGqmzmYza7Va7pV9fHz0iIK2y2wtXBF8GolQuYASA4zXajV7//69H28YnwvFcD7zqkJSvaqb5pGKXiiVw1GhlwFrBY4QY71cLj2PX8OLzJt6XPB0q3xXYMEudeTk8fGxnZ2d+TvhAeT02dmZe57a7bavHWQXKQS0QRUc8lxr6dIu1ja7ssn9Z4x0XZmZh2aHw6HLwoeHB7u/v3d+I+RPhZbVauUHwZhZQcmiB9gTgQ4gIpZlTxUEBoOBgyNkNyCF8Y1Gcxl/pIzpb0kRK8S+xEiG8mZcj/AqXk5OP7u7u3Pdy7M1AhX5VOU7hlme53Z7e2vHx8fWarVcn6Oj8zx376fOCzgBOQ5PHB4eFo47ZQ8LmAS+QMawxsgFNVsf7kDaDRHTwWDgZS9xVuh71BlHFMLMnP8BzoDeVMlKsAubzDBe6TfvBHi22237X//rf9m///u/P+NDXbPxvhgFUqdYGb0KUJVxdMJUOKQs6qjMlVkjKMAL1O12bTwee+hdQzUIUy2Qj6CiniJgjHfym13Inz59sp9//tnrnjKZeBC0TFOn03FrG0uMmpXq9cQ6Rgji4cVqZyc9wkq9a5wGpO8AROA9oqLAcrl0rwCAFAsfkK25UcwDLn92TccQXZwb/VFG0+/jZ/BDtOo3gRR4qEBUb50aXmUh4bjo8FrPZjMHh4wJoUo8jBwoAT+x4/Tjx4/27t079z5iCU+nU+dZeFLDUGZPCf0YchxDiiCCr0iux8jDkw5oBfBqOBcixMQaISzGKWyUysJjgbGYZZmdnJxYnueFc93zPLder+f1BlkvACbGjtApuX0RkHHWO5UItAwcc5wKJ0Wlrx4V5niTwvwKllNrVXlWf0eQoMCWXP5KpWK3t7e2Wq3s7//+7z2EjfGix0u32+1CO3gPio97kaG8S1NRdH56vZ6dn5+bmRUUN89j/dBHvKt4gfT5un5V/sD/6u3EWw/gBlBwP9EsavYuFk+l0cyeZDl1p6nnq95d3e+gm7u0SgFOjNFoZI1Gw969e1fIw1ZwpqllUMoBxHxvCkVDT+ck9tHsOb7gM36IqjK+Z2dnzhf88E6tM81hEQpekcfj8djTqOCv4XBox8fHXuJOw+gY5syrmRW83jiueAfPZC3gESZSBR/Cc/C97tQHX1CfmD0CZuZ8SZ81rSo6Y4hIqAdVvcoanUA34Wxg/MA37F0wMzs5ObGrqyvfswDoL5tv5lnBrDogXqJXQ/xKZV4HZbTIcEraaAbWzFxhI+x++eUXWy6Xdnp6at9//70dHR15uRJCfuzu7ff7juqVuWkHm1N+/fVX+/Dhg41GIwd9ZuvdaoRF2TEME6AIlsulgzztoyYtUwePhVOtVr2mWp6vy6yYPeVg3d/fezkeSDeH4Vki1IbinUwmnnM4Go18TPT0Fd6Jkuf95PnqOMW5iUo+5VWM/8OAm+aFMkuH+NVrw3dxPCK/63rAg49XhNJMv/76q83nc2u323Z9fV2oRcp9ZuaKDi8sbSNHeDQauUIl9UO9+vv7+356VQSC1A5VEEbaCLncFInGG8GYqGe9Wn06No+5xZC8vLz03D1AAKAaoMvu/r29Pbu6unKg3e/37fz83E95Ozk5KawX5gHv7MnJia8x1ifKCzAbFXoEdC/RJvEspN4njE8+AxhFA1Epgh3CeHt7e9bpdOzNmzfOZ7e3t3Z2dua8jIF+fX3t44vXUjfE8Vy8NBhDeJ/UYONaol/D4dCazWahwD9AAG8l5fGQZ+oc0fFALh4eHrphSITt3bt3lue5e3HZXGq2juLh2ICvMCpXq5U1Go3C8bqNRsM6nY7njWtI1swKc4OnDO8U6yfPc/v8+bMtFgt7+/at/d
u//Vth7b6kuFXJl8nlb02agqAGVYrKnFhmxdQNDFdKNzHn6pXT/9Fx8V0AyEql4p7Tfr/vp5xh7OI8wqhAH1J1wcw8CkA/0dEAQgwe5AsGIO9gvVA5ZT6fW6/Xs+l06s8GI8znc4/28hyiXaSCgaeIiCFLu92ulwgEjOJcM1vneUePM2OtxiZRBcDq999/7w6/6KziXtas7k3Ra1/bs/Jqmaky4uVqofPylOdNPXrKWFghKL8sewqd/vjjj/bmzRuflOl0WrB6+L2/v++lojqdTkEx39/fW7/fdysHr5TZ2mtLiZ6LiwtfHCcnJ4XFgAA5Ojry/iNENU8LoUbe4dHRkR/Xipte70X4akkHLBnGl7ASY4vwJbzPrm8sJCxFFkaj0fDxU49xFHCqqKP3NCpxFSop784mkAr56BnThWP2fJNYmRfDbL2gUDgHBwf2xz/+sZA+AegaDofWbrcL4AICcOn/9/f37nEHTMDnCAXSPwAc5AoqwEbhY4mrgDdbG1V6GAX9osID3lX1Zub5U1k2hDsWPqEmTd4nqoEFXqvV/Ljdy8tLD+/e3NwUqg+Ymb8XD5tGGACy8Vg+ncsy72P8LPLDpnhQY5QCUpnLdZDOfQSvCv4xjL777jszW4cGkcOEMPH2UO85GqxmT57F0WhkX7588TqQgDozc35D7vE5RzvqnDLHtVrN64niIaLtemKPVkbBW8rz2K2McuWd8BNrTU9OU8C9XC79GNcsW5c2fHh4sPPzcwcmODEYX7N1VRbGjnFgfjDSiBgyRjquKfCZ+n/TKHrzGVP+1rUYQbg6SOi/5uuj00lLMjPXu5RWxMuohhJ8AD/p8c78xknQ7/cLunJnZ8fzNalnDs9hXJFCiMOCfi0WT7XWaQ88ZmYFuQtgpooE5aV0/wIGIM9otVqe9gdQpS40MphIwM3NjZetoo14VDHQ4HFSKqjrSs1VdBl7XpDlP/zwg/3TP/2Tz1/0+KdkVbzmNT5+NcSvSlqZKubMQBpeTjVKGZH/6Qgh/H/4h3+w4+Njn2gzKxS6n06nHn6hriQeUCaSQa5UnhKTb25uPMeNOpBstjg6OnJQsVqtvLzParXepQqQZQwoLM5mABQ0tSvxaLLpBYsIoKvJ1xSNpnYlTAdjAnrYlDUcDj2fi0WKMD88PLTHx0e3DNktS16J7upTJch86udxTiOzRSNkU8AppEZQ9J5GYRl5NP4dwxe1Ws0uLi7sH/7hH3zD3d7enl1cXPjOUFI8NK0C0vJP5PF9+fLFhsOhzw/5xxraMlt7bzE48OagkOE97ZvO7+Hh4bNcP/qGpwChxW/4Cw8b3gAA7XK59HAwCjnLMt9wcnFx4YB8uVz6+gVcYOHTR6IVeHwx3shFZwyZT+ZJPeBmzxW5ziffv+SN/BZUto4UgKryjyBcDUuVwerh4+hnrSKBhwgAqKlNhL3NnpTn7e2tTSYT98ICyqhHCmkIWytYTCaTQpUS9YwRkmcNoXSJFDFfKFU8O+Px2MEI6V7j8dj6/b4Xeqf9gATeDw9i5CNXqWTRarU85arVatly+f+y917NbW1bdv9AIAmAAQAzRenoSOf2TR3s26HKL370R/AX9Rfwk8tVXd3lzn3vyUdHopiRAQaE/wPrNzH24qak/rt8hQesKpVIENjYe6255hxzzLAmj3L1WBNYN5xAnAB07/Hxsdrttra3t3V4eKjLy8tHMsicea4235MnL4skuw5AU0CS7rvUueQakkJeBoOBGo1GOMOkTBSLD6kq/M7fkA3C1Mj+ZDLRyclJdBEBfLoOJmWJsDaAFFmU5iF+9CwOPuwobf6IAkhzVjE9gKLf72faSuEg9fv9INXIM725udH29nb0JPYIAgSCR8OGw2HUQACe0RUeXeNzzCNpYufn53rx4kWwsnQWqlarOjg4yPTAZr64dqFQyHwH3+P7nfv80PhoiN+/wDeKC5K/3/MM0k2VTgqvOeglxO7gmAXl1Ibb29toPUMI9eLiQm
tra9ra2gpmBoN/dXWlYvGhzcpsNouCi8PDQ62tranZbKper4eXDu3OsY5pqAsFTgsTZ3s8t4RngTnwvBFXymwgKjvH43GA6kqloqOjo7inlZWHI/k4RhWjg5I+OTmJxHKKZ2BkaaGStrtKAWcqUJ/igLhxXJTh95Uq8NSTz2Mr8lJakEdykGq1mv7qr/5KJycnKhQK4VjNZrM4rtQdB1dOhLFHo5EuLi7inHAUp1exEyr0ljcoAWTPK1HZVyh1lL0nxCPXyNX9/X1cm/cCeofDoVqtVrSYms1m2traUrvdjudizgAL5fJDex8iIyjJfr+v3d3deMaVlRVdX19rY2NDBwcHwQ6zj9iz5XI5WgEBglxPOPhMdVeeU5LKxaIYeSmfHZWUASw8C8AxfaY8BgPnHNnCwALonJXudDqZFCVk9/b2VhcXF+p2uxE5IMpDuhYG1EG1s06FQiHCmW7MZrOHQli+j9e9wBGHyQE6upB8OM9B3N7eDuYS/SspUmMAGAByb/QOOYFuJuQPmKe+IAWWs9k8B/v+/l71ej2TnnN5ealms5mJkuTJ31OMOL/nkUCfc7ju5HePrvL3dI+mzhW/0/eUoiMK+SaTh/PrAZqsA8VBHEnLGt7c3MTJR3RVoe4E8qjf7wcYlOZEAOuMXAGEYeVdH0+n8zQpsMR0Og1yC5nluZFPolCDwSDqb4h0oZeRW4+KUXzrWMLnHlIPsg1mH3nGRmAvwA7NZjMKAsFG3W5XFxcX2tnZiTaEpEdgD/OiUO6MOKZI7W7e+CBA9QKGvBCwbw4HLSk7x9+8wCrv5lutlo6OjsLLcNZKUggSwoW3D5h7/fq1RqORut2uzs7O1Ol0tLGxEcCV74Wp5USVra0tbW5uBsM5m80iVwrj7tXO5GNA4QMCJpP5UWNQ/LPZLFhYbwDMM3e73cibkebhftISVlZWYrM52+Nz6Xmt3W5Xv/rVr6Kgy5k6whbkdnlxSjryFCa/p7lQ7pktiqKUHht5l+MPAdenQr2+F25vbyM/iCb3KCFv3I8CwbtkYOj6/b4uLi50eXmpi4uLSOnY3t5Wo9EIZdzpdOJ+UXReKCLNe0tyr57cTuoHhntlZSWO/ANwE/5BwSKneMV8H+uMgk+ZAUKb7L+XL18GkILlIiQMAN7Z2QmGA6afSAShVO8k4N8HW8U65615niz4+59Srp9ruAJPWeEUZKcA3V/jOZkjmtYzfxTlec9pwpbkY6ZM08XFhd69exe6mPQqWHB/rxdxeGiWvYTM8rm0Qt+LVFgfHH0KQ9FrkuJ8cGneroe9QATK+/8CmtkL5NQOBgNdXl5GY33Sw3hmmFtpnq7j+xJgWiwW1ev11Ov11Gw2NRwO9ebNGx0eHmb0c5p2wnrnvS5l7eynGPo/xkidPd9PeeSHv56miRWLD7UX5NgXi8XIcZcUaRg8t7Ol6CQPq9/f3+vk5ET39/fa3d0NHUMdx2g0Cn2Hrva8amTR85XJUQUnIcfOyFLoPRwOwz5AKiGznlMNg8vzeni/XC7rxYsXYe/5H6aWbkGQFk5QQBhcXl4G4wpxxT5xfVIqlbS3txfO23T6kPt6cXGhf/iHf9AXX3wR9S0wzO6Q5hFArouewh3p+Gij/hSMulC5knTl6Z9JQ6PuYTnb4+2XfKFLpVJU1U2n0wCcbG7AJBN2fX0dFciEIskhIXReq9W0ubmp9fX1OOseRdbtduMYVI4tI2RLuF1StHzg855zRGi2Wq3GiTt4MZyxnvZQ5XdpnudIyJT3MJ+eB4hXTqWrNyf2xHLAzvb2doDXVFnkGXMfbsxT7xdFuwiKUsoCTZe7FJhI2ZzEp5g1/7sbz8lkot3d3QjTeDWls3+EUn0D12o1nZyc6PLyUq1WK9IyaMWDU0OnAPKKJAWYLBaLkfZSKBQi7A4IQFm6g0RKCQc/4P2SzwfwJMyOskTZAQYdYDiAQJkdHBwEY8ffGo2GNjY24hQfoiWSgnEtl8vRGWFrayuiJO
/evZMk/e53v1Or1YrnB2S4A5cXOnQ2L2Wj0jX/nMPvz0P5DlZ5HyN9hjxmiv/X19ejqM/z4Pk+SVGYgXwQxru7u9P333+vi4sLra6uqt/va3NzM46npkE5zCzg0judsA4YPhgpN2DcE6ksyCaOFqDAWWKeBZ3HuL29DbDMesPwco+SotsFdgKwgg7nDHPALPcIYYAeRl8QHYB5HY/HevnypX7729+GQwpz6w3ffe2d7MnTXZ9q6P8Y4yn9Kj2OSqWYwgku9COpGjjsFNY5qcV3MYfoQsAXQLfVaukPf/hDRA0gpiiipp6EfeCFf+gY9I236kvtiDTv9oOTAsDs9Xrx/BSNeqoA0WHuBeYW/b2+vh4phZJCRql9cPtL2iF917vdbhSogjWo1k/TEPg8c0yov1KpqF6vq9fr6YcffghCJcUMafpnGgUA9zzlfPn4aJFUyoamAsgXcKOe55SC01RYUy+LkDkThWIGWOGxoAgIAb569SoUKQoFQ4qiqNfrUYBUr9fju8iZ+uGHH6ISeXd3NxjXg4ODTGifa8OwIjjQ5TCnq6urwbCRc5qGCWBIAQzcM14ZAoh3DlCAVveQL89JpSwAh3u/vLzU9va2Li4uMu0zUpYlzyHx8ZRC/BCw/RwjBafS45SE1CDkKc50oGSLxaJ2dna0t7en6XQa7AjrhCzCPsG2zGYPqSOdTkc//PCDzs7OQvHh/EgPTkq/3w9w5gwQCpW0D77Lq5VR8FSCcjwgQJnqTpr1EzK7v7+PAieelRAT4VuOYvWcUsA6jZ79OFWeZzabFxpWq9XMCVnIMS14MPwrKw9HEq+srOj4+DiiDr5PABipjklZGtaXNXfG34Hg5x7OQLjM8lreXvsY4AacMV8YPZyqtJilVquFM4vcDIdDff/993r//n1EBdbX17Wzs5PpSwuoxPh7L0aPwjnb6JXFfB5AS/4r+grAioxDJkgKWfU8Uk9j4NocGIDsoCvTlkJbW1s6OjoK2eM76NgCeCdCxRHXpLwAoCFJ+P7z83MNBoMo8GGkTGLqoPj/6Xov6kgBt7/+FKZANnd3d0M/4iSBAYhM+vwA8tC5yNDXX3+tbrerr776SpIyKU3IBsNbPjEgHNDFOB7OGqLDUkbdWVz2HsfcIlOkgvEeHDfwwsbGhprNpra2tsI5A1DTI53nB+eAHfb29iQp7BPpkRBw7EV/Vq/s59koyt7d3VWr1dLbt28jBzWNEKZEgK833/EpsvvJVfx5YTL/YkYa5nVl6V4yN4jiBIRKcw+YMFO73Y5wubct8J5hhF3Z1ITce71eFJMAHMmrgBF68+aNer1ehFVpvUP1qgv73t6eCoWHXn2dTifSC25ubuIYMUnBYvmRkWwynoP75ncWGcUGc4aSAnjiLQHSvRp2bW0tcq0ACN99953q9br29vZ0cnISguRClKf83CP7kLD5mi5SmFTKssIOUtw58gruVFn6+1Iwg3PizCMy7VWiXvRGOxGqo+lhSkU8614qlSK1g0IMWALW7+TkRO12Wz///HOwjni5AAaABsaZBtNnZ2ehiGB6VlZWokWVGwnkCzZiOBxqOp0GYzabzQJcM18wcCTZSwq2FMXp3wNAgH3o9XrRE/mrr77Ss2fPYn9/8803EQlJZTRlm/KUoCvSPEflc4+UWcoL9TvIzguZpYbfnVVP8ykWizo9PY0jDpk7GCacID5/eXmZ6c9IhTC50mlXE4yspEzhFQx6r9dTu93OMF6wXIBoAArME8VHnkLgJzPxd9gnACoFKDwL4BHnyjtGYKvu7++1tbWVqXym6KnVasVzA5Q4TprIx8rKivb39zWdTnV1dRVFlM+fP9dgMIg0l1RGXS5Tw+9y4Lp8kUYqr8gg+MBZUHf6eT/yTGcHLwKCkEkZP8AhAI81PTs707//+7+HbpMUYXQALe8H/HmBnpTtGcr3oa8hjMATlUolCCdJEWEql8uZKCvfLc3TBHkGwC22Y2trK6LHvO6pBdPpNJ
NahmOH3WeOPWUFfUv/anCXPyNRDOaSPNZ6va7nz5/r+PhY//AP//CovihPl6bRgf9rBtWVf0rP+3tSDyml8dnYKARuFIVUKBSiSo/Fo3k5R3ZB3eOZohDwgmnuf3Z2pru7O11dXYXCub6+DpCJsoExOjk5UalU0p/+6Z9GkjV5oAyExzcawrK6uhrKb3X14XQn2C2Y1V6vlwGobAAY0UKhENWjFJhA8ePtEP4FqLPJyAPxkBfzS9iXtite1ZcqEF8/X1/3elPD6QbQgfWiDO7LFaI0v3fPG5KerpB9ythj+Pb29tTr9eJYOg8PSQplMp1O4wSmSqUShSUXFxeh6AB6fuQoRhH5Pz091T/8wz/o7u5OnU5Hz549izzq/f39TF9cirPu7++1s7OjWq0WFaLeXsorj/HMMdbME5WcVGuPx+NIHeG+OWZvOp3qT/7kT1Sv13V9fa29vb3Yu86ewboBCthXRDd2d3czzund3Z1evnwZ8p06HXmOk4M8Rsqw+v+LMPKUtxe++chjolLgymsUAAEI0YmelzabzaIgxSNi7XY7DjlBttDfOGjtdju+g/A50R2MHz1wpXmzfp4VQ4/u9pxDWFEvNEUu2+12sFoux8g5e9JbB2GAAScwdHwfwCBNv+Jzp6enGbC/s7OTOfltZWUliqDQ0TC3yLvbxTwZTtNSnFlFTj4W9fljjfQe8uwGv7tMStkCah/9fj/65nJNB01OLrA/CIVXKhUNh0NdXl5qMBgEKVUqzSvu+cd+ABs4U4n8sG7IH84NZJH0oP8oYJbmvXEBq87MkjYlzSMcniqHbGEL0IPsDWyt57KSugLWcJDLd5yenmo8Hoe8srcpXMWRJA1ByjL73DP3lK6xY4M8mUijBB8aH63iTzdN3mZw783ZiBTwpA/IpOJ5FgoF/e53v4v3o0yYMDdwkuJ3Zzi73W4kuiNMTGahUIizomntUavV9Itf/CJC9CgvFBX3gTCwKTwPpNFoxBnknU5Hl5eXketxeXkZhVMADuYG4cWr4gjJnZ2dUHo//vhj3PsXX3wR56KT+8WxpXiD0+k0Tt9AIQJyWq2Wnj17pvfv38fc+bqla5R65XmykK7xIihKhitzv7+UQXbvnedDYeZ9HiWDkuS95FCiHC4uLuLkJBRpvV7X6uqqLi4uJCnTHoczmjE6zkjjrJ2cnOif/umfwugdHx9nzlxGxmnTw708f/48GNJyuaytrS2dnp7q8vJSxWIxwuj1ej2YMxwwSQEsAMswoFtbW2q1WqG0YZ2++OKLYDpgTj28TPsUno+wFJ/he25vb3VycqKtra24DiwVQFzKtpdKwSdMnK+fMzr+uUUYT0U4fE+mrzN8T3shJe9ptVohb8w3XUEI28Nukr5SKBTUbrcjp5890+v19NNPPwULD/sDMHTwiR72PDxkisGaV6tVbWxsRB9pWBtJ0S7K01g8msZRmNIc/JJexXcia/4ehtsWdDBgwVPG6OKys7MTQEWaF7my3yl0odocvQ97BggArOTp5FQ3L+rw/ZQnk/5aniynziUOFWl1FJzS0YO1B5jSqvHZs2eZLgueK8l9Tibz49PRBR6horLeCR/kB5ZWmvc0h5jghD3Pg0b/oEuRJb7DiwcBqO5c43Q1m82YB/abs78+3zwjehpd3u12A/DTR5VC3YuLC62vr+v169cZUA2+8MMsSqWS9vf3w94B5n3t3H6h1318CiHwySdJubFON1KKiF3w3GD451KFC1uI5+L5GTCB/EO4WHiEiLzQk5MTSQoF4xVmX3/9tUajkX71q1+p0Whk8kvdi06Z4eFwGB4M980CUvhBCJWEbMLz9Xpdu7u7AU6pAPfwlfTgff3mN7/JhH36/b5OT08jOR+PZ3t7W0dHR5E7A0NLxfN0Og3PCVaEyrxCoRBFXYCKPIF5yhlJnQz/7KKwUA40U2/Nf0/Bp8tvOlyWx+Oxrq+vI/enUChE4RqJ6TB+hJ9WV1ej8nRnZ0fj8ViXl5
ehSEkF6fV6oYxhyCeTid6+fatvvvlGxWIxQviENev1ekZx8/387M8Mi7O6uqr19XWdnZ1l2HvACOwauUoo+VKppEajEWEiepWS302VsySdn5/r7u4ucvgwAnQ9QNF7ERlzQRU5Z2Zz7y9evNDf/d3fRaQFYMUapmucOlW+1qkDvQgjvU//x2vpe/N0cxoyxcCQk+YFTCsrK+p0OprNZrH20jwXmnSWWq0WefP39/c6PT2NU804RMR1I6FMaa5HGewNac6c4vzg0EAi4HDRRlBSRMGGw2EAX0mRMuBhehy6jY2NYH2QOe6lUJgfAUzaTavVCtACwTCdTqNpuaRgmpl3rsV8sy/X1tbiftHNyDUjjWqluuspNnJRR0pqpTo3fS8DO7O+vq7j4+NIcyqVSnr37p2Ojo4C6AMSaWkHICPVyDvx4BB57qW3hQQAI5esN84F0QN0K/bXc0BxsCELiER5Gpek+BmnHcfG15ROA+R5t1qt2JtU55+fn0dxKcBcUsgdsgiIfvnyZUTW+Hy73dbLly/jKOtCoRCpCxsbG9FFhvXke0jBYH+lzH6aTsXIiwTljY+G+B1Epoo8D6SkN+U/ozCcoUIZUDXMxsUgIiBOy3M/AD68qLu7O3W7XW1sbOj9+/eRw4nRxlj+1V/9lV6+fBkeFB6IF1ohLOQXehsGGFYEASUHW/nixQudnZ1pY2MjwmK00QE0UxAFSC2Xy3FogLfZYm7YSLS+QrFxDXr1wUqQb8s9U3m6urqq4+NjNRoNXV9fxzqmToWvna+Zr7t/5lPySf6Y4ykgLT1O2veN4/LrzLlHBqQ524PikhTGFZna2tqKojuUA3MGk4mcc9IYTMv6+rr29vYijYCqSW9ov7q6Gg3EWVs37LDq/rt73XjBNM2/u7vT119/rcvLS9Xr9WhbhnLE8AJieM3TCPD4YWLH47F++OEHnZycRAXoxsZGpJ2wx2h+zt4nj3pzczPyzzj04vj4WL/61a/0f/7P/4lQsa/tU84z64t8+HjKaH6O4ffqznUa1XC2OWWCU4AjzRuf0x2BVAwYQkLZpBLRZN5DiHnh+larFfoZXXV3dxeN8V3PemgVIw/45GQ2HCeIA0gJN47u9AEsOV2vXq9H2gwGFPDLscGAU/YS7yFHm2Kc0Wikf/7nf9arV6/ie3EYAeueAoOO5+AYZ9pg1/b398PRA8C4XnESJI8lZ6ROzOceeelgbut5DmQ2TV1Ir8Uao1em0/khOji54ANaScLoweD7aXfIiTQnr9hDfC69P4AaGICDR7iftKuJM608E86/ND+pDyDKax4Npoiq0Wio2WzG/15YPR6PdXp6Gt1f3MGXFCD24uIijoEnFfHXv/51pCaWSg9t/L766it98cUXQc7RLQAb4oW5XtCIHHtbq1QHp471f0RWP8qgsmipYmSgHN3Ypx6+3xg/o3Co8kQI+/1+eKblclmdTidO0vEq4+vr68ijIMQDm8SE3t09nC/NsXbFYlF/9md/puPj40gb8FxTf2YUj08obCuv4WkVCoWg6qvVql68eBGhqUKhEE2td3d3Q0HDoiIIaV4SAgtrxWY8PDyMqkMP66KoCc2yQQBZgOdutxutqNzx4Hvy1i9d91QJpSGJRRjIYwpCPeTgI1WSKSPlBh8F1Ov19Pbt2/AmyZsm1E/ofTAYZHrZbmxsqNvtSpJ2dnbU6XTUarV0fX2tdrsd54qTy4SHTCELDCyMlvcE5e8oduQDheiglVZVnsf8y1/+Uqurq7q6uorDH3788Uft7OzEUZYAGgoF19fXg7XyPpYwB+vr6/rqq6+iBRHGB2NB0Z/3aL26uopoBnsNY1csFtVoNFSr1eLvvn55gDSPiVp09kl63GoI48lcpIRAypbzGushKVhMmE70EWkk6BV61Xposl6vR6s89Dcso+etesgPwOfGmX3iqVvco0e+0Ek8h1/T850BKfy+vb2t09PT2G+z2Uw7OzvB/hJBYA6wL86cwST/9r
e/jQMkMPSwdR4BY996FwAGQJ+0LEAJBVX+jHm6KY8cchlfFDl2OyJliQLmCfl1nZQHwgGa9A+lPRS6ALnimqwlxcS8TgodbCUy4Ics4DyNx+Mo+sOmItsAZl6TsjmmRCNSQi9P17BmXpcDDoD42NvbU6PR0M7OTjTwv7+/D7bUO6RwL7Su4jv8pCeKvtH/pJVVKpWwT6wD5B6vceyp1zXQoShlQwHvT+FAf+1j46MA1RkIn2huwind9ItTI+GL4Qgc5UVrEjwJwij1ej288/Pz88gnStvhTCbzI8IKhYfEfYxooVDQ7u5uTDRGT5obgdFoFAsJAIR+5714FQ7MAZlOpx8eHga7M51OI7TDyRKlUimjzBBQz6vhs3d3d2o2m8EucXLGYDCIs6ELhUIweDAl19fXevbsmaR5Y3Ouwb3mGXVfs1ThuBCn8rAIXjzD5UvKsrx+/ylj4WBOyoZQ+X02m0UoB+/Uzy+HFSGfmRwqSZm2Mpw69e7du7guTAshIc9PJS+QaxPCYcDgSHN2zZ+R13G+yAn1XpfkZO/s7Ojq6iqOZaRVFUwwxgHv2eWA14rFh56rzWYzlD17hVAbRj51eiuVir744otMzhV7JJVFZ+/zvHh3NlIAtyjskw9/Hn8OB6S87k4jv6c59JIiLH10dBSsytHRkaR5UaqkYDCJ1iAfGHLAlOfh02/Ro16SIvzJfaWpUx6RcRDted0+0vAn7BVFWexJ9iftschfxpFEv6Kz3A5g84hKkQfrskN7KZd372LAYD9DEABm3DDv7e3pxx9/fORM8DPy4PPEnPraLgJAdV3p+oa/fSidyv/mexp9s7q6mmHyKPSV5oVr1WpVlUpFnU4nzrQnMsvaEJZOIw9gkvF4rE6nk2mv5qTR2tpatNpD1tP1Yn8gR57+wbPy/Ohe9sJk8lAs+/Llyzi0iAgE+wndixPH/ROJAE9tbm7Gv2q1mimQ5b3uHEp6BLAd19VqNV1dXcW8lsvzk9vSteaengKln0oQfJDuSj+cevLp+xzIpA/H37k5NxhMeLvdjgqzzc3NyGUj1wLWBTrfDRv5FBxXxoLStLdSqejVq1dqNBoBcLkHz3/CgPpEshiS4juZeAAhYVY+IylaVsHqAqJ94PHzWQfv5NnCNDC/eFiwWAgpAIDn4JQXCnLYTCsr89OpvFiK73Zj50bQmXDW19/r7/ncIwUevlkwAil45ec0lQF59XUHnLoXjgIBTE2n06huf//+vb755htJijy4ra0tzWazUBrcJ0qZlj4elkX+YSs9eoHR9agAnr8rfZSQgxkKSWCW9vb2tLe3p3a7HQ39r6+vM1XNOHXsJ+YG5pUK2r29vUx+Fc+A8kdZjkajYI7ZZ+7tM/8wsV7xnYbA80CpO128Z1FBqjTfZ8ybEwKMlAwAxPvffF9jyNBJ6C8iOPSWxLCTf4meRvdI89PQ3OnzIhJYWQABYG51dVWNRiOOY8Z5YY96qgq2IY1meOgcvco6OrvFcc/c83A4DPad4hP2keuEQuEh/39nZycAhodhJQXg9mIw5JProGuxH8g0hAz731MYfJ3T9fXXfE4WRX7z0hL83tK9mAe4nTzo9XrRBjIPUDpTXy6Xo8AYefc8TOYdxwZ5lea22utM3Pkn0tRoNHRwcKAXL14E4cU/t8PcH7Yb+53ODfLkgHV3d1fHx8cql8uBYej4ImWLqZxMwTFjb8B0zmazwAveSxtn0IFuofCQBkGfePQCc0hvZKKwLrusi+uDdN1dTj5Fbj+ag+pfkP4t/dKnFL0zAqmnz3DGBZZyc3NT3W43Ju36+lqz2fyEJ0KEgNOLiwudn59HD0WU6+vXr/Xs2TNtb29ncpI8MdnzN1g4n0RYAVecPAOCgkeCAEjzFAhvE0QIAoECVBIiYx4pPikUCtFfDa8O5X92dhZKttFoqNPpBCj5+eeftb29rV/+8pdaW1uLI928l2bq4X7q+iOEeWu7CMPv11nS9P5SRs0VoMuzh6
ekrJLgRBLO14a94dznQqGgXq+nnZ0d7ezshPK7u7vT27dvdXp6GoqGewWsra09nI0MY0DqCrlyKB1kNC2K8mdGkQB23Nh6niEhoWazGQa4Wq3q/Pxc29vb0RKN3CeYW5QgYaS3b9/q9evXscc8t7pYLEaHi62trQANMFGnp6c6OTnRs2fPNB6PdXV1pf39/WAUYLbSferP7A5y6lz5a+laL9pw8Ja+Dih35ykFAK6fcOxxVlNA5XItKY5IxHHHWee6OP+suxeneBQIfQdoyAvTcg8YT8KVHu3xdfI9A5sFs4lxxTbwPDs7O5mOACsrDwdBIEcUjkoKcOuABVDEHsDRolqae2VNPK/ai2TJsaZA0gmBlIFKHWsfiyazLocf0rnp7+mzAdY49AMn2dNEcCzQi+AEaX6aIw67fyf2mH9ra2tx5OlwOFS/31ez2QxdzF5Bpkit6vV6mYgVeo1nTutpnFUFOzipR0egFy9ehMPe6XTiO9LPuYMizVMYwFCbm5s6PDyM/Y+OJzcXIsTnx9MenYRinxJFKxQKUQxMazUnJ/k/lV9fX57pQ+OjCYOpUki9N//nAMeVP4uSeg8OCCWFsYJ9Qbhub291dnamdrsdYXz/nPRwEgMLSi4JBv6rr74KZYNhh+XCw/AQFEYf8InB9mfxnz2U6kp7ZeWhj9729nZ49ldXV5meqITRYD46nU4m1WBlZUXPnj0L8Eu+LqGjg4ODTAiKOVtZWdGvf/1rffnllxFOJa1ge3s7c7JRnvFOGQsXrHTNnYlapJGCTjcC/N3fKz1mkfnHay7jztY3Gg21Wi21Wq2QNXp4fv/99wHyXFldXl7GqTzS/NhEaZ7vDMPEGeoA13a7HcaNpH0KA1AmKPSUnc8DaHyn9CAneNAU0wwGA7Xbbf3www8qFOankRA24zvdUOzt7UUBls8jYzqd6uDgIE5JwYB3Op1Mo/WVlZUIPfseIyrB9f15fO3yvj/v7x9Tln+s4YDUf0518Yc+J2XlG+CEM4CcIDPIhrNPnHzEGfTdbleTySRSPtAzzhQh32lz9fX1dR0cHOjo6CgcH9JiJAVYo4UUzIzbipSVc4cO4+zAhfdSpAI4pAMBhSbdbjdO8wOE0lHG+5Y6uTKZTPT+/fsIGcPIMf9ebEv9A3+jXRwHbKD/nbDhGVPWyQd7YVFIAelxSgpgSspGLNKwuDuujEqlohcvXkTeM7LSbDYzusZzfumUMhwOMxX7DuzQJ3wWhwkMwEEno9EoClaJhNEJgMgPsuB7wDGEO3+OoXgvKQP1el0HBwd6/fp1pI1dXV2p0+lEnjj70/Oe2QPoeEgIT0fgdf51u93M6ZYATY/iOaPPa7RM6/V6+vHHH/X27dsohEV+n7Iv/rr/nJJi6fiko0753z0iF7QPKU1Pas+7hjQ/ns7zTZhM0H6pVIqCEpQeXhaJw+/fvw8ACNX98uXLCO94gRMC682pUfC+OD65LAQG2PM3fCPyvHgjjUYj+o96+JdrosCq1WpUHSLchCwAl4DiQqGQ6a13dXWlq6srHRwcqF6vR1sOn+/BYKDr6+sAOa4QPCcnZZZSAJMHRlMH5nOPVCm4IpQe523l5bwxnGFFdljHq6sr3d/fq9ls6sWLF5mw5srKinZ2dqKZvbe3wemCRUSOkbtaraZGo6Ficd503EOwhG/wfskrQpb8eZ39yQsBT6fz06K4Hn/jRCiOyKVR+3A4VLvdjkMopHl/v/PzcxWLD8cE4wRirBw0UHh1fX0dp7JR9EhhANclakG+dbfbjSKy1Bl2MOPhM98L7lz5flyUkepW10dp9MM/w0j1M8aYKFWtVlOr1YpoEoVOhL1xvDCK1WpV7XY7ZAEd5e13+H7eXywWg+2nuI7Bd02n0+jtPJlMQjcBNGCg0kIq7s3TD4haeeoTJ0xJDy3PcIY6nY6urq7U7XbD8eH7mVvPHcTmuJOwvb2dSYnweSD3lTln3/Heg4MDFQoPfbNTBpz18zX9mM
5dFNl1x891r+tj/5vvRWkOXrCnRGlYR+QE+85a00cdRpRTvqR5qhxgkVZUt7e3kbdK9KdarcbJe5BM0jzf2e8PWXFGXZq3pkJ+0Mteac+9kh+7vb0d7fQgJHCYPLXEc1b9eFbXrdy76zUnKXZ3d1Wr1aIXdavViq4Zvl5cbzgcRk0ELa82NjZ0dnam//E//kdc1+1KSmSlMpGmsDw1PqkPqhtoXk/zE1Nlyk04IHhqTKfTKGY6Pz+PakvCUlTLO0vDfZGrd35+HqdHeS7Q69evM0dNcgwZ4XRod4Sd8Kz3EuOaaQI/IR4AKrmCKFHpIUxUKBS0v78fhwPAFHCPGFVam8xmM7169Uqz2UzX19fxHel6UCU+m82Cidrc3MyAZcAUc/Xll1/GRvRrpiEDFxw3+s6Y5gnXxzyiP9Zw8JHHEnvYP83legrMpCkpFM8R9jw6OornR15hgjBcNEY+OTnRd999p/Pz8wj/+fW2t7cz3jlAldxTjCAyS0FW6rV6WMijHmnYywusmL/xeKx3797F6VfIf6vVykQU2EPsU8A6codi5VkwMihnr6wGEE0mE+3t7Wk2m8XpboAcmOt6vf4o75tnZ+3Yrw5AXTakp0Pon2uk+zDVvz74mwN1v4Z/jkMjyAPlGOa1tTWdnJzo6upKr169CoDK2krzauiff/5Z7XY79PLGxkYATAr3WOvpdBoGF9lmL9HGivA7DI3vV8AAz0BEQFLsA075ga2k0wv7ED3vzNfJyUnUPNB/27u2sJ/TcCfzi90g2gcjBuGBY0QqledJ3tzcxJHad3d3arVa8V2eepUOX9tUFp6SjT/2SG2GM2YpUeB/473oBz5PruOrV68yTerL5XJ0yYEJRJ663W7M4WAw0M3NjVqtVgBFUjZKpVKmPzkA9vb2VhcXFzo4OIguIUQAOp1O2GJYb4ggsAApXThFEAMepeAfueC7u7va3NzU9vZ2Bi/t7OxIUuAVMAZOGzn86FkwCRgGUsBb9dH1BYcVBxNGOXV62DM///yzvvrqqyjSZT8SZWi1Wo9kNHVG8mQgrclJxyfloOZtgFTA/KH8Bp7y8v16hUIhWM/b29sAqYA8ej8SwseYEgLiODvAH0zO8fFxGDVocv7uyf2Ehfx5nVVFuBzMENIh/DObzULQETKUI+AYw8v3wd7CNJCS8Pr1a00mk2hYjiKFESiVSpEjQy7JbDavsMVLY8MUi0Wdn5/r1atXuri40D/90z9F3pOvCeuZCmmesUjX2EHdIowUjEjZdAYHJ54gnuby8V7P7cQLJ+fNHZzJZF6h7vl1sKCFwkM+6snJic7OzmKdUJCAqV6vF61OPA1lOp0GWOR+WXdkEKeEZ2DtABzIBwUyeOLMydraWoRayVsmv5ACPI4P5LqkAcDeU10N4GaekX+Up6To6+vhsM3NzQCrP/74o371q1/F+8kr9I4Yzj6n8uuv4Uy6zC6KU8VIncQUeObJaR4DlzolhNEHg4G+++67YPh/+uknvX37NkL/nuMGm1MoFPTzzz/rxx9/DEPHwRTF4kOXiZcvX2bA/mAwCAPqLflgaryYydlymEcvvmAvsV7eRo95YZ/yDOwnHCAYX0lhR/iMd9q4ubkJYO3hfewKupp9WK/XQ/Y9f5ZiwdlsFsRIs9nMFKe4bXEZdf2VArzUYVkEcCo97ivtz5SyarCCztaxN3GgpXkeMIMDa2D0OA1JmlfAt9vtKAz++uuv1Wq1oigPAmltbU3NZjNaWoIPcIKQPwAcgJm0I2TT8YCH+qV5tE6aF7/yOQ5BoUsRqSToTe4R+zCdzg8wKRQKGWcOfQp+KhQK0VMYh5E+6Nzr9fV1sM3oZJwsAC+OE+lpGxsbGVnd3NzU69ev9fd///cZoMlapjrJWXQnij40PvkkKX/NFagrQDeE7hnlbSa/BhMzGAxCIPk8SpVB6xuv1Cc/DmobtpNcFG8FkTInKPG0r1nKAgNsWeS8nBlfBE/s5/psSjxlL24hVw
RGjPY7AGiAgqRgy1CGzgbymWLxoVABCv/w8DBOMvHE/ZSV9XVmw6WgNM0He4pN/ZzDNwnjqXvM8+z8tZTNkhQVyBhe5APlwHeXy+Xwvn/88cdoVt9qtTQYDKIJNHKDgnSHx6ugfaDQHdjy3Le3t8EW4ABxT34dZ2H5Hozw5eVlGG4HRoBXZIO8aJhNHCn6F3JNaR5ug/Xg/dyvNO9BOZlMoocsRh1l+fLlS/3TP/3TIyWYyqr0+Dxzf68byEUZKTBNQQn7O9VT/ln/PJ8hx357e1uvXr1Su93WcDhUqVTSL37xi4jIoHPRAb1eT/f3D6d6OSu6ubmpi4sLFYvF6Mu7ubkZDos7U9yPM+2SopsAjNP6+rqazWacm84+QGYJbeLspQDc15lIAxE3SZmcaQfByOTd3V3YII8OuF3jUA1y+d3BdceQ4jIPy3oaxdnZWXRacYCdrncKXl2nIb+LMJAz7Aa2M7X//jPsojQnAjxiBHmFo/D27Vv94Q9/yOAGQvRcl/SnXq+ns7MzVSoVHRwcSJp34aH9kutd0l0kBUFAtAG7ii4DtCLHqcw5GJPmHXS8cwmgjzQDyIg0LYd9gyxhL6rVaoTf0cM4bZBfOPEwpIBbcFKh8FBP4MAWgM6gMEya4yHmv91uRzcKl4MU+7mD8h/BCp90khQbMA+cusfsC5LeoCvTVGiZXJrnuufK5r65udHJyUncA4tEmIh8CsI+gEIWkEIlvp/v9lYrvtHdu51MJrHAPCu5LyyYJ8nzvN4Gq16va319PSrzvX0O97S6uqrnz58HS+qbGQH2zUL7LXJX2LAwVfQse/nyZeQzNpvNOI3D793ZilTY8jxh/zkFCYswUpAizRPKXY6fcsL8GimLgUFbXV2NMAzr5XlROAywJ8jH+/fv9fPPP2fkUVIAMaIBhF/5HVny4kA8YhQgMgpriXeN0id3lnXnWbgmSvTu7i4K9vx7vcsETJKzCBhl70jBNakIByzDEFerVXW73WARyNXlOwuFh/xpOhogu61WK37OA3OpF58C0VSuF4VJTZ2IVA+nYDQFKfyduYDV2dra0vPnz0MXNBoNSdLu7m7kRfPd6EN6T7daLZ2enkZ7tPF4nOkFyb5ijskXlBTGD4O5uroaLBEyyr1626t0Dng2ogSDwSBC+oAGLyhxJpKQKmkAyP1sNotWaewz5Bg96tXPhJEbjYa2t7djz8NEkcICML+6uopQKgCe9AIIEg/tpzrVjbvrYidZUsfrcw3fS5IeyQR/SwE2r7v9LJfL2t3d1Ww2i3zSH374QT/++KN2d3d1cHAQTvnNzU0c8ANxNZvNog/5y5cvHzXRpxk/AxAG6OQgCkCs6zr2iKeczGYPEUzkyKOy4AB0nu8vnhsb7Gkh0hy0ey6qpACYyDlRZKKwzvxzfxAQkBqsAxEvoqqQcBAsp6en2tnZyazRbDbT2dlZ7Bm/V5cBX/uUEGPvf2h8tEiKC/v/KcPkN5QCTxdWPudgiIcqFArhjd7f3+vi4iLod0lRsb66uqpOpxPsC2eH47GzMLSm4L7JgUqrK/Gi07w+nzhH/HjuXM+BqSfy8/wA3en04ehLikpo5i4p2lzs7e3FpnFj5B4VgsU8IlScTnJ/f69vv/02Nvj+/n6wAeVyOZhYPsfcp8KVCpCDUjf+rGEqD4swUjYpZZ1S5cjPKTD1393gwzTB9HnFpDQ/ihc2kdZMV1dXceYzKRrl8kMPP2+PlrKNfBf3j+xJ86NXfY+iPHkOZ2N9nXgeL2ik2hl2FHDabDbV6/XiJCzCQwAAWGUP6ztIxZjDBkjKANXxeKyzszMdHx/H3JOvR3u577//Xt9++20UVLHfvBAzdT7ydBLz5uu+CCOVW3eMPTrlAM6fy/UPgAC9iBFk3rgmuoojH4lekV50cnKSAZjValUXFxfRaQJHTJo3TpeyfUpZPwCr6xzuj/UAIMAAkXvIM7L/ACnI13g8DgfJU0YAIOytdO5SRow9AQng7a
QInSIz7XZbjUYjGGiuJSmO39zb29Pm5mbsrZ2dnQD+7A+AugM11zusJ8/Mff5HWan/18PvJQ8reLTHnSlpztCtrq7qyy+/jENDptOpfv75Zz179ky/+MUvwo7DOLbb7ahO5/vJ793Z2cmcYd9sNsMmStmoKQCXw31wnOl3PZ0+HIuKjnMmkjQjcATMpQNJbw+IrNCdhfnCVqdYAFIB2XUm1+fYsYMXqOLokwcN8cd1KDCvVquR/kBbSmwJ8ksbr4ODg5i/vPVnuIz+R9jUj4b4HXD5l+eFHNObyAs7+fD3kCRMk25yLWBIOdsYZpJep51OJ0AXE7W+vh7V8zSdJUHf0T4Kz5Wkh0lTlsgpeMIBCJ4LCZsMRU2ofXt7O1qTwKqRK+LtWqR5FwCYUu4dQ0J+jCv1tbU1/fTTT5rNZnr9+nWkCQAEvAVRnuHzdfQ1ynuPOy15zOvnHimb/SHAkgIan+s8kF4qleKIOOYSVgmlwN5Abjie9+bmRu12OxwjgB0pLqwvhhE2CuMozXvVucx5Yjyg0dvb+H51ueUevPiE3NNWqxVdCVDunU5H29vbWl9f18XFRSatAaCZAiR+7/V6Ojg4CEXuzO/W1lbc8+rqaoTYkHHYuPv7ez179ky//e1v9e233z4ydB7iTdlSN+wuB8jHohh5vw93qlxOU3lO58CvUyjMO35gkDqdTiZ/HvaRIgt0L8wK+hXDznp4Opbft5MCOEYYXICg369/Hh2FPr+9vY1CJvY0Bp1B1Ixr4/T5HPFshEnZ29wL+hGdTDcCrslRprPZLOoiOG6VU38I4aIjAPCz2fwgj729vdD119fX2tjYiBxcB6UOvpk31jpv3j73cOAh6dGedOzgLLAzxb5+Nzc30e7u/PxcpVJJR0dHobukee9PHGI+T5spKvJZ493d3dAp3t4xxSucXokO5h8y5AVds9ksIrfoUiK6nADJYG5g46V5+J/58aIn+pQ60YX8em0Cv7tzBE5hsO9pS8l+997SNzc3Ojo6Cn1fLBb1/PnziHCTngK+gnTw+UuZ/3Su/D0fG58U4k/DD3kXf4q25/enPHxpHt5uNBpRLEGvPAqBMIZsSLxxvBWu46CBKjPyKTx3AsM7mUxCCTkjhcDA7HL/DiA9LwWgSFsVnm80GkWovdlsRg9Xro0ycyYiTTWgwX+pVIowKMoLYMNZvRsbG3r16lXcgxeQ0Euz1+vFhk7BtQtXunYOVPNYnkXy5L0QJmX+mV+XwxScOtBxFpC5gUnHkKeFTNK8t+O3334byq7X62W80WKxGAaM0KdXhvLdKNOVlZV4NuRPmlcWAxgJg6LAXXZ93dN14xlo3TYcDrWxsRHh4NlsFnnMGxsbEaqtVqtR0Qlz5WEw2CGeCQeUZyEXEgVYLBYj11pSzAl55ePxWK1WK5Nkn66157NJj3VU+syL4mDxHK6LUlCS7jmPaKTXIbxP6zL6PPL8GJ9WqxWN0UnzIDUDI+nAidy/wWCQ0afMOfmVpJlIc0ABYGDu3YihRwndc//SPGcap4hr8QySApi6Q+KOkn+X5/yh02HGHBjALFHV7L1WyVllj0FqQGzQRo5+muVyWa1WS5eXl/riiy/03XffPSJz8myl6yReWzS9+5ROSbGDO838zXUGbD06oVQqaX9/P6I3yCh6Ffs7HA51enqq09NTra2tqV6vx9GlyH7qOOMsFwqFTH0H1fMASWTBMQSRK+8FjQ1APiA4vO+vpIxuTovxXI+T5giYpJDKW5ohGzg+EB18T7FYVLfbjTZaHkkBT/F9tJyC3GLPn56e6ssvv4zI9GTy0NHFx1NklWNCnIJPca4+uYo/NeYfY818oz3l+XOzCB9VdlSU0kKC3LxutxtVpKenp3EsIswTntDd3V00pAbl83148YXCPN/JhZXruBfP6yh8Z58ADJLC+0aQ+/2+rq6uoiEzIUyuhXLEY2NDunJyphLB436pCiQE9+LFC33xxReZ99FMl9MvYAYQPA
9LuBf7FFBNwep/1CP6Yw2Mot9Xen95zyRl2/ykgAd5IZ9se3tbz549C1bJvfXBYKCrq6uQyZubm8xpXsPhUM1mUxsbG5l2YxxDi7JijaT50booItbB5cqdCA/do8gAjR5B8IIS2t8Mh8NouE/Dfj/v2TtBrK+va2trK3N0Jd+Jwif8RnsjmI6ffvpJhUJBu7u72traCjYM+YcxJu9sPH44Wer6+jrW8SmGGFlgfd2zd+ORJx+fe/AMThL48P3nMu77lmtsbGxEX9perxcHKGBwYWrcscEIUkDquq5QeMjPI0zoTClRAT8tbzgchpPCswCAPe2KPce6UDDK+5BnB5XsG/Q8ewE97vsFfc6z8X7kFB0Jq0VKADod9pT9RS9LnpP0KfL6ADCkHRAiHo1Gev78uf7n//yfmZOuXAYdSDMvrqf8/YvkXDEcN/jfUsLAwSl7dX9/X9vb29ra2gqSSHogeSiaguEnr7dYLMaBEnd3d1F3gs3jOFMpmw7EPTkZVavVghySFBFPIlmASt+f/EN/4Yizf5B/7gHZ4H9vTcXenk6n0RsYxw1HbHV1NVIbmVPfn9wbB23c3d3p2bNnoR84Ac5Bclr059EUCiGxN2/evAlHMiW6fK39tdSmplH4dHwQoPqGSIGIK33fHE+BV1eYfi2flGLxoa/Y8+fPg9UsFB7yQAjz/cmf/EmEqX7/+99HFZmHVQFjLAwLwSR6+AdWIM1NgqqntQOKkcnFY0OZcv+wm4R+rq6uokE0YHo4HGplZSWOVMOr9lwTZ/8QFJQmQBjhbLfb+uKLL/TFF19kUhMA/nye+y8WixG6SMOeeV65s4ofWudUFj7ncAbNUzf874w01MTPLg98Bkfj+fPncXwsa4NRJneTk8NQVpPJwykegAJYFVJSCoVCnCKGMuN7YXTY5Hjc5OYxvFCQ50A+kac0gd5zV70xP5X1Ozs7oZg991uag17mBoY/BfjeMzgNtc5mMx0eHmpjYyPADqC0Wq2q1+sF8CW1BweS73cwx9qnzLevNUqdvznY+9zD92PKrHm6Cc/qoWB3TvgHW12tVtVoNEIX0eAcMOXhQEmR4sEcEwKUFGt9enoaQAyA6I4390OY03NTWT8HnYQpkTV0vxcsudPl6S4ATgcEzrC7DoM1c+Puhtt7oWLnzs7OtL+/n+np++bNm7gukTr+xu+wquPxOIplNzY29ObNm4iOpWDJ58/tQPqzO9mLMtK0BH8Ody5cBzspUKvV9MUXX+jg4CAAKgdF0I6SnH9kFiBJPcBgMNDOzk7mEBzm1COlrhfQ3WAP6UEfAmw9aoBdwdaSs8/hIeg7fy5k2JvmI+ekDDJXOIbkPs9m85Oe0nUHe3hUgv3Y6XRULpeDTPF1GQwGajab0ee6Xq+H0wkoBQeRX40ztbq6qmfPnmV0q8sww+XU5ZbxMbn9pJOk8oTJvyCPefCNlL7G4nBtz8GoVCra29vLhHlog3B9fR35ExRZ4Bng5UrK5KhSaenAEsDm50GjeClGQXFRcAUQRWmhgLhHru3KArBBeI2jWvHmybGjAt/BBtdCifJ3PHjyXUhR2NnZCdaDjeuMQbH4cPY5xQ3OMKXrlxpsFLC/lgLbFAAsymDtnwo55P2cMpEoIml+BCmvHx8fZ1pDEeqRFJv4+vpal5eXsR9YR+SdAiqAGSErBkYYhsfzlV3Ze16qGzm/X9YJheusFo7aYDDI9N31PC/AKfLoUQbkF4WZygL7nO/iNK16vR6hN0AUzwfjOh6P1e12tbm5qdPT00xPQDfsPlJw5+ub/u6KdhGGP1e695wYyGNXfV+z5uvr63r+/LlevXql3d3dMFD06aTR+XA4VKHwUPhzfX0dEYA0dQPjypxhjHE8cLKkLHhA5jwfnhCqhyqRZUKuyBQgx6NiyDwn7/gc+Fp7YQ25zgBi3kseNZXMPCd2hs/Cru7u7qrf74ctef/+fdwL+xSb4ik2OFmEkNmv7lgxXP/yeX+2RZFb7t
MdVJdb5CEvDQD9VCwWo3MJB3UQ0m6329rd3Q0d4zanUCio1Wrp5uZG33//vfb39yNyU61WHx1kgtz4PvK2Y6wDKUzYY2Tfscbd3cPxn3RmACDioPCcXJcWfZJC70nzoifkhnliPtDVnovtoA9njNC8R1i//PLLYJYpdIUomE6nOjs7i9A+8ki6lRN0ML2Xl5c6OTnR+/fv43udRWWumE9fbwfWHwOoH0UTDlKcRnbjw8ShOJ9C1A4UuEmM79HRUbBJKCBYqFKpFCckvXv3LsI9pVIpk6vK90+nDxVp9EHD8/LNgTHlPsbjcXgrXoFHaIfv9JCUh6KYcGmuCAuFQoS2KpVKnHTF4NkpYHAWyOeXVj8YcPL1VlZWIu90OBxGtSkbw/O/isVigHWuDUjJU3ypEvRNlo7US1qE4fckzQ1WCkT8Pa5cXJZ90+Hx7u/v6ze/+U3IaqlUUqvVeqSEkZ9utxvAzq/jioyjQ9vtdobpxGDS3BnGlfVD0eIoEWJC+bA/Hcixt/D8mRPymjAChJGQYfZOr9eL6yJf5C86OOAZmN+3b9+GnOJc7e7uxu/knMMWwJbgEM5mM33xxRfBZKUOiK93KsPcAwNjmgdmPvdI9yTy6KHtvOdyg49cVKtVvXr1Svv7+yGXrHmtVotCFPYMx8gCTnF+WRMcA5cZUlbQs67DyRMkBYYqYPQqOpF9ABNFrjPgT8oeOVkszk8lIwcW2YX5Qf7JW2YfeK9XQChFpcyvy9Z0OtX19bXOzs5iTp89e6bV1VVtbW3F0ZTeYxUGGkfAO8Dc39/HqVypjnVWN5UHd6BTAPi5Rx6xIc1rP9J/fMZtEhXkX375pb744osggcrlcpyERk4mZM/t7W1U8b97906TyUQvXrwIuSJc78yuh6XR04A6ogVpOhNAF/kiUoq8AvgYgF70nxcysc5+QBEROPJAub9+v6/p9KEP6u7ubqQC8nwuN16Pw/ccHx9rNnvoTCApQCdkBCkQ9Ld28D8ajYLsY0/yGoWsqX3x53e5cPsKOP0YXvgkBtUnIe9nZ0P95txLSdlUrk1uTrFYjDNe/f2j0SiAY6PR0Js3b3RxcaHBYBDKjZC3pAhv3tzc6P379xoMBtrf39f+/n4IC4sIU4pSQ4hZRAwmlDd9zvBMOM0BY07oFsUIE+UJ3IBYDhBAKD28Lz14QaPRKPJP7u/vdXZ2ph9//DEKxsrlcrQv4VxtqqFvbm50fn6uo6MjlUql8NTevHkTbX6oZPX1wMvztWdtnZnxHB4+l8dUfq6RypuDM2cd88A4ii8FuJ4rxHVqtZo6nU4oFb8eig9WCjn2/CnAAUegokQw8ITMkT8/Y9kVJeuD/HJ/qTPiyhI5RnaRm+vr6wjpc92trS399re/1cnJid69excVtnye53cGCxm5v384LAIZPTw8jApv5vfdu3eR83pzcxNFMczN+fm5vvjiC0kPe4fk/NSRSI2fryvD19XlYFFGqtClrD5NIzX83Vk2Rqn0UE1+eHgYTs3XX38d+hY9hhHHsDG/1Wo12FGYUQwc4AH54To4OeSe4mSwPmk4H11G1IxoAc5Q+lyEzEmlIeULw8eeYl48woUjnx7M4t+Hrj4/Pw/gC6u0u7urer0eew1bApA4Pj4Ox5PTziA6AOAAoS+++CIOa3GdxP14eNjl+6no1ecebhfydGyamuCv8XnAf71eV6FQiAJjQvn/+3//7zjQp9lsamdnJ+xsp9PRt99+GwWVfqS5k0eMlJwBtHr0krZQ3qsWmaFlE8wkzwWTTyGi51pzXzh519fX2t/fjxaYAGTPs8XOwGL6/XCv3pMdezAYDLS3t6dCoaCTk5OovEdu7+7uou7nT//0TyUp+goTUUH3s68gEbe3t3V4eBhO2IfAZqrL3Pn7mOx+EKC6YnDD6687Rf4U64YA+P8OeDyMSt6Ee18wSnjDhNpRXii2QqEQuXFQ6QBGFBbeQgpeyA/hHrknDDUeN8wUXreHUmFhJY
XyXV1dDaVN8QnMEQAVZojvSxUQoYZer6fNzU198cUX0fbll7/8ZYaWd2ag3W5Hb1XCUrB/19fXGcPuI/VuUxYd4OOhMQf2izDcyXE5lPIBS/qzKy7/O2v2/Plzffnll5lrIZuFwvzc8NvbW52engaz4qclYTw9RAnjyBnfvJ+WN864eoEIHrankDhbzx7wteR+WTM86uFwqHa7nWm4v7Gxobdv3+qnn36K1m3VajWcNwrvUmeUOSiXH46EfP36dRhljH+3283kNLGfZ7NZOHBbW1uazWbq9/vq9/va29vLHDjhw9edNfR7ytNXi8RESco18NJjxt+fy1MaHMTitAIAnj9/HikWtVotwteErTnGE4DJunqKkx/TTKrHdDqNNoFesYxxxpDlsaGkQgEmOGmH7+E7ZrNZ5N7DWMHwT6fT6DiRAj2eF5BAQYpH6gDJzvhBHrx7907/5b/8F9Xr9fguABks0vPnz0N+S6VSgAMnH9DxgByPckiPmVIf6Tq7DCyC3sWGuRwy8vRvHhs8mUx0fHwc1eKAsqurK41GI718+TJOIjs/P9f333+v4+PjyJ0fjUaZwzyc/PHvRpawZdwD9huQiHN2d3cXOhHQSo0LXSzSw1mIuIJrIB0AiLyPw4lIo6HHtEfykA9ngF0OPBXQSbV2ux3pWdvb2/Heq6urmBsicml+OMV+6Twxh99//31mv/vADnjk0vHCp+raT2rUzxcARD4GWlNmLe9meNhKpaJGoxFNtzGaVEaS/8O/1dVVXVxcxCJjLAlFcna9JHW73RDUk5OTaFDbaDQyygK2EgWMAsFIOgvMQnGfCDQKjs+QD0pPvZOTk2hBRVXqxsaGNjc3M03LmT/of84bvry81Pb2tqbThyrQ+/t7vX79OsOmUT1dLj/0svzqq68i9wZlfXt7qzdv3mRSHlhLvtvXLRUmV4ju5ft6L8JwkIKCStl8l80UwKQOFT9vbm7qxYsX4XRQ0UtiPg3CMXok8wMGKASC6cdwAl43NzcjP65cLgdLiaKS5ik2KD5JsRcwiM6AouicnQIUuJPiXQZ4fTQaqdFohFP1n//zf9bbt28jWuBsA4qMIgEKGwEQr1+/jmblflSqV0ID3KkoBwTxjCsrK9rb29PXX3+dyfdlfVxe89bTHV9PTVoUuWWke85/dsWfGn5n22Cj9vf34wQo2D1y2AGJ6C9yNGEEcWAIpbLv0Xt0d8DZ6na74bh771o3fsgnAE1StBfEcffImss5hnA2m/dx5BnQg66fWGfkFeeRym/YL2QLe7K/vx/pZuhaHECYVi/W8wIUab5H2+22SqVStGnzwi0YYNYRW+M6OJWHlF13/fW5Rx7zn0dI5ckp+m19fV2/+c1v9OzZs3BULi8vdXV1pV/96lcBHPv9vur1esYedrvdWD9SkFICxjEMUQEn25AbSB8caTACqRykc9zf30ehNqkdOFWS4jtI02NvUYgFXmg0GlpdXc2E2NlvFI7inHFdz5cFs1Azg3PH/tna2grSBALi4OAgACj7CwxEFwTSy9D1AN+Li4uIDLK2bkO5N4aTf/8RIuCTAGoqcO7duxDmsXHuTaWfYzFfvHiho6OjR+F3WElJkYNKOMcVEowgQke+E9Q/Hi+GH1bKny8Ndbr3T2gIyh6l6gwY78WbZoFo59LpdB6dyACQ7PV6ajabscgIHxumUqnol7/8ZRzlRrL4wcGBCoVCACAMDBuKUDQHA8A+TadTHR8f6+zs7BGL5OudMk0pJc9GdxZ9kQb37jLpDpb0uC0Pr/k1pHmV58HBQZxSRu84/s76oIxgpbgOgMBzllCGAMBisRj5coDTzc3NR31DeRYUrD+nM14YaAw3su4KmpCmK2C/J5rwTyYT/fjjjxH+JVxaqVRiT6FcASmAAPL7KMThOTiVCNCKAsYRbTQacdwgeWEAKvamG/d0/XyN8yIFi2TgfaSsP8P1qJQlDpyFgomv1Wr6sz/7M71+/TocWH8fcoYDc3
FxEceaIo9EiwjXs4dgZWAx0TutVivW2vNMvcrf5c8dR3QncosdQE4xnhh2GtwDQMn5lBShSUAt+aqcK44u5+/Mt4fjC4VCFOykKU7D4VC9Xk/7+/txDwAc7uH29jZz2o4TGfy+trYWKRC+nsiBr32qoxeBOU1Hutfy9Ck/u+yWSiXt7OwEYKWy/f7+PtpO0ke6WCxGE/z379+H3d7Z2dHOzo7evXsnSbG+ACvY1xS7cB+ASPraekQKZ8LzrKlv8Xxn5J696kAU7MCeYU96moo70bDAXgcjKXBNikEoyPbUBPYBRMr79+8jjcpJCwCus8ySIkrCOrHP0e+eC5s6/OnvqU77GGb4pDZTeR5I+iUp+HQF6tfxTUXrAs+lkxRFFLCK5XL5UbsT2ifQLqXf74eQ3N3NT+Ghwm44HIZiooHvbPYQQiSshdJ2A4sQkDPligFAi5BRrUqoixY93333XSQVk9cynT7kecEUMDAUMBYwYMwvPQyPjo4kPQj627dvA8wQjpXmuZTuaREyxTilCsPvg//zvPeUcfTPLOJIQXcarvD3eDjCASwAqVKphBcKg0oBEQCNgiccB7xZwo+ejzQajcKhaDQaury8DK9ZUsbDRb4lZRo4k9cmZatBWWNn3ZAL5BaWCGagXH5ojF6v1zMNm9+8eRNH8v78889xMhZ5duTauZJjj2xtbYWj52uwsbGh6+vrTINolCVzxWvIF+wsBqfX68V6uUPA/6leygOrrPUijHRP5rEUHvZzGXXA4mxGvV6P02KKxWI4Ixhi3ktK1WQyCdaFXHjkjdCn9NCXstPpZApI0Zse1vdwPrKIPvXUEGwEMu1OGKysX89ZODeG6Hv2OSw+r8HqpmQH7NNwOIw582gRzBipK+S7Amy4B/QxkRYiKLBko9EogBWdFFKn5Cknxf+Wkgafc7ispiBFepy2wmtOWFWr1ajYf/nyZaw38yk9kD3kT3sEaDQaBaZw4JW24mM9nVRJQ/739/fRcxU80mg0IvLAwTg4PR4h4BoOvl22+T6iWB4hwy6wJ9jPOFG8x+tanL2F6cTGUPzF5yh0or0lc4HedufJCTdsDjUQOzs7EQV0fYpecvua2li3rx9zsD6pUX+q9PNQcJ5nlMew8nfCLy9evAiEDyhdW1uLPAoAIkaaRe52u7q4uNDd3V2EJCVF8Y+DScI3TDjtlpyhxYvhMx6WkhQgBGM9m83i1B9XEiguQvv1el2tVivunX8kLNOXcDweq9frhbdESB7QTjEOoU8YLw4B2NnZCY8SZg5hoTgH5uHu7k6Xl5fhGPBdHq739XanIs/jcblYFGWZMqKpksx7rzNxqaEAQNK4/tmzZ2o0GmGw3RBxLRLOx+Nx9OZFAdE6DUXA/8w11aHtdjv2BO9xZQbbhDJmvfHyYZBQ0HyG0A4AFVnAQHO0MIVN4/FYBwcHUemJU0jYx9ut+TyenZ3p7OxML168CFDvrXzevXunarUaxWB8Hk8dQ0Je7srKitrtduRWYcBSg8C6SfMTYFwZukynDOznHl6BKyl3P6ZscZ4xoCBtY2NDGxsbATylbMsfz9Ejvx4WZjabRW4oYBP9gk48PDzUZDKJkDfrhpF0Gee+ARCS4lTA0WgUzrPvV+QbNpXXeQYMsOeveuSMXqN8bnV1NVOdDBNPPQDO4XQ6zRSS4ngRSSACCEDg82nBlTRvw+UhZGfB8nSn/46sevRkUXQt40Mg+6nohpMFzWZTz58/19/8zd9ob28vopZEpgaDga6vrx8x4+geJ5xoxYhe5LvdwUudGs8t5rQqwCMEBVFccAU6mz2RPiOfxYFBf3vE1nNGKR5FniDbnGDwtBcIPk9PAVTyt16vF3sZnU30FaArKfJOIVTIUYV4Ya45Pa7X60nSo+fmd/awM+SpHPxfAVT/Ehc2HyiKPCrXBTb9LA9PInS73db+/r6azWZUhuKF025EUiTIr6+vq9/vR5IvuX6DwSBad7jC5h47nY4uLi7UbD
ZVLpcjIRkBZ4IRKBQpFD0hCIwelbFekIIAra+v6+TkRJ1OR81mM/JSEb6trS1tb29rd3c3lLVX7wEmDg8PM/MGQKZhvBdWkdfoVawAfwqr/JSXNKSUMk2MPLbR1zUPuH7O4Rtaym4WhisPf18alpfm1dCNRkMHBweRcsIm73Q64ZywGWnkv7LycK70v/3bv2ltbS1O9AGYdbvd8MJhGCVFp4rZbBbsgjMBhOAxdIA5QAb3j+L10K+z9NK8qv/169cBxn/44QfV6/UwEGdnZ5FCg3LieN5GoxGV93z33d2dzs/Pgwmj1dZk8nBEXqFQ0JdffplRvA5iYAJwtgDlAAO6Dfj6pgxOng76kCwswshLQ2EwR089b2oMtre3tbe3p5WVlWCeJEU+JSCMde10OsFqkg9K83EHiORRo8sODg6i3Y87agBTZ7r8d0nBlkrzPsOAxjRX2BkaDmPB4T84OIjTytBpDsipQIaI4LMuJwAaCA0MNuvS6/X0008/aXNzM04pLBQKmSOokVXPn+aaPAMV4J5S5pEFZ8x5FtdLbuA/xLT+MYeDaJfFD9mI9HXypRuNRtgsnpE1xC6jv2BM0W04yuwBopw+vymTx/27AzAYDB4xkMgjutnD98ibpx6lDKqUzUMGO7AHydGndWSn0wn5QK/DhLo+9yJV9gg4AlwECN7e3g5QjNPlMss/nE2KWnnfzc2NLi4u1Ol0ohjMHcY83cTreTr5Q+OjDGrqmT+lEBkoD190fz+CQc/E2WymV69e6dWrV5IU+WkoESbfKy0vLy/DY/BjvAqFQhyB6sqMyQHsXlxcRHK/5xs5tX1zcxNgUlJ4P+/evdPh4WEIc7fbDWFDARL6+fbbb/WHP/whwhbS/KjK7e1t1Wq1yFGVFJXbW1tb0ZcMQNnr9QLQUzxQLBaDbSZhnPsHFBQK80a/hJgJIaMc3ctNFZ6vL3Pq8pB6QIvCQvn6p+yp/8vz9v0a7rTs7Ozo6OhIx8fHYUS5brVaVbfbVbvdDm/VZeft27eR94MH7S0/er2ebm5uos0UDIv0sCcuLy9DeRHKkeZH4DroJM+Va1QqlWCRPBQJwOa59/f3VS7Pzwm/vb3Vs2fPwiBg1Klc5RrkT7Nf6XV5cnKi+/uHgzb6/X70jNzc3AyWAnYU+fO18nwtmF3pAUy3Wq1g3pgHX3fkNmVvUsPpjPoiGHlGyvQzT55y5IxqqqMxdl999VWAJkgBPg/AkxRteqQH2eP88vPzc7Xb7XDMMZaEJ2F8Go2GKpWKLi4udHFxoX6/H0YRhh6Q5gCVe0Wuub/UeME8AggxmhzywFG79/f3Oj09DeA9Go20vr6eafBOQaOH+JEt9P5sNovrHB4e6u7uTv/yL/+iwWCgzc3NyLHlaFT2khtj5NkZNPZnt9uNU7KYD5dZZ8j4G2yeR7sWzbHysLSU3z87XVdnNUnD43AObCuRQ56bnF0Kod+9e5epiqcOwGUKbOI6w51iXicHP71PvhfnAmIAO8pa+nMzF0Q20/C/96zGaazVatEL2qNpEGUcAOQsLmk8ANvxeN6iENtPBxrScyAACdszTwBvZM11Ks/X6XR0enoaNQOui1weUsIyZU4/hhc+KQc178uleR6Fe3dsHvf2UkBTKMyPynr9+nUoGWkegnQD6HlNKN5OpxNCAks4GAwiHwoQkIItWj3927/9m+r1eni1gOVerxdCgAB6z7FCoaDvvvsuDCiLvbGxEb3BAJWXl5cRWuv1eiGcMGeFQiG+C7ZDUrT22dra0tXVVRhy2FNX1hgthIj/8TxRvHhlfoZ5Xsgz9Xg8/JcnaL7G/H1RRir8rgilx/3Z0t/9fSsrKzo8PFSz2VSz2Qz2BCUDw8rpSJ5fvba2pu3t7cj5gfl31kWaHzOK8kF+UQLkNFOUhMIm9w9FhZeLfMBWSoo80dlsFsYAp6nZbEZe1du3b/XVV19l5sFzmlFWyB/sLnuy1WpFKxPSbmjIz5wdHR0FkMCQMB
cUEHB/vpadTidYKA/L+dq5Ykz1WJ7SdBn+3CNlT/kdh8hBZh57Bpjd2NiIsHNerrs7NuSmzWazcKQuLy91cXERPSnRQQ5EPGeOcD+G3PMtcabJq0bv+r2Rg0wKSLoe7DPuHdDNaWw4Tx6CJV0FEOmV0Z7eNZs9VGtfXV0FIMChb7fb0SpuZ2dHx8fHoYsZAGxkzVk8/u5OWKfTiXQzlz2XUQd2qcz6WBRSwMEmw22JD7fJfFZ60HHkR7KOHgGC2UZP4byj1+iSwtoiZ64j+N50nzlm8IgoIJT0JmSMKCf7jT3pea9gEZwzvpvXHNTyOtgDcIrehWnt9XrhpPF9kgKMeicM9DIROO/VPZ0+dDfiNMtisRgpjEQAZ7NZ4BP2rvSwF4+Pj/X3f//3mdQ0l+M8oiDvfR8an9QHNY+mT71gRurV+7VYCBaGnMnRaKRutxvsEkbdvRHYKEKmKEfCL4BamMoUoTujNplMonDKAXTqSaWhNITWgTnhKYSbs9Y5dKDT6UQaAfdNnil5eBSlcN94+uPxOJKxaUHEs5IgTb4UuYIIq7OpZ2dn2t7eVrlc1tXVVST2E1ZIjTuf903LHPhwWUgV6uceecr9qU3h3q6vN0YFh0Oayz0MqrMbpVIpjuJF0aGoms2m+v1+HHtIVwcvchoMBpnTppA5PN3xeKx+vx8gFcDBXigUCgFm+/1+gGFpDkIrlUq0GpIUoBkZurq60rfffquXL1+qXq+r2+1G5f1sNsukFFDERB899hD7e3t7O4pTYHMBm96L2J1QjDyKm5Aa8grYeffuXaRGpDoqZbFSmfS/e5RlUUYaKuU111P+TK6n+ZloC/0NR6ORdnd3MzJNGB52fX9/X/1+X+fn53GaFC2Z3OjznR6G536ReRxz7hkZ5m9UGa+vr2dkEMfdi1hYW0ArYUWYeBigwWCg09PT+AygFIDBHvLcb+aqUqmo2WxG03RaXq2srMQ8HBwcxH37iWnYAfK/fR08okgdAC2K3r59m0npSZ2olAxIjT8/L5rOzYtO+HtSQkBSyB/5zGALiqOxZ7wObiiVStrd3c1EG4vFh+JLj4D6HLpcScrgAqKvbusdnFJcCIhM7wlyCUAoze01kVR0NYwmunk2m4V8wLQDGiHiYPdhWrFDRDLAUFIWvxB5cIKG/TAYDCKlgmjLdDrVcDjU1dVV5iQusAgHrzgLneIBJwZ8T+Tp5KfGRxnUpwSRv6dMKYvlwpgajbW1NTWbTf32t7/V9va2nj17pkKhEHltPPBoNAql5Mb18vJS0kPvPKr5qWYjNQAlh/B56AjwBzj4EMvimwx2mOvyzA4eYC329/d1fn4e+VsY2Xq9rlqtFpvI2YI0147WWTBwzNHa2pqurq5ULBajN2qhMM+pItwME7Kzs6Nut6vBYBAnc1xcXMRzph5vqjCZf4ybr3eqlBaFhZIe56Gm9+kym/dZad7PcH9/X8fHx5LmbAgeLswhn4NBceNMuw/STNjQKLHZbJbpp4dCQVYJsd/d3enq6ioMNffvoVrANfsIZpJ7owUOKQR4xyTjv379Ovo6rq6u6urqKu6JpH1JIc8cvzqdTtXpdKI1Fc/rVaeSoqjMgSHev0clAPeFwkPqDmHks7OzAPhpeDRlo1Ijn4bgUhZlEYYr9BRsI6v+XKy1K38Y+mazqa+++ipOhoGNwvHxa+E8zGazOMjDz+fmuwBk6Cwp25ex0WioWq3q/fv3IVMAAAqh1tbWonCwUCiEc03qFuFdDDlpTcwBehH9/ubNG/3www+SHsLDa2trUd8AePDIkrfsgSnd3d2NHFacxvv7e/X7fTWbzUjFqdfrmf3J9bEFXtDI/Xl3i+l0qtPT0wjJtlqteJ/L6YcIghSYLoLeTfdWuh+dpHKswBpvb29rZ2dH+/v7wZbCpGLDPS8SGcdJrdfrEQ1gbRisuZS1eek9Uy9AGB0SATxCehP3xL3j5NDnnBRGZAgHnX1NRAF5R7dzH16Zz/3BZB
aLxeidjt6GnaXNGrrZGV2iVRR78Rk60OBEeqE6h6cgv6VSKSKA2D4vYEz/ce/880KpT5HZjxZJpawS3oUjcX9vntfkAolHjiL7xS9+EYYIYaCIYjyeN0Am8Z7JQ5nW63VdX1/r7u4u8ld9U7iid+SeeqPcN0CM4ZPJ590r9vw5vq9Sqej6+jqOJIVuJ1cUxgCmCO+fz5IDy8kVhEa5Hw4i2N3dDSVQLpcjgXwwGKjVaumbb75Rt9vVn/3Zn+ng4ECNRiPCDnilPFs6L/78riT9vciDP/sijVR2nwo/SfmAhc+vrq5qb28vitpOTk50cHCQkQkY0I2NDdVqtcgDpkUJjDfglPAJSvP8/DwcL4BjKls0/oeR99AroFOan2lOLqrvU2SO+8Nwb21t6fnz53r58mV0pvC+fyh7FDdyRN+92WwWCfn05eX7kWmq7qfTaXyHzz1sABW0sF0caQgIgM2DYUA2U5CWOpluNFn/1PlclMH9pgbd//6Ug4g8VqtV7e3t6fnz58E24TBJc6aW/U1IkM/DhOP8OMBynSDNe0NihAAMh4eHury8DINI30h6UuMEeZ9fwDIOi4cdna0hzYu8V4CvA2sPgaK3sTPIDgDD062IMvV6Pa2ursaBKh5JkbIpbT5X7EHmELs5mUwyvX7T07XS9Xcmkt89B5WR52T/sYcDUGlemOyyyfpy/+xRyCfsIetfKMxTUZA/Pk/E5fT0VOPxWN99952urq6ivoQ5cXnHuUImXJ4coBLCpzUkryG76EBYUIip7e3taKnHv9vb2yATwBfo8s3NzTiRDyaVmhpqbwDIAEpAOSwx+pl5gUXGIfSiP9Jh2L+sEd/vrQIhNsBQ4/E4jlV3/eBkSB6OyCOGXDd/aHy0SCplFl1R+oZJWSn+zk34tWh78s0338SRZt1uN3rUDYfDaLGA98Hk47Wsra1lJhWPnx52/gx4DyihtHCGBQAcp0AlNQQsIIKDQcbLIrRJmKjRaGhtbS3Oqp5M5s3XeQ6/PpuInCcAbavVisq+3/zmN+HpezipVCoFM0tHBP/e4XCo09PTyO/xZ/Q58+dO/5bOR2o4F2H4+qaKMg2h+vNIWbn199ze3uo3v/lNhOIJIbIGtGVCObozQ5icVj8YsI2NDfX7/czpTChUPosSWFlZyYQEOaksVfYAS5QgSmtlZSUYK1oH4U1vb2/r6OgoQCJtyChIJFxGWB32Ak/d2S/mjHmoVqva2NiIPYYCd4XN/RMK4zvevHmjv/3bv9Wf//mf68WLFyqVSjo/P8+k4jgw9d/z5Jfhzom/fxEG95zHmPpwhtyfHZ3IqXkwHXzenVNpfmgIclSr1XR0dBQOR6lUihPGnAFKw5Kw7JLCEdna2oqUJwyxt7viVEBIBwo8VldXI11KUgBYgIGPWq2mg4ODALr05U0ZO+7LnbmVlZU4ZYvagPv7+3DEeE+tVgv75Kk1rJPnIHY6nehuQc9YP5aXzh0//vhj7PnUuKPPUxZdeqy3FkVuXQ+l9+wy/ZSjuL6+HsXQpLPhxMNIE/InRxJGn1Z43W43UpjQIXlRXp9LaZ4rDKaAUPB9gpxCDBBiR27ovQuIRUd6PjLP460oAczsJQqiPL0BsMipfuwf0nSQMQgq9gv6laIpTuSEwWWPpGkGbj+ZN64FI/v8+XP93d/9XegE/wzznUeC5SFiYtUAAQAASURBVMnHU+ODANWVfMqMpsLFJsvz6l0oPf/09evXGo1G+vHHH6MvpPTgWTYajSgO4mSoi4uLWEi8/fPz8wgVUa0O6EMYaYeShl3wClwIAQgAb59Qngc20xcBRgrvHQWDJ8KmAxz4cWgsqued4FlXKpXI5VpbW9P+/r6Ojo6CcXUPhnuGiYVlPjw81M3NjU5PT9Xr9XR+fp6b2vApQsOcpnO2SAbeRwpaGHn36iyxF6K4ceIceMIlrDOfhRnCuCEDlUolDCPpKMjX5eVlFK5xz6wrewo5ghHlH8ARRey52+VyOc
Aen4chY06cucIxvLq6ilAlaSuwwdfX1/E8ACAS8ZEnThxqt9sBUL3AhT3qIVs3WHj79/f3+s1vfqNms6mzszNJD4d7XFxc6OzsLMB66lS64U5fT//O7+64fO7h+jJvf0pZBywF4xhSZKRQKESeP8YXsIcxggQgvxI5W1tbU6vVUr/fD51BhADAhVGVFOyQv5em3sgRB4kUiw9HgVI3QG9r9BrvYT/AVmGsCavCauIsOmPENfwkHyclUnaOo09hSzmSF2aVNl0pYUM+62w2i/1A6POHH37QP/7jP+rP//zP1Wg0onsMdkJ6HHZOZRFZcN2bslKfe+RhgrzXGR7iR87/+q//Osgp5FdSMNm1Wi3yMJFj1qBer+vVq1ehEwlZp6SalNX1/g+5oBuPNM8d3tjYiMNacM5JV8Kmkp+K7DmTC6nFfoNVpYYFYOugEkaZaAAdfejXSzQUYo5CW/Zjr9fT3t6etre3owvM5eWldnZ2onAbeWXPecs3SZkDY4bDoVZWVqIrhrPaefLosoDT8B8hBD6pzZSUzUdNFztPANNwKu+j0k56OIXk+Pg4PB+MO4oSwEfoxQsmCFNSeby+vh4LQGjA79tzMDxEhXfiIDw1BM4+ON3N79wTQGZjYyPYVelBwGnIT/N+BIjcVc5qxpshmZrDADiOkLmnYtzBTMokMcjVu7y81M8//6yrq6tHFax5xs/Xn9fzDD7vAdQtwkg9uryfpSwYzZNXZAMWHOcIth3D126340QkwjrINGsKOIDFxJF6//59eNnch4MuGFc/SIIcIDY6Ch0HEEdsdXX1URNpwB+AF4eLiuJut6vV1VW9f/8+8/1EJogKwCT1er046YXK1pOTk3hmjLGzyuPx/JQSz9GW5nsA5u/Xv/51AADCeZ7jm7IxKaPsBsr1UsrkLMpIjbaUXwyT3rc70hhRSVEoBfBk3qbTaaQSNZvNeC/AAJ1BZ4lCIXvCHo43xZroUxx2TuGBPICRhdlHhtkjtETj/jHOOFc4Rt7NgPQSZAdw6r1VkYd+vx+5+by2vr4eBa0UtUqK4r6VlZWIMtRqtSj0Y33cgSwUCgHM2aNra2s6Pj5Wv9/X3/7t3+oXv/hFMGc4bRAm7pjk6Vm3TylQXYTh5EW6r3gmbCf6yEH97u5uOB2cDkcB0Gw2C2aRuZfmnS16vZ62t7dVr9fDOQZnpLmnrJlHH2azeVuvUqkUa849oo+RCYDhcDiMo59ns1nmUAh3iHztKHDFieFzRL6cQCAqBZBl3+H8oJt5Bs+BJq2Q6+Jw1mq1OGp7Z2cn1oBr817+R05x3O7uHo5EPj8/fwT+8+ws8846PJW2lDc+moPqAvYh5gGh9IXIYwAIN75+/TqTq4Gxp4ItBV4oMBLoMb6EgzixaWNjI4TPi1lIUsbz5hrupfEsHlrh3lGSvjH4G9V7NHLnu2Cn8Mbpm0eRFEwpm4tr0ieP3n4Iplc2A3BgOvibND89yBXe5eWl7u7u9O7du2gnk2f08rzfPG+Yn33DL9LgufECfd78Paknx/q7ksA4HhwcBIPCNQGAHF8L2HNHgfWmXySe8s3Njc7OznR+fh6fSe9lNptFDitNwzGEvje4H4Ausu5gF+PA9zNHXPfq6krl8sPhFZubm+HNk4JzdXUVe42uErBBKCDyxwkzw5ilXTlgWf20lpRNBRQzf9PpNBrBw2zlAVJXjqm8pvLx1D74nCN1/NKf84YremTtxYsXAbhg0guFQoBCCpgkBTh0Q4XzxTVwNogwobvQNdQAODvFNdC3gF3ICHcWyGeeTObV8FI2CuJyy/V5H3vQuw14/ikHTAyHw9DLyBV7ADtE6BWjD4M7Ho8jnQxChHuXFHoYBg/A/xd/8Rd69+5dHNpSLBbjOr7uTgqk6+/zkJIQizDcCU73mpQfYsf+U9iG/uAaHuIeDofBDkIqAPan06murq7iAA9JAWhxdNCFDqLS+0f+3elwokBSOBM4ZVzHi7KcBfXPALB5jR
MhwT0pCHWWnQG+YF74rKRw6tHr+/v7ke/PvXA613A4jNQrfw6wF5hrNpvp7OxMx8fHoeevr6/VaDQCBDvWc1nNIw9cT39Mr33ySVIOPPEO80Jr6Wdd+WMkS6WH5t70ivR8CPL7vLlsoVCIsD+L4GFMwAD5fCgOzzUqFAqRK+qJ7T6RPKPnE/n/ns8Ck0B+3c7OTmw0FoE8PwfStOShn9poNNLBwUEI0HQ61cXFhXq9nl69ehXzg7ADrt2I++lFCBWbQZqHmfv9vt6/f/+o+MYF6SnQ6j/7Z92jTwHW5xz+LNyfpIzMMl/+TGnCNw7I69evdXh4GMbKm8hLD/NyeHiofr8fckaoCtbIez7Sb/Hk5ORR2MP3FIYO2UBRujJxGfYWOtwr8gFrQ2icE0kI649Go2h4jeyUSqUo+EP50WcPtgNDTxFBoTDvU8nzEi5jPlHUKFf2uMshz0ELtaurK52dnen3v/99MGcAH57f93TqWKWG04Htosit9Pi+U6YhjxFO739lZUXPnz/XixcvMnOKHLlu9fwznAfm31vxkZZE9KfT6UTOKAUZft8YdOTBHSXvqej5yzCosEKEazGuMKUwwsg4zhQ6EdaIuaTYxfu2eu9dUgdw9rlnz2elAwHMNHPJPaGDHcR7K64XL17o6uoq9iL9gR14+vB1S8HdU/ric4/0nlJbktoIbNPh4aG++uqrKIqU5j3RyZ8EYOJIAdBIXTo9PQ3HmoJqWEoHhm6/0J/IKvfoaVvoFH8e7KunESBb/J3P8QywpThWpKGwn4rFhxx92Ft+9xZ+6E/+ViwW4+Qt0hBgdyWFfgf8QxpQm5DiBnCSp43d3NzEwRL/6T/9J1UqFe3u7urf//3fY++7HvKfU0zhoDQvyp6OT8pBZfhGSA1jHiPhitSFA2+JyfFJIoRDAcjt7a06nY7Ozs6isAjlxucIlXoo0pOoGShGPHTP53PPKg9Uwwih0AHVLD4eOUzRzs5OJEJjCKjIL5Uemrrj4bEZ8Mh6vV4oQenx6Vxe0MBGGQwGoeQBB2xe2mJ8++232tzc1Nu3b59UHqnHw2DDuUy4x7hI4JSB/D3FNqTG3VMkPAdVUjhOOCZcn8FmLhQK0eYDgLW2tqZutxtt01CenB5FSJ7rOEBzReThQDfU3IsD09TowT4BPCkAIMkfLx4G2PffYDCIDgN8J3sAcMrnccQwCpxixbwhnzwL90nVLEU5/J3CF/IayaPsdrsxL76eriAdLElzfcFrvt8XSXZdmfvedwYC2XTigM8UCoVw0pFDQs+EJ7mm/+ysH50SCIdTGIEu7HQ6ur6+zgAxnH90OfLL31zOYSTdYOGwcF8poEjZWf7GmqNPeV53jGB+uA/Ye2SVfcbzeC4rz4KtAvQ4+8y58czfZPJQUEhRC585OjpSv9/XN998E2HaPJvpxp15lLJGfdHkNgXRDlLT16X5s5DWR5/jvb29WAvXtc7QunwRZaQFHWDQu9UgT+jNdP87mPR+ubTJ80Itd/BIQZCyB2n4vae6CKeFdBtvNeXFXZIiejadPhy96n1Oe71etCnEWRuNRtH67+rqKvQ9mAp5IgJGpI30M56VdSqXH3rSP3v2TP/yL/+i3//+9/rNb34TxAvXc9KG+WSdXEenuPBj46M5qC58KVvqApeykennmezNzU39yZ/8SQaY4Rk560MVHxu7XC5rd3c3BIOzcFHiVABK8xYfKFqEhntFQLwYwzeTv9fzPygw2d3dzeQoNRqNOE4PwZxM5pX6vpFYdPJsXDnxt62trdiQFxcXIUxeBYin7gaZ+UAopQegcXl5qX/8x39UuVyOk7KeAm2+ds6ApCxq6h19zBP6Y49UQeYxxnmfSf+2tramV69eaXd3V5Ki76x7tJ7iwPzD6ODNI5OEp2j8jAymeVIYSmeAfZ85G5Uas5R18fnw8CN7DcWLZ498UZDw5s0bSQqmAseMtANSBjDqnvxPKxVnpQDbbqgovBmNRiHrUjY9gs
M8JGVAuBtpTxNwhoC5yVvzT1WWf6yRMma8Jj1umeWAFZ2wtramly9fRrGGg0yXGeaY+cFQNZtNffPNN5G+AYCjrc39/b3Ozs7Ubrczxpi5Rg9Ljwt+iCJIyjBH0hws4yRBIKRsFPdKgQpAwbtIAFCc1eL7JEVLIzoHcK8YbNobEpmDnPAUMb4HNhYgiuNGGgv3xx7Adnl4NM9hSvWr/z39fxH0b6qP3C64o+yyzXNRu1EsFnV1dZVpheSfR96QA/QgR4N7f1LsNXLlhXdOXLnO5BAVroWjxvOQOyrNdTl2l04UyJDfNzLrjhbzAwj1NSZFkM9AviFLzpKSCuinwYGJSHshN5X6AXQw+ePYAV4nDYc0zPX1df3qV7/SxcWF2u22yuWynj17FsDaCazUMXkKR/rzPjU+ClDzlGTecMrcFSCLz8ZmUra2tsLIubAWi8WYNARsdXU12pRAQ8PYQFkDfllMn3C8eAwyjAyg2DcTLAMKF6XibXrIc5UUTXlpVt7r9YK9XV9fD8+CzxG64vp4ipPJJFplTCaTMMwIOowWyttDm7PZLAw8TCz3LSnm7/e//73a7XYu2PR1dAFKQ6Z5jORTIPBzjg/JagpE0/em4KZer+urr77S3t5eGCNJ0ZvXZQymCcMIi0K1MZWWeJ2+TlzH0wZ4XZqztM708s8dFX8+XvP3OQvFGnuuIAqYPNmbm5sw0hT5cXQr+xCni33M/uPUK67t0QqeRVIGJGMoeObJZKJOpxNdBGCs0jVLDX1q1FPni2dftOH3mcqmEwNpiIx529raUr1e197eXuYgCApP+J18S2kOHNCldG2gKMKNfbvd1uXlZcawodtx9lMHZDqdBgiRFJEFAC2OkaTYU55LCjPPz84ywcyn0QJsBfsNO0QEjXBos9mM+6KDBffL8dOA0Wq1qpOTE0kKUmQ6fUhF4PhjUmqQeUAqctfpdOK6kAnpnvVncblI2dOndNjnHn6vbhPSCADPsLGxoZcvX+rg4CAKhymU8zQId8LdIfDWddSpSPNoIyAtnVu/7mg0Uq/Xi0NA0Ecw/bD+jhWkeZU7TpHXITjjKilzX7CW0jx64U4j4JLfqfyHaaaQtFqtqt1uq1qtql6vZw4SoOsAxMl0Oo1uGcViUXt7e5HH6tECgLanqOzt7QW49daeKUni657HorpMeBQ2b3w0xJ+yUGwELxhCIFMP0I0A4UTY0MPDw1CerjwkRQ4fC0Y+KSCO9xYKhVAQhCI5iozJIWXAQ4swDZ6r6YCEQd6KN5QGVDebTU0mk2g9sbGxER4zIBe2CWDBd2P4Z7NZAGaE9+TkJIqqyH9i7gELnKHO51l8Uh1cqEjEPjs7iyMGqdbOEx6EjTV7ygNK5SNl7BZhpKCZn70rQ+rJpwAbg00rD5gaSRkZ9BzLm5ubUHRemY7cYYBJDyGU5PPtCtzl1ufeq/jdI+f7HID5GrN/nFWQ5mdO46VzXfoK0toNgEhY1fPDCoVCKGWYMphVwHceeAG0TqcPeY/NZjOzRisrDwd87O7u6vT0NKMYGena5RlzZ6SfYqgWYTjLIj1u5yc97t/qz9Lr9bS5uRkGP48N4hrMP+sCM7S5uRlMn6QoPrm4uAimxtcTYAiQ5J7I5Ww2myoUCur3+2ELaNyPo+6ywXAjD1NPSNdJEN8PDlKleeGHXxudzmsrKyva39+P66H3qWUYjUb6+eeftbm5GWQIaQLMsYMBWCjqIvie29tb/fDDD+r3+5n19fXMczZTxyS1wYsyXO+6bnUs4K8Bxtrttn7xi1+Efu10OgHykBHXlega1t4BEU4x8kndBu+H+UbH0V+c0ywhDzydg/XkdX8WJwOkOThFjpBJMAGfLxaLEenAKSsWi0FuuaOzvr4eDo/rqvF4rHq9HgCarjAUHPpcE7mm57W3XcNGsYbod+xeuVzW6empnj17FqDcayPy5IDX8+T0UwitT8pBheVML5yCEmdE0lAU76vX65pOp5FnCaOS/g
8bhcLs9Xrq9/t68eJF0OmdTie8/dlsFh4HoJNKXwcJkjJ90mA02TwOBFxIocvJKQUkwrBJc28YYMxrnrPKs+FJcYQe/cs4T5dQAt4+mw5wS5GIzzHglPVBAO/u7nR4eKgff/wxKnJTLzA1CqnRSwFt+lkHgYswXOaecrJSMOefleYV/O410g8x3ZREBtxBKBYfqpZ/+OEHNZtNDYfDSLGg+IQ9g8Pk9+b5hxh9V8J5ToHLsodGuaYDcZ+LWq0WDZ5T9p3ixM3NzQCr0+k09pd/D0CWPUBYCh0Cw++pL9wP7J+kMPbSHOTQIsUdKDcQDlh9Tjz3MTWOvJdrLsJI789fl5T7/P53Dzuy/13WkFEHfrCfg8FAg8FA3333XTjHsDSwUL1eL5Oe5XPqFdNSNn++XC5H39Pp9OFYXArgyF/2tZKyfVBJS5EUMoRM4Th6D1bymsn3c5LBK51hz4iGTafTyCe9vb2NE/sk6ejoKJMnja2AhYLNLRQKcTABfWCZy52dHb19+zYX1Phzu+7KW+cPMVSfa+RF16THeECaP8fKyooODw81mUx0dXUVToKkzPG8novfarUy/cZJuSOM7X1CccC63W4wkZ5CBfDl88y5150gR6w3cg1+SJ+dz7Mf2CvYddbLUxj8e5kf5N1TWbgHnu3s7EyXl5cR4SK3lLaX3mEIsoXn5xAg8A7fe3d3p3a7HVFj5uXly5eRYklBOLrFHWqeyQu68xwXT7XIG5+Ug5oauPTvqWF4ChCQn1atViPHB++Ah5lMJtHg25UTRUd49N7ShLYgKBbyivAWPDeJ0CCeEMaWicTLwOAVi/NKZFrmeHU+tD6geDabxSlSs9ksvGdX8MzZ9vZ2eNitVktbW1va2trKhDC8PY+DXK6LYPD9sAuw0973jHZGbkBYs9SwpwaSeXIZ8PtaFGDK+JCRT5modOO4Z0+uDakaXpDHZyWFccVI4/HC+BM+6na7EQLE4YFdweP3efViI+lBYXsuXQq205BgCr4dwLJu4/E49g/fUSw+nFxG4QsyLymAAJELmCDaaBUKhehq4c4hQMKVo6fi8IzMH0aI56ALRb/ff8SWpcrR1zoved/f44B/EUa6puneZI3dyeS9XtRAb1wcK38fBhZ9ge7ius1mM06/wzAWiw+N9V2m3Di74YVBxDGB5er3+3EdPz7SC0aKxWLIIMaY4lbAGuwOLd7Q886W8VmOBsb2eFEqIH11dVX9fl+SouWfO1SSMp1nmCd3bHDMuAe+h3xXyJ7T01PV6/V4Txpq9nVKZdId7zzZWITBnKSklt+3OzQ4CoeHh+E4sfbSPDeYa5Nmhd4ghWJnZydwBMclo38o9EQGiArAeFM5T37wbJbtu4qtd3lHFtGlXtMiPe7KgO5B//v1uCZ7CJB6d3cXIBMd6sWMpOJw0ttsNtP29nYAYU6+4vmecmhTR+f+/l57e3vxHsc4t7e3EbH2+UhJIccLqb5Ina6nxicXSTFS5sEfzA2+U7vcYKVSCcOFUmBSJYXHOhwOtbOzExuYasx6va5Op6PBYBBH4ZH0e3FxEXkXMDuwtYAJBACvoVgshpIiDQDjzf1gRAm308dUmldCI+R4Ynho5CW5AQQkIzCwEYSNfO7dw3CGGSGGCQHAuEdIwYOkqBav1Wqq1+txgkTKMPoap+ufKpZUJhZxuHH0UKD0uHWWy7XLNyH5er2eOaGMgdPRbrcz4Z/7+3ttbGzo+Pg4nI6zs7NQgn7GN/foDoyzCzhDyJUXCPi+43489OTPh8xgRD1MjrOG1w+ji7KESfa8KZxEwAVpEBx9ms6vM+8e2eB+cSjZs+RsS4prY2D82VLjzkgNuT+vjzxH5nMOB838nmcApMdMG4a0Wq1qfX09k9fmhoKwIgaHMGS5/HDgyPPnzzWdTnV8fKx2u612ux1N7mHZPX9Vyjo/7ph4JbW3DqK4znNh/f3cM7LquhrdhyNeLpcjogZIkbJHQcJ8An6xPewpomI3Nz
fRPQIQsbm5+ShqxXD5k+Zn0DOXbiPX19f1+vVrvXv3TtLTFd9pgarr3VTG82T6c4yULeU1l980xF8qzZviv3nzRs+ePdPGxkaw36z3bDbvqYucI0fSvCvExsZGFB9Pp9NY43fv3qnf78fRu4VCIZyGPLbacYuzqth7dDwOnjRvmcX1PcrBdwBO0cXYJFKrPAecHGonEzwq4lGB7e3tYJiLxWLs/1TvIWukAPk+4DAVSREp8/l1575UKkVdTB5J8hTh5TrsU1j/T8pB5ec8dpQb4gbzjL17leTmvX37NpA+HhKAgoljQvgs3jf/39/f6/z8PDwiktSHw2H0GQWYeK9QjDH37YZYUkw64RoqTxFMjoCcTCbRdJlF29zcDI/blYZ/F0JI2IAjJmFHUY5sbDcwAHhyVAh78V3OWqB46/V6gCEAkIOaPOYplYO8kecVL4qhd4XIXLiyT0PcPhz0UXzm4VAUjYNFFBNMD+CRFlM4KlwfZhygyjqjCAAPGHocKGQ1zS3iuunz+B6SFAqXgkHABE6YKxRACLIDgPQm6twneYvMG3LugJs+klTrusfOM3uolOfEAaMNVr/fj//d+eC+PVyWKsEUwLteWxTZTZ0nRupM+s88x2z2UDB5c3MTzDxhbwf/tDljzTH+EAebm5uRDsTwNItut5u5N4wz8uHyw/CeqawXP3PvqT5LHRtYUz7LPeCgd7vdsB3IPFEwTuOD6WL+rq+vg5EvlUpRPEVeH3vDZZ79DtsHUHaZcqYMXUS+Osd2+7o6KEoJBJ6DuUrt6yKNVC4ZzprxDMViUc1mU8+ePdPvfve7jAzRfQIdy/rwWQ93Qx45oIT8kR6K35BJnGEcOCcIfC7dQUHHkNaB/HrIH6CJUwTG4HXWzmsYpHmY3+tGyLWFfef7SHdkL9HflwhX2pKQlCsnaHh+r4+RFO0ROea1XC7HMcRgCHJycQ44eIP1zcMDqRPlxNHHZPejADUVvDzh882CUeHvfmNUv//617/OFIuwqNPpVOfn5wHW8D58AmABCe2cn5/HZieJGO/h6upK19fXmcIAQlle7ILRSg32ZDJvbOuHCsB4opQJd5IKUCgUQljIeQGEexiK17rdbuSGAByYSwSYtkTkSUnZ4hNn1DwJm2eD4XM2+SkWMf1b6mGm718E7z0d6ZxIj9NPfKSOGA4L6Rmbm5uhNEiW53eOjnWZ9e4NnIMOq1+pVCLESX4nR+Sh8LgHZMxz3VhXzzXMK37x9zMHMEk4hMg114GBgtUi33o2mwV7dn19Hfut2+1Gkj5AcjJ56OW7tbUVrBaAKQWmnqQvzXuspgVpMG44ig5cUnlNvfU8NjJ9H78vwkiNugPu1CmUsoWNOPg7OzuhL9yZAfwVi/Oe0Hd3d8GGuOGdzWaRL4pe5fQw+i46mPTUFAYAlPUDZHAMKvsEA88zuyz759l7Hk4tlUpqt9shO3w/up5nhV0bjUZaX18PAARQYS5arZZevXoV99PtdrWxsZFJW4DB4nW+h32KPfMcP953fn6ufr+fMe4OTl2ePczNd6Qg1ffT5x55dsHxQbq29/f30Y4RfUUaXvqMfD6NHGGz/ZTJ9fX1KJqmKt/3PcQD8uO5kLwPO8w94GggK+hc78Tj1f+sJY468pE6X/yNfUqLqY2NjbjP2WymwWCg6+vr2DfgmGKxqG63G1E+rs9xws4kY9cA824j+Ozt7W2mMJYoXrFYjBSzH374IVpxMdx25oFU3pOmx31ofPJRp08pR/7mN/jU30ejkZ4/f65f/epXGYFxMLa9vR0bl8niZKlKpaJWqxWLRysnjCHgELBAwjsNbKW5ofPTKLzhLMqEsL0XSvEZwB+KClaL8Kkrl0KhEPmwFIs4g0CyMeGlarUa+aXT6TSY2lqtpq2trRAmQD2AwkHIxcVF0PXkkf3iF7/QaDTKAIRUkbjySJVeCn7c6KWM1CIMv1/uyZ+Z0KdvKs+n8SII5NDDiawjTgyVyaSZsKEJj0
oPXihzi4GEcefaeM/cq+fMUYTljpLfbxpa8bAQ6wlLS3cLwml+LjjPR0iUxs+wGYTdSaHh9Cjy6obDoXZ3d/Xjjz/GPuUIYKIZPGPqkOHBpyFewDQMm/f69TXmMy7LeborVY6LIrfS45ZY6UiVfgpoKpWKfv3rXwfrDBOCbOJc4SSzFuQbIyewrLCrt7e32tzcjPAgR6ZKc1Dqe8IdrNlsfkKVEwMpOOX5+bu/5g4FwM4ZWRw2voM9DsijhmAymUS+/zfffKP19fWILhE16/f70Vbu+fPn8QzoDklRVAhwYW286wZg2g8lePbsmS4vL9VsNjNgMw/cpcbcHe7UKfvcI09WeWYHlR7NKRYfWh01m80onEaHuE4mYoVMedqINNfJ3ku3UHg4FOj9+/cB1LDV6CBsNP/4DuTfnQYYcBx3HCdv0cf/TtLxO7LB/mCdYeMhu9gz1AWQhkNLPyJQ9ClmTsnN39vbizlpNBoZ5pZ58pB96kxwj6wTThl/r9VqOj4+1r/+679qMBg8cqpchvkOgLSUbUX6sfFBgJpHy36IreD31Mtz5YKCcwXm163Vaur3+xGeZ2IBd1SmEQqkzxfXx0vFaG5vb4eShnrn/dI8z9QVJUoHw0l7Hfe0EObZbBahfxYVBcw8uEAS9sEwDIdD7e/vB6DmfghfErpwZoBm2eTmUmXnAsa8zWYzbWxsRHEOBiXPm00FiOHALRVAxlPOy+caLlMpM5GG/3mvlA1PlctlHR8f69WrVzo/Pw+ASQ4QCqVUKoX3WSw+5PTA3BcKhajgRyFubGyEcbu/v1ej0Yh8PPIuUcBeUY/yQg792aR5yNMZG/6OQiKXledDAXpfUXLw7u7uIjWBvG9vxo/jRFEJCrper2s4HOr4+DgMP9+F90/KgPev5J4AUh4apYOFJDUajdhvLrO+zr6WKeD7EFBdhMH9unw+xUj4+wHzGOLNzc3QpQA2dADhT4wt7KbvG0+1kObgiDxgImLe9zO9N4wnrDq6ib95eD/PqPl6AQR8zabTh24StJxiPrwjBmyQO2fk98Hg0j0Fx4xjgb2dDmAGGcUmeLqMF/i4Y8hzMiaTSUZHuP1Mmeg8R8tfX5SR3jdyxLxJ8wiGh8xrtVqm6TvvHQ6HmXnxdYVoYv1I0eD9YAaALjmdkoL5h6lkDbyFGfmgnkbCz+RrSwoWlj3n6R8Mb6HG+jq+YLA3HTcQWQPkEgloNpv68ccf47PMDQVizWYzSAFJ4SD53nOZdt3hBZN8VpofSTwajXRxcaGrq6u4z6eIgNlsnl7nOvpTccJHGdRUQNybyNsgKWhBaGE9t7e3o6WHlN2EztCwSO6dAmwlZQSAsECn04lJ9IIVWFKEi4VPvXw8XpQY90+1HPfpeRuSwtNBAfJcDu6ZBwcCnU4nku/pWfb111+rWq1qZ2dHR0dH8X3Sg/GmLQutrdyz4f7wahC6y8tL/du//ZtOT09DUWNsUgP+KcqP9XcZcFCwCMMVnZQ9d53hyexSvoxvbm7q+PhYtVpN7XY7woEAshT8ugMDiw3D8/79+4z8eu9UwKMXWcFyIV+EvD1fEA8c+XNDwLOlLBLG1/cHDKU0P+9cUihwWA3+jjJtNBqazR4S7L0o8PDwMNODuFAoRHNpT8BnfpAn9jVpLIAq6WFPAebz8rzdQDr75mDP54b1doO6CCM18Hkj73XAkxezkbd8cnISskekB3YPGfHCpdFoFCDOHW+vPHcGSJqzp+wPZ/pJjXGw5uA079nTkco8Bp419PX1w1Emk0mkOKGjp9NpnHDmjdVJ05IUejZlLW9ubjQcDtVoNDJsFPfhqVylUknX19dxH9PpNA46oAo6BaDci6/rUwZ9UWRWeuw8Mc/87mvvMuTH0cJQOiiTlGFHKTJN5QjixmtJut1u5E06WOJ7HDjiaLBvJMXr4BDApwNl/vnhP5764nMCQ88+YX95mJ+6FyIOREp5Fu9iAoPsepWIMPfKs2BDsDUQIO4wuE
7E8YPsYG6wU6xBHhuKLk9JFJfzTwGqHwSoqfBzQQ+N8jr/Oxjz16HdvcjDE5QlhQeAMHEd3odg+efxWFgkmKpOp5NB7LyPTbK+vp5p4sv13cOHhfT+ou4R8lwePnX2g/mCOcLIA4w9H/D8/Fzn5+dqNpva3NzU0dFR5ruKxWIoOhQoG84H1bm7u7sBpPf29vQXf/EXurm50ffff//IMOQJSwrwGD43Lh/Mw6IoTN8EUn7Vvj+LGwc2LqHRVqulvb29UGrO/HguMfJTKBQCVPIPw+WsQq1Wi9AVDhQAwc9Y5nsAlm7UHEwyABLSvJcr/1DeXmHMtYvFYoT6b25u4nQWHEeMCtWj9BMm9L67uxvsE3udRHwK/3AmZ7NZsKdpmI1iFp6ffVoqPfQ/xLCnbBJr6/ue9UxZOpeRFOB87uEOn9+3O0O8L2UtKpVK5OI7o8OpXxijVL+64ZUURSSAMVij8XgcRRQe4YJFleYtxdhHOGMO4NJceJ4DGcMGpEae74ThkubNwj2Pm6pu3gNg8E4pEAH03/WT0AAKXM8ZLRwydJ4DGCcqkDuvzsbWMK88Yx5Idfaa9eJvnlaV5lB+zpHKpO83v0fum7605KezxjjD1Gegp6R5kRrgFqCHLXXQRs1Kv98POfVcUcAh9+p9TT0q5C0GWde7u7vQRdgAPuupL8gC9w8Rhmwhv3yWVEXWnyJU7xDkp2txCpQfAoA98RzoQmFeQEahNc+CjLNfibRx79z/bPZQVOi9kFlPKZuek4LS1In0/58an1zFn3dB9yqfAjv8TPWns6P+gLxGHzLAZNp+hNCehzPxEKi4G41G2t/fj8/R386fCa+Aze3J6p4cDdPUarVUKpV0eHiYYU8RIO4FzwogjQfkxQkAbhLtO52OVldXtbu7G31Rz87OQsjK5bK63W60t3JQzjxzfRiyXq8XYBaPihO2uAc3bOl65QFWn8N0LlM5+Nwj3TD+zG7o84Cr/w3A5UrEW+V4PrFXLLOZJUVU4ODgIJhR791bLBaDYUFhAcrYJygOzw3CQQHg+fchjyhISZlzpAmJuoKBmcSgEsol/9aVnMvRzc2N9vb2NJ1OQ5FKiuMcqTytVCoaDAba3d3NFJbgeMIsXF5ehqIlv7xYLEYj+Z2dHUmPwU1q4KXsUXoum+lnP8RW/rGHK/PUmUojAf4/8vrixYtMP0eiQABD1z/oWeSAEB7OLayS5z4jRx7JASx4hIZrc99cZzQaBeBNGZgUsPF8MD+z2SwMMaCF/x0QFosPhTKEaaV5HiORidTZI8VqOp1GWhXFVzCptB1Er7utgKnz/SnNo2nMAXPiYChPv6a6OI30OEO9CCMlLaTHQIXBvvc6EYAg4XdsKCk/gDJkEnl2BlaaF5zCRPI/n/EUEmRBmhdBeaEoe4Xf3Q5wj9L8WGieGWfOe516jQo6NLWXyI8TEOVyWYPBQJubm5n0xvPzc5VKpUxHH8c1HISEnXDbVyw+dEOiSwD71Z0ictUhINAJNPz3CCzD7VDq9Ptee4p5TccHAWrKlOYpTGcQ09ddAZE/cXR0pL29PbXbbfX7/SgCcQ/aPQ1pnucBbYwHOpk8FI4QenXvmXsoFh8a4nvYiQ3goWoGCwk4KBaLEX5nIRFEGCnP7fSkbeaCMJKzSGdnZ1G0VC6XdXR0pIODg2C1OEqQsBxgk9xTz1GEencFxmkabMarq6so5MnzcB2s8BkXrFTwEEYfKTj4nCPPyOexZykrJT3uQ8kGdwYV5QW77uDSGSRpHgHwPLm7u/lxoVxDUgYkpjmAbHAPYXtYX5rLPM/pBRrkSm9tbcV+8sIQZ8EJ7VxdXWVex3BTMcp+wMufTh/SaZ4/fx73BqA/Pz/X8fFxBpwy3xxkQcEZ87uzsxNAnohDr9eLfcbn/WcHOc6mpo6Uv8b8LsLwZ3jqvqX8rhTooVLpobIdXYiM0qaJtQFkwRYxHAS5jPM3D4PjlLvzLc3ziZ3tgbny6AK6FvkGUL
OfeDZkm7QEDK1fj8+jK71NEJE1ZI0OFORJA5wB3uRhs3/c2MP+8RpRBQ9ds4cBtsjizc1NtEFkrVKW0e2pR+dcJhzgLAIxkJIC7hzy9xQrQC5dX1+HAwtQgq32CKezqA5+ee90Og1GEb0CuUOuNSl7xWIxogPkE0tzTJMytt6H1eXYyQgpv5cv2MYjFZBrFCki28gA+pouBOg/OkAgU+xl3k+eKmkRXjDIHBKyJ8+aOWdPIb98xp2u0WgUfZH5jNtKnjl1QFkrt2kfGx8N8btydMHyL/MvfYp1AxTyENVqVWdnZ9ra2so0pfVcEcKNfJbN7x4jjc5p5YNnQ4jHPVxffKfdXUAQTs8twhNB+F1RATCdkWRz0ILEUwsI5xeLxQg37ezsZI7DQzjoX4aS6vV6mdMdUsFgIzkzxb1yZGW6bnlGLl17B0SA2Twwmhr8zzlS1knKBzL8LQ0/kMvTbDb17t07bW5uBrjHWx4OhxFC8eIejxDwPwYXYOgtmRjI7M7OTuwnADFheICC5236M3D//IzjAlClbRrr5/laXkyDrPC83ntSUuwJIgy3t7dqNpuRGoD8ucdMdSlz4PftbBCKkWJF3gOggDVxIOrrmzLDKVD19y6CYc8b7hT4nn0KpErK6CwcW9IwMLaAUy/sQA7dWWINuBcMFXrT2/2RE+eGyPUW14AhYz3ciUxf88phf7bZbH6oBMCXzwMAJAVAZa/wbNfX16rVajo4ONDq6qp6vV7oWwBjvV6PveeMaGpgXa54DQJBUoRxPa2AOaBXbQoyfQ+7Q5060HkO1ucefr9SvkPthAi/c4LUTz/9FEfL4jBxchnse7vdVqvVisJlT2XyNoDIcKHwUORMlJJQeL1eD8cCeeI6rCtA0ju4OA6AoPC9hBy6M8ccAE69wb4z/35QEPun3++r2+2Gzux0Ohm7IClsAwQIJ0exFuAhZNUdLNIO0bWkH/AMPjfIWqvV0ps3b4JISHU9z81nUhzp7/uY/H70JCnfPGkYLGVLXRhTxYnSIkd0ZWVFe3t7wY6y2ABTmEavrndF5PcyHo9DEXMUKaxmukFcCWAAEB6Un4NZFp6B54QH5F4L3+PFLs5sEN49ODgIhb6zsxPejgPwFDjCqAJEeW4vCPOQEaCZZ/ZKPMD0x9Y+zwimxj5VnIvCQjngdMAuPb1JXI4BSIRK2u12MPHIpCS9e/cugBqKgvn1nql+8II0r27GMeM7WTeAJV4+v3s4xsOe7v3yrFwXT71SqWTC/Mg/8ispul84Q7G+vh6KGgDU6XTUaDRiL+GUYdzT/CRSBJyZ8P3hhn42m2X2k7On5AGmBo/14/0MnxsHs/zN5SNlfD7ncBYqZc7yBmtPyI41RsZub2/V6/UiF495Rt86EykpokSTycPJfKwv8kwOM+370CspyHKGKc8p4B78ORj+urMx3k8Xx8ntjx8LzHD26ebmRtfX12o0Gmo0GpnWQXR0obLf95c0B6cewXI7RyQCxyrPht7d3ens7OyRrkznJ2WjnGl2BioFhp9rPEVa5BFb0tzp2NnZUbPZDGcnTde4vr6O4lRyo71w1HW2zx1gDP3B//S25kRF7tOjUjh1frANDgh7BmeZlAKAqjQ/GtQL+fxfoVAI3c/3879Hnvr9fhyYQZvBzc1NbWxs6Pr6OsO6AsZJm/D5T0c6dx6BYB69/sELakejkYbDYRCIqV5NI5e+Hk9hyKfGf6gP6lMC6F/iuQ7+8NLDwg8Gg0y1HEYcpcCEe8K+pPB0nJnyApGrq6tMPgVJ6e5hO7XtoVg/UcJzhPI8A5Q7LBQL5I14Uaz87/fsvQlp2u5AApDqbAOeYaPRyMwvHiLfgccO+IHxI7eK5tipUKTClCdYeUowT7g+lbr/fz1Srz29/7wN5MqKdcDIEcIGXDkgoB8dioJrIseeM8y9eNiFz6A0vGgJ2QEwePEFP0tzJ9AVIkqF17hHQCz3CHgmXFur1cIRA6Sj1MfjcSYNhRA87C
z7j/0FqM5T1F4QiU5wZ8r3HfNAxMMBQ7qmKWPOszqIybv+Ihh56XGbqfRvUv4enc0eWnzd3t7q5OREh4eHoa/SSAvDQT06ivVguPPA+gNSff5Yb+7F8/rznoVnTKM56Xv4HPvFHRKXZZw45AeGUlIUdzWbzSALRqNRRKlg1tDhHtJ0ufX9CojwegD0hqcXSIpQNuCUrgIpoePr67bnKfvr9vVzD7dj0hwLuE50e8LRpP1+P+StUHhIK/L2TxRcuuPAd3FtHGuXQwgq5p7vIo0IgOXOOPaW73MyCGKAQtaUrEI/pToLmWV/8d408sWYzWbR85SoEf/8oJSdnZ04pAgw6cVmjiWkeXcW9Cxz5zgC24S8pXnk0+lDasz19XWkVTxFZKXy6q8hJ//XADVP4fvPT/3dbwYlAsi8urrSwcFB5PVJisXlf4RgMBhoY2ND0vxIUn9weomimPGMMdh4uyxMuokcKHhYnGdwwOeegitjnt2rNx1I8nnYsGKxGK1g2MCukAhFEQYej8cRNnUjCyhmw5IKcXl5qb29vchDpRCLCn8GmzMVrnR907XMU6Yo2kUBqCmzy/B79pECWJjDZrMZIWy8Yi90kuZAFGYPNn82m+cqIY+eq4z8U9Di9+4y6XmmDjhcyeGw+fzjBAJMvZiPz3goF+VarVajL+Z4/NBXr1QqqdFoRFgXB4jroyDv7++jIMrzo5kD30e8H0ABQ4fBdtDo+oGiR/aW71dX+qmyTNc5ZVWfYif/2CMPmDjwS9/HKJVKev78uQ4ODjSdPrQ129/fD2MnKUKlyA+FTR4KZw6ZZ4ozPc2D+0PmcEz4me8i58/1Lroi73n8f5dl3wcOlh0E8R6u70Ux6GAK/wAo3o1Aemi+78WofC5lLz3NyZleB5sQDDD/rCGt0tJWRD4PzjT5e9JIIP8vEkh1eyY9JjLcJr58+VLSvMcuvXudqfQ1cvn0uSiVSnHsJwCP6CKsY6/XC4KKtBR0D/qJCJnLlz8XDjJHjTqx5kCc4al/HuEE/PphAa73BoNBOFhEQQjJQ5xQCE7rSZwtlwefN/Q2rzt4BrBKihacHoFlLXu9ns7OznR6eprBQnmgMw98gpOcsf3Q+GgOqocMU8bhKa84HXgxFAqtr6/HpOKx4CHg9WC4bm5u1O12My12JpOHFiD0PfUWQJeXl5n2UelEeRiL5/IN5c+ZAhtPhvbKv1Tpc22vivPcVzw6ruPJy9PpVKenp9rY2IgTIzxcz2bw+aY/Gp7m/v5+sAGAXQ/dutH366QeTt4cOBhNveL0mp97pAYwT1ZTEJB6fhij6XSefI8So5E0ht6ZVI8ArK+vR4swT+VAeVYqlQjf0PDfr5sXtkehMN8YUWeAMMgU5/F87DU3blyf8CiKCwUOOPG+eDQ8R365V/pLoly9KMfZZN8fVEzf399HNwv2OXM6GAzU7XYzxQqp9+5riOJMGdM8YPop3vwfa6T37zo373kZbqS3t7ejMHI2m0WKEP1NcdyRMe9xShEPDhay6gbU2Rk3es7cODHg4USeJdUx/pzICyyOD+Q3ZXZh8fnbxsZGhEmn02nkmCJXhUIhTvLjXggdE+WizaDnt2KfPD8v7YjgTpyHTkkvuL6+frTm7vynpE+eLvM9sAjpKSlG8PXNC+3e3T0c812tVjMHl3haCvJYq9UC1LkcuSyQQsg6My9EbokEUUOQB3bdvqY2jigW4Jb7c+Y01SFP6SHIJF4DQ7BXAKjIE04T+ns6nery8jJDYlFn4A6kA29/LgfO7sw7PkJ+WcPJ5OGwjXfv3qnVamXWnJ+fiob4vPje/9j4KIOaN+Hpz3mbJ91YeNuXl5fa3d2NUDfKxVtGuOLEwFHZPBqNojclrCLgq1AoRL4gf2PjpvlMeQoBhcqioGydqXEWTJovaOohpYZfylZUpwwY93VxcaFGo6Gtra34HGDbi0tST7pUKsV52l4lDUi9vb3V+/fvI+/F18eVoc9NqlTyAPyHAO
3nHCl7loKQp+SVz2Kkrq6utLe3Fy2TyLmbTudH0DG/5ErhWFAF3G63Qy4Zo9EoDCLOGHIF0+/OC+kaLs+E3FNADfBD3mBPMaAUGDmrKinYLp8z7g0lyIlS0kOPQnpijkYj7e3tZY4AxNAgnwAh97iRWxxMWAD2MqD79vZWg8Egqkdd96SgM5VNKRstyJORVNY/53AZTWUzZcr8ecvlh3Z0g8EgChjQq+PxWOfn57GWXpmf5lWTM43xp7ME6+X3wNpiWF2fcH/oRw+5+3P6cHDKM0nztYIVw2akEQUvjpIUeaWAFtcBbri3trZUq9UepaJ4FbQ/s6fzMCfIPN1ZvNk7f+/1ejo9PVWv13sE2N2OpiSEs10O+lwGPvfwPclwG+u2i+drtVrRPo4IH84QABCd6/jAdaRHV5xBJEIEI0tEhx7pkANuW6V5Jwwn4Vg/7L3b/TxbkpJgeXbTnxX9RPQNhj1lQ5E97wLAfoGE4Fk91YLP8vN0Og0yxGWbFB7/PHsbG+VYieH60+/5KZngvj42PloklQLN1EPyL2R4yIRQy2z2wKJeXl6q2+1KUjAuKEhCHoPBIIAhE0T+32w2C2oer4MBsKK9hzQHmz457oXwOffyPOzleX4kviP4sKY8Y0rHIzjkxPI9tHZwD6ZYnPclI8TquVAOaP15yFekhRTPgcAjXJxxnD6rbxjWmTlJN5/LBfeUvrYoLFTKQLjR92d2B8KZjnK5HKklnU4nmp9zSpd3pfC8UZgBDw8hs9PpVK1WS9vb28GKozBQKr5fyI3jPXwXzI4bcNYTRcQ/D7NjJDxvCvlkvznb78aX95Kms7a2pvX19WA9UJQYba7JNe7v72PevOCG+4SBYu5Spw6gxAk8qZwh8ymAY14+5KAiJ4siu3lgI8+Bynsf7OfR0ZHW19eD1YeFabVasT4YZow+8+Hr76wLLJcP5NVZTjdAPr+pUWO9UkKDvyFzyAjvoyjFnRzsiwPb6XQaetSPp9zY2Hhk4N2B496RKZ8HJxdct+BEce9pKgvOws8//6zz8/OIAvhaplHJ1MFynZ0SA4sgu34fefYjvUfmm2dCRwLOAGuE02lrB3uZAl5k4Pb2Npwq5BNb7kAY4OmMudsC1tz1PWF5t/3p3kznA/lApnx/MdiL7Ie0/dvd3Z3W19clKaJcs9lDlT/9UQGn7DXsiM9/ig9SMOnpZC67kIN3d3c6PT3N5JdLWUckdWDzvv9TCa3/UJGUf6EbBGcZUQxu1ACNnA/LRHvYCIPueU4gdgyM58ghKNxL6uHC2JB3gtLwe+Wz6aQ5swlwwDg7+HaFxkIi9DyDpAzI5Dnx4LgOrAXGGOH2TQADlX4/id8eYsNgIFQXFxfBgnC/qWFwZfehsGjqIbp8LIKilB5XFacbwTdHytpID89Fk/5Op6N6vR4b3wEYTBTy7t43ykKaNwnf2dnJpMykhYF+Ik+hUIiiJthawoR5Yc8UKHjVPv88NYWwF6k2rkz8Zwe4eOpEApgDgKo/G/8cPKdg3J0wvoO2J8wBn+l0OgGwUiXqijZP6eU5KM5aoacWZeQ5kR5yTJkpKVt9zHun04fQNnuhUqmo3W5n3gs5wPB1Q6fnse3cCywkxIDPMbqYveBOYF6uogMW5MGdLq41m83b+TEXtBx0pgj59FOnSqVSNB8vl8sREUBfs5c9t9BZKAerHsrn+p7+4HngHhnMy1dkDlKghzykxMCikQIpIHXHgpGudb1ej17m6DYAGnq2VqtFTQZ6AcDP9YkUSPM8fWdVWVPX36yLYxj0p4fJfc3cbqf1G66HUuJHyrKMKUZw+8FnHCTjIJKC4mM6nYZ9cibYnU72HlEqckwhtXh+Py2LPeMpbL///e8j/9T/OS5KbUk6H8xRKi9545MAap4yT8MNUrYxb3qD0rz1AqFQjDGKEgM9m82iibl7++5VMNkp++X0O4IJsMTY+wIjbChGT1zn2dOQAs8MqECo+VtaOYsi82umOSFutJ
25xUB4uNTBqwsh39/r9aIVR7vd1rt37/TmzZt4thR0OsjMM+Qf8oLzPLRFGG783MN28MTI8/yZ7/F4rGq1qlarFWkpzhJi4D30gaJxz9QNlzsdyEbqdSLDKcvvzKjLsrOmKFrPReUfsodR9TxX9i97LA2HlsvzI/Q8WZ97dQctBXuulPz60lz+ke9Go5Fx3piHwWAQebv+3O5cujL0tX1q/dN1X6Th8yM9jm7wGs9HYRq5lx6Z8tQpZ/scKBEyxxC7jLPG7ojweQduzL3rRZddN77+N9c3HqbFwXLG1NtkQURIc0efa+HQMWjGTgTLU2AAs8iPzxX3xPMik7TfYv43NjaC5YOddUeWdJ+Tk5OMHWH4+j5FHKSAfpGIgZT0kOaV374O6AciizgL/PO0FH4vlUq6vLyM9ClsGdfnM6ldI49Tmtt6xytuO9M0IOTA/7kDyLOka5TOh+9VnwOu6S30PG+bZyNay9/cyV9fX9fu7m4GLxUKhThQxYkE9jOkndsetyvsNfJavR4ItpnnS2XW9XH6zO7QfipO+GiIn8mUsk1sGQBVhDAVUgdAg8EgNvxoNAqBQdi8SprfU1YS9si9VIyjA2G8ATbGcDiM6rS8yXHQ4H9HcbFJmAdeg/Hx0Ku/l3lk0W9vb6NXK3+bzeYNgz2pGaCRAiLu0QVlNBpF/zNyUSSFoN7e3kYOpCs/nh3hQjBThZenIP2zecDvc45UdvMYCe7XwQsDubq9vY1Gz9K8+tJBJx685+t5TinKiCR7hnvRvsfwqrlHl4fUCXQv10NSyCL/832z2TwMBlsPC+SHZDjLwNwgnx5ec2eJZ3Aw7DKBAQAc8zMMF8dI4lA66/T111/rzZs3uri4yOgfX8+UoUgNhq8r85iCgUUZKSvDaykwlJTRjSsrK9GnUMr2iPboDYUiGD0Hqny356h6SM+ZyZQdd5aea0nZynz2jr/uw/UJxrNQKGQKQHhmgAlRtRTM397eRss2aZ7DSq9t5DN1/p3N98iEp9Qw36QCoauZX/YTczQcDvX27dvov5kyYa6L/J/vNd9j/n96rc8xUiDte8/BozPQd3cPR3t7oTFOaBqlWllZiWgh+MBzOL36HtuMXgJvsNaFQiG63PA5r6jneZA3ZNZxAs+QOrnuqPlwcgLw7Glc6GbfH7zXIwBbW1tRdI4zip1yooHr4IRJilQX5BRnr1gsqt1ux1Hu6Bf2NkTGyclJppZCepzLz3DgCzHijmqefk5HYVEAxXIsx3Isx3Isx3Isx3IshyQtTtLVcizHcizHcizHcizHciyHlgB1OZZjOZZjOZZjOZZjORZsLAHqcizHcizHcizHcizHcizUWALU5ViO5ViO5ViO5ViO5ViosQSoy7Ecy7Ecy7Ecy7Ecy7FQYwlQl2M5lmM5lmM5lmM5lmOhxhKgLsdyLMdyLMdyLMdyLMdCjSVAXY7lWI7lWI7lWI7lWI6FGkuAuhzLsRzLsRzLsRzLsRwLNZYAdTmWYzmWYzmWYzmWYzkWaiwB6nIsx3Isx3Isx3Isx3Is1FgC1OVYjuVYjuVYjuVYjuVYqLEEqMuxHMuxHMuxHMuxHMuxUGMJUJdjOZZjOZZjOZZjOZZjocYSoC7HcizHcizHcizHcizHQo0lQF2O5ViO5ViO5ViO5ViOhRpLgLocy7Ecy7Ecy7Ecy7EcCzWWAHU5lmM5lmM5lmM5lmM5FmosAepyLMdyLMdyLMdyLMdyLNRYAtTlWI7lWI7lWI7lWI7lWKixBKjLsRzLsRzLsRzLsRzLsVBjCVCXYzmWYzmWYzmWYzmWY6HGEqAux3Isx3Isx3Isx3Isx0KNJUBdjuVYjuVYjuVYjuVYjoUaS4C6HMuxHMuxHMuxHMuxHAs1lgB1OZZjOZZjOZZjOZZjORZqLAHqcizHcizHcizHcizHcizUWALU5ViO5ViO5ViO5ViO5VioUf7QH6vV6mw2m6lQKIj/GbPZTLPZTMViMf4+nU4z7+Fnv4
YkFYvFeK1QKKhSqWh/f18vXrzQV199pbW1NW1tbWl9fV17e3sqlUpaXV3V7e2tCoWCSqWSxuOx7u/vNRqN1Ol0dH19rcFgoPX1dZXLZVWr1fju29tb3d7eqlgsqlwuazKZaDabaTwex79isahSqaSbmxvNZjNNJhOVSqV4hnK5rEKhoNvbW02nU5XLZRWLRU2nU93f38dzlUqleM7xeKzJZBLvu7u703A4jPu6ubnR3d2dxuOx7u7u1Ov1NJlMNBgMdHt7q/v7+7iWzz+vMcf+t8lkonK5HOvh7ymVSvFaer10bYrFx76Lr7P/znv5fTQaFR59+I88KpXKjHmX5nPGHKTzlsrobDbLyEqhUNDKyooajYb+8i//Uq9evdJXX32lV69eqVarSVLIx+3trdrttiqViqbTqcbjsW5ubjSZTCQpZG40Gkl6WDPkwN/ve8nXtNvthmzd3t5qZWVFKysrKhQKKpcftjTyc39/H/tsfX1dhUJB0+k0ZP3+/l6TyUSj0UiVSkW1Wk2FQkGrq6vxfVyT+SmXyyH37Ku1tTWtrq6qUCjE/bM3R6OR+v2+er2ehsNhzMV0On1yDXhtOp2GTH5I7nnN/+fzvq7MxWQyiZ/9fTc3N59ddqvV6sx/d93q+zNvD6O3isWims2mtre39Td/8zf6b//tv+n169eq1+saj8ehF0ejUawFMoleGg6HKhaLur+/D9m8ublRsVjU7e2tJpOJ+v2+hsNhfA5dent7K0mx/9ClNs9x3aurq/js/f29SqVSyBX6bHV1Vaurq6pUKtrc3FS1WtX6+ro2Nzc1mUx0f38fOrpYLGo8HkuSKpVKzBXzhL5ClqWHPTkYDEL20eWrq6uh62ezmQaDgW5ubtTr9XR5eal3797p8vIy1/alepKfU5nzdeY96e+p3Pq+4PqfW3YrlcqM++Z++Zln9Z99XVZXV/X69Wv99V//tf77f//v2tvb083NjdbX1zN28urqSmtra5IU+g0dPRqNNBwO1ev1VK1W1ev1dH19rffv3+v6+lr9fl/9fj9kTXpYd18DriU92MK7u7uQv5WVFXU6Hd3e3mp9fV2DwUCVSkWNRiNkk3Xh2uxJSbFPVlZWtL+/r+3t7bhuqVSK77y/v1elUol7cjvG9e/u7jSdTjWdTlWpVLSysqLxeKxKpfJIvtx2rK2tha549+6dfvjhB719+zawFWvmupZ74JqM1GamP+eN9Pof07kfBKhcBJCVvu4T4b+nBoAbcqH019y4IQybm5va3t6WJG1tbWltbU3D4VB3d3caDAZqt9u6urrSZDLRysqKqtWqms2mVldXQ5kCGFZXV2NhUXquQAGVKGoMbgrWbm9vNZvN4l5RigiVNFfEd3d3j4xguVzW2tpagFXui/tYWVnJzDWb2Td53mL7uiDMecbb5zw19q5M+Bvf5wo9/b5UHp4SzD/2yAPl6XPzmoNYV67pPGL4kcNaraaVlZUwcoC56XSqer0e8uIyhIyhWFFMfI7vqVarAVxTA1soFDQcDrWysqKtra2MQkT59nq9eA5kbjKZaG1tLeYBOS4Wi1pZWQkQwHO7smXfpMqvXq/nAkPutVwux79SqaSVlZVwAlP58eumSi8FpP4/9+bfm+fQpUCY7+MZWYvPPdK9ilPOukjKOL58xh0UdFC5XNbm5qZGo1HIbqFQUK1W083NjUqlUhhl1hd5u7u7U6VS0XA41GQy0erqqqrVajjOzDs6D73f6/VizV32V1ZWAoBKCpkEHLvR5Rn42+rqaryX+xyPx1pbW9Pt7W3IF3KLw+a6lGf115gvXsMp5R4AGXkyBjnj6+D37Xo7tXup/nUAl3cd7jFPLy3KyNOvUnaPpXtdUthlSJvhcKjBYKB+vx92/erqKtb7/v4+A3JZr1KpFP9w4F0e0G/gA67Dvfv8s99c9zJWVlYCj9RqNVWr1UdyVKlU4nsmk0n8LCnzO6QZtsptAtfjOQuFB5KE+8Fm8N0uE+68uG
yXSiWVy+XAReyTPEzg+jnFeOl3+WuO/9gb3D/Pl17vqfFBgMoFUuWYsmvpZnHQ54bEjYIzjeVyWSsrK+EJbG5u6vj4WNvb27q7u9P19bWur6/D4I/HY5XLZe3s7DyaAIBiKmgwQigZWC8UY7FYVKfTiee7u7uLz/vEs6Bu4NksDihWVlYyiovroOClBw9wZWUlPoMAORBIFzA11Hnzn76X96RrmgcCeP4U1PkcpGv/od8/53DmKQXd/h5//vT5kCtGtVpVvV7Xzc1NeKQpc+4KeTqdqt/vq1wuhwHmuigmhisTv54rXEkRJQB08F6Yp16vF/IrSXd3d8GySvNowGw209ramnq9XtyPK36AB8bY95r/Sx1OjASgAkcOHcKcujJzneJ7iuHrmBqNlF3gGqnjlPc9PhyAfO6RZzAcoEh6tH/52fVspVLRaDQKXYUMOCBKDRHzyPqzdisrK/E3dB2AtFaraTgcajabhW71+0QPY3xXVlbU7XaDqUWv++d4Hgb6knsk6iTN9Tnj5uYmomjS3LnEQKYOEoCFPcFc+97DsMPo+V7gOql8PaVTXA+47skjHHzk6alFAaqpc89I7zXFETznzc1NrONoNNK7d+/0hz/8QX/1V3+ler2uy8tLSQqQSiTKQRPfC3EwGo3CiUGeV1dXdXNzE+9zIJuuWbrH2Afcf6PRCGeMe7u9vQ1SgLVmHzk4luaEVrFY1Orqqvr9fmCHvHlln/hzwp4i4/4cec73eDzW7e2tKpVK6OrUSUr1LSNPx+bpqTwc8P/HqfooQHUUngdGnmIqUlCTh8Klxxv0+vparVZLg8EgFNFgMFC9XtfOzo4qlUoA2vv7+0xoVFIICt6R3zf3AevpHg4LArWOIuTeYJ/YEBgzDDGAAEEslUoajUYRgoV6T5ViqVSK0ECh8MAKIHA+/77pfc74XGrQU0XggpSCNl87/2yeZ8T1eH5f30UCp4xUGfpccN/OIvlI2WjWazqdamdnR7e3t+r3+3r58qWm06l6vV4wT4QBr6+vVavVIrSSOh7OAODVSgolhtOFwsV5gWFFhtfW1gIwSMo8E2kphUJBjUYjlBLyhDLlvvL2s3vDo9Eos+78DeCL7LrxZhC6dSCUyq3LciqH/O86xef0Y4qRtU+VbHqfn3OkIDTPwUrlyJ8DmZ3NZqpUKqrX61pdXVW73dbKyorq9XrIPdcdDAaxhoTaWUvkLb3HWq2m6XQa6RzudKWEBk44DBa6u91uh3FNdQpyz88uN+PxWM1mM/Q4oAHQAsjkM5PJJBwUj2xxPb7bw63ODM9mM93f36tarYadAFC0Wq2MDPuapeuX2iMHcqmcpmAhHen3fO6RNwf+empj/DXWsdPpaDKZqF6va39/X5eXl/pf/+t/aX9/X7VaTc1mM8MyuqPkTgYOVq1WC/ArzRny9H7Tn9FpLgvI0ObmpkqlktbW1rS2thZ6j+cgBQGwiswTteA6DI+AAThTwsPnCxY0XXteR+/7fLN/iNyCb7BNpO6k+iSV4dR+ug1g+D7z5+O6KVHwofFJDKo/bGoEUuTt7+FmUq8k3bwoxXa7rcFgEIv0/PlzNRoNVavVUJDknUBtsxgrKysBGNxzQMHOZrOYmDyPAPDBRvEJZ3GZfDfKLFjqvWCkNzc3M8KEoLtS9LAVKQ53d3cZTz/PGUiVn89r6oGlCiPP8+G9hUIhNmYeKMhTPv7eRRp5TpX/nAcGeCYHAz6flUpF6+vrKhaLev/+vf75n/9ZhUIhlChGdWNjQ2traxqNRhlnyY23K0Luhe/C6LKPCI/6GpfLZQ2Hw2Cj+v1+AFJAAUya9MAsEcYHTFQqFfX7fY3HY62vr2dALnPhAH42m6nX66lSqUTKAHl7vAdH0plcf5bUseF5nF3i/akCdBllLj0q43OdGvnU0D/lpH3OkedASk8DGDfYqQy3223V63XVajV1u11VKhU1m01tbW2p2+2q2+1qNptpc3MzHCtpHsa8vLyMEK
aH4905qFarka+KfnPZZW0JT/IPMOnPB3nAmhH5cnviAIS91mq1QhZTAkKaO5u+jz0sy33zGq+js52E4HWP+t3d3eXayTyHK0+XpnKZ6qB0D+SBvs898pg7Kf/e/dmRXfRVofDA9H/55Zfa3d3V+/fv1Ww2Jc31ELLj60vaVKVSCVmq1WqR6pICP2mewpGHT7DLkEitViv0JvqGz9VqtXC+wQL8zRlWd9KQJ3AJ102dbq7ndt3vG7zC/HmaA+kxAOd+v69isahKpRI4ij3EWn3IkXD5TtfW35PaOO79KVv71PgkBvWpTZAa2bzX81B2KpjSg0E7OjpStVrV8fGxXr16pc3Nzfh+2CKMG6+hCCkwWltbi2R3BNnvAwGYzR48Yt5LaMGVVKFQiFxVvH7CPLCcLDbK13MvNjc3JSnDrqLgeQ1F5+AUgXQwK809EZ/vNFSXp7h4fwo2Uy/GvZ48D+mpsKA/26IwUSnT9ClMm7/urJ2HaGq1mkqlkrrdrvr9vjY2NvTs2TPV6/VQgsiIh2Fms1nk8nEtV0auiByg8X7kn8gB94gSJeTI/RNZgJ0iLebm5iaKobg+StvTCNKQPGAAQNFoNEI2q9VqZj9yT9wLc1KpVCIZP03F8TVLmXt+Tv/3NXVAmpdLCtBIwcJTsvC5R55DmucUusLnd2c51tbWgiUZj8e6uLiIz/R6Pe3u7mp1dTWMuzsJ5ELj0PBPUkamAAGsLbrVr4duIaQ6Go0y6VUYfoqa+B5nnbhPdLKkkMdqtRr3Uy6XIwSMPMAupfPjjs7KyopGo5G2trYkzVNjiDBg1NnnNzc38X3oP/aLp2mxZmkki+920O7y+CFCge8BoCyK3pXyyRNGnmPqJBOO0mg00ubmptbX17W/v5+5Ngyk6ybstjs4OEV8r5NTeToC+fZ0EI8MuV6+u7tTtVrN5FMTOXW8QXSUnFSKvZF9cAAFrA4o3QH33M003xU5SQv6/H3oZGd7K5WKqtWqarWaOp3OI7DuxIHLd55OzgOcT+luX8uPjQ9KtW+QPLTrnkJ6syxa3k2nHtRsNtP6+rqq1apWVla0u7ubyXtyqphcKr7r5uYmcvxgk/B4UaIpEKCYhGp5wu8UASAgvMZ3z2azCJOhtGGt2u12JOvDHiEUUP7j8TiqmNmQtVotnp3vXl1d1draWuSipBvaQ3jMad78p566fz5PUDxdwIFaStHzmTQ8tUhKMpU7BzBSlh1M5/KpzcjP/X5fa2trOjw81H/9r/9Vv/vd73R0dBRseRrCIXyfglBCUs74SPOKfe6zWCxGyotX9/O7g0yALAqQfCP2Avl+7AOemXAlRpd7dceDAiwYBaqpmSv2HPtPUshzuVyOzhwu16lB9nnmGf015PGptXPZ5TX/G9/hwC4Fu597+DPnOZ7p/k8BvRvvRqMRa7m7u6tSqaTLy0vd3d1pZ2fnUUoURorPk5rCwHCyvtxfqVQKUNrr9dRutzNhWBwS6YHF55/vU4pl19fXM+y861Tk9u7uLsK5yGmtVguAy307sw/48DoCl/WNjY0AxK1WK/N35skL/rgntxf8z75ljljXPH2eAk+3t3nOmsuBRwA/90jTF9Ln8r2WEgiAucPDQ11fX2ttbS10GtEfj8Ygqy6veeCeaCx/R369CNqBmQ93mNHJsLDlcln1el1bW1shn8iVF866c89aEgWG9HIGk4gB+zgvupaCcb7r7u4uIzN8ln2Ckwdwr1Qq2tnZiS4E6Vr53kxBZh4WTDGB7wcH+L4vPgZSP8qgpsPD2O4xpkYh74tTwMA/PIPBYKBnz54FCHVA6EYd71VStErZ3NyMifc8E4TTN7IzWRsbG5KUSV52RomF4plLpVJUv3qCNQne0+k0wp88G0qU7yoWi9EGCIXLPLLpyuWyWq1WhA3yvOgPGfdUWbhyTBVEeh0Hmnnv883O8NcXZTjIc0/Q/+YKMr1/NxI4R71eL/KKaIFGrjHzgexNJpMoquC6rlBZ4+
vr6/CwYbpc/gAEKCIKtBgA4l6vp8FgEA4YSon9AHsKU+pMgfR0rpArTwbyBCvrnrl3BwAoAFA81OZAytfsKWPrMpgCSleADNdVLrtPKdNFGalM+mtSPpua5rQBqorFYqQL9Xo9bW9vh87xQtF0/gqFQhg29IWz/+n9sp7tdlvlclnr6+sh44PBQNK8ip90FNYCGVxbW9P6+nq8l+gUIMX3BwWzPLt3wfB5QeYBp+n88XmYNuzH/f19gBgMO8/N+3lPrVbT9fV1zL2ULdxzltRlnj2Wt/Z+jRR8+RqnwOFzjqcIKSk//9QHTnKhUAiiyuswsKW3t7dqtVrBbiPLyCc/I/vIEY6y51o6JvA6FIbrJkCly+rGxkbsCeQSOWGt3cHgb6w5edjpvBAR82Isr/pnAJQlxXylutGLsPgO9jZRXhxCCg7TefDvTZ0uno297HghvReXjU/Vtx8EqM44uIJPw8Wpwkw3SwpgXTB4YEAjnoGHbngveaYY2GKxGCFX8t0Ad9wbgorB9opmGCgS3l1QCeN77h9ABabA2/JwH9JDWIrr8BlYVJ6/VqsFa4GXI809dBKZqYTO82Ce8hrdeXAvJvWK8tbGr+meTx4A+Ni6f87h95ZuGka6CVMAnsoscjWdTrW7u6v19fUwmuSaEuoEEGJIHSxxf2kOVaVSCblGdnhPqVQKow6LORgM1O12g43yHpZ+31wfeW+1Wpm9w7Ntbm4GoHSHzJ1D9itGnL3kHr1HLJB/AAtAgz3uBtodMHe0fO1SfeNGxAGIO2S8xnXSIhnucRFYKCnL9qcOsusBfz0vyoETPRgMdHp6qtFopEajEREq5MkdFNYE/XFzcxNOPmuGcw2LJc3ZLYoHuTZ7AVIB56lQKIR+JU0AYOs6vFarZYpHyJVmb8FiOrvqMsJ7kVFff+bZAQsAnGKoNEQsKRi98XgcrKuDkhRI8vk8W8rr7HvXFel6pr+7LOSFrP/YI8+2OMhOiZY829NqtfTq1atHoWyilR5aZ30dDJH+sbGxkdFjOMaSgr3kZ7/3VE9L2S4qRDcB0YXCPD0Qxt/1JDiDHGaPjiEr7Dd0LUCa6wCyXU55ftd/Hl32CKFfm3vi9X6/HwWOfG8qX75OzuKmINXlgP/9GryP+/mUYqkPAtR0I32IMcsz/u418hnfkM6MwhIBSGmBgCGU5vlGMDcIPwLU6XSiyAN2Eu8Vjxe2a3d3V/1+X7VaLQCl53zwfXgzs9kslJakzDOxuA5g8b65HkrXk/5Z6MFgEELuyt97XFL8lSo5Fya/Lmvgf/fPM49pCNWBaAryfEOkf+PZU0bgc41UqadG46kcMVdOrmAx3rPZLLxlKQuKMBTkGXm4H+dHmjOe9/f32traCsdqNBqpXC6H7OBxu1Lodrva3NwMZpbr4Y3DEMA4ULXJvXgOH7JIyNJDQa7EGLzPgSRJ93y/p6YAWBuNRgCLZrMZ17i8vHwkLz7nqf7xdXU5z3MofF1T4OvvT1nvRRhuDPL+lsee+b5En2KUb29v1el0dHh4qKOjo2Bb0G8OSBmkLZF/6U4H7yMnk3+8lxA8oBDdB3Nze3sbBYSwTpubm7G2XmWMzmQ4Q4nsItueSuWEAlXeAG7kkusxzw4eJAWg4J6kecoNz4RuX19fjzoIL8RxJtVBMeuWynUe8ZASC66TPpWJ+mOMPPCZ7jv/3Z0CXmu1Wup2uyFPFH2SplSpVKLQEx0OeAVobW5uhq6Wsn1LcT7QgX4PrvddFovFYtQU8DeioWk4X5o7DN65xbHFbDbL7BtIiFTn3tzcBAbwCEDKAHse7Gw2y6QJAFR5j+/DyWSi58+fB/6B/EOHOPuft4Y8q+OK9L2pvKdz/jFS4JNC/HmePK+n78v7rD9IXhjg/v4+cjHd62EhmTw+7xV8d3d30ZqCU3A8BDUej+P0D0mRe4Ei2djYCOPvict8B58rFh/yAPGYaZGCUt7Y2ND6+noIWqpwUGwwDQARrs2clMvl8G
oI/ZLb5y0qngKPecAsfa//Lc+xyFt7/z3v+9LvWZSBkfP7S8EN/6dGOmUxaFy+tbUVYWo+QwQgbf7saSCSMuuHcuHgia2trTCEAEvYfwZN15FTGls7cwo4xFBzD4Dm6XSqTqejUqkUDJU/K4rScwgxxOyxQmF+wAbGmudx44/segEZSpOoRl5LOEmZ4g9XgOwnviOV6TRiwPtcqaey+ynK8o818pxJf47UQXV5dvad9bm4uNDvfvc7HRwcZBqIS9m8SJwomPhGo6HZbJbJn3Y2CBB5dXUlKdvajGuSXoDT0+12NRwOdXh4GHIL8+MgEAMPawWQnU4fUrRI7cKp8kIWZ2dwGPk8suhznDr+pVJJ6+vrsZ/SnGlyDgGmz549i3lstVqPchp9fVKd4nopZRwZDhL8WlxvUXSu23YnQVLbkMot60Z/U05Sms0eTu7yNfOwP/8DVEulkra3tzMAS8raNGQgJWlcDqR5caqfjlcoFKLgENnkMx5l5fOz2ZwYQ2f5/3n2F9l3PEOk6iniide4J77DMQh20OXPiRbaekHiIY+MPOfC15KRrr9/fzpSHJg3PgpQXcDSzeA36X97KrzmAsO/UqkUdDnhKIRSeqDqea+Dxel0Gsf0UXXpHhWMEJ4VCfSdTidyCWldBZPA4nCSBX3vACGEbbe2tlQqleKYPe9diVLMM5rMA8YcAYCZ5X0I5uHhoc7OziIZ+/LyMsK5eHQuCA668kAYc+8bw+8tL7SUrtlTTGrq5X/u4ffkuZTp/bpB92dIQbjPFYBqOp03XMYQeyh8NptFM3EK8fg7rDyFScwbbI9XOHOPXpnM5+7v7zUcDjOt1ygwmE6nUd3sFak3Nzeq1+shzyg/og8pAGQQwuJePTrhOa0uH0RAYLn29va0vr4eynE6ncZeY91SUMlIGW7/OXXC8hzqpxjJFOR97pEaeOlxxaw/i+fU+vxNp1Odnp7qxYsXqlarAQadPU0dgna7reFwGLmg3rIMkDCdPlQl48gPh8NMfihrSe4p18KZcgCB0wKY9mfw065cHtkfpLVUq9XYfw5Gfc9ivJlf5jIFJswHzhsntrnBJsIHMCZCcXp6qtvb25gPX0tnPVNg4vqFe0jBagryUvCxCHr3KQCaAvT0b3ymXC6r2Wzqb/7mb7S1taX379/H+2FH0a1cCz2a9sn1/YEeAmP4vXr9B8P3HmuNQw8Dy99wuNPvQl/7WgJ4ifhyH3kRS0+18oJEae4IouMdoDtI9vl2W8dcgFUokmL/vn//PvZ2nvPkc5TKnWO9D0W40us9NT4KUPNCE3meoSsDFihlAvKYCxYARqrZbKrZbGpjYyPyl5jISqWidrudSbDH8Ds4BUxOpw8FS+vr6yqVShEm5xhITnWC2sajR8ESauW0E3Jf8G5Q2IPBIF7zxYEB8nnyMKkvHnletFJhPg8ODrS3t6f7+3tdX1+r2+3q/PxcnU4nwKqH6FxgUoXmSi41yq7oUoWZrncK2vx5FilUKj0+rcSVvZTfczIFPr7hMUiccEaLJUJQMKS3t7dxbjQGlLxmlOT5+XlUtSPryBw5ggxSTYrFh+Mq+/1+gDtpXgVLyBNF5OFRgCSgeXt7W7VaLQMymSPy/lAiaY6bK0mex4/vdcYJYEsR4f39fRyRurW1pZOTkygI9NQL1ykp+PL1yjMwKZhljvJGngx87pGCan/G1FAzZy7X0sNpPL1eT/V6PRgegGYKZAF7Kysr2t7ezhzFC5DkXsbjsbrdbpyoBlijUBBnBJlhvwAq0Vcw/JyMxv14cQgDXYstcCCJseU70WWEZp19wqADMFIn1YEF835zcxMtD7E1pVIp5tUBUKlU0k8//aRerxc6OZXFPFua/p/KgX8m7/OLNPyeUhbTnX5/HVt9cHCgcrms6+tr9ft9bW5uZk5cRHbcYU8jAr5nPL+a1/jfC/R8P6WAE8KKyCyRT4gmT+tw8OnD964/P4SDp6RQDMZe5d5IW3TW2ec0BY
RStisAeMQLtXie4+NjVavVKJg6PT2NgmC+I8UWqRzmOfspzuFZPiX/VPoIQE29Om5EyrYk8pGyVX4tXyCu5QB0ZWUlTrqZzR5aTzlgRPgAATwo1+F7YJNub29Vq9UyTZZRlJubm6EsPURKDigKOmVj8MYJvw6Hw3hGD3uhsFzJp2yeNDf0sGCcfOG5iGy0nZ0djcdjnZ6e6u3bt2q32zo/P88UjqXClOe9+PqloDb9TOr55rHgPkeLwkKlz5syFilwSVlUf5/LbrlcDgYdlh6Z8Lxnad4IHK/Z+zh2Oh1tbW1F2L1cLkdEAHCKgkI26WgxHo/VarUyLA7/uyPkFaUwpfyNHsCw9YBfSRkDngIhAIArGN7DfsDhBOgWCvNm1fxcKDxECHZ3d9VsNvXu3TtdXFyo0+lklGKe3sjTK9wHrz31Hq7rY1FkluFOpP+eZ9j95/S5cJqQJ/L3pIe0qvRUGwDWcDh8FEmBtWSuNjY2NBqNMteS5q30+P6bmxu9e/cucvbILwQwO5MjzduSufObAodKpRIHA1BIiqyybwCnvMcL+PxZKHhyg873IKtcF93OHge47+7uamNjQ1tbW9ra2lK1WtU333yjXq+X2+/XZdrDxLwP4kbSI3vxlE1eBKDqz/CU05TuV2wMDmypVIqUkb29vQyp4N9DhMmdGpzvvAIed8JZV3R3ahukOdFye3urdrsd+hV9mTqL0px84jq+jsg43we2kLIsrjPAfl1+R3e6bvbIhu8lf3aiWO6k+r3MZjNtb29HtKBer+vnn3/W1dVVhPz9ek/p4/TeuK/UpvocfWh8EKC6cKQXSxWk/z1Fza5Y0w22srKSab/kjJx/HhaT9xGywTB6ON8XxxlCWE0S8gGm3W43vBaqrhEgjCVKHqOPsKV5Uqlg8Dx+ykMKJGEwXFk781oszgtmxuNx9C9rt9t69+6dTk5O1O/3IxfXhd3vJWWfptP50Z1+33nPkG7k9JopmFmkkTpSeRssT0ml81MoPITW6X1Lj0n+0SwfkLmxsRFVz5KC+SflxAEExnQ0GsW1JanT6ajX60WXgMnkoZUPxR7IXbFYjM4QbmzL5XLsGxw3l2EcI1e6RCaQd/famS/+Z24ccADMHayzh9nbxWJR9XpdxWJRe3t7ajQaev/+vb7//vvIU3RmIpWtVPY+5L1zL6nu8f23CAaekbeH0vt3+ZUe54KhRzjGkPdQIIKxm80e8qcJod7d3cVpfugq1t/1GPcynU7VbDb19u3bYJokZVJUcMwuLy/V7/fjmF7CpZIyAJV8VQAg9+6t3NDR2ABYem8z5MCEOfR/fi0cTq6L4Uc20L1eyILs8BzkplYqlbApg8EgbJLLMJ91W8e9pBGIPHKB9/rcfe7hYNvnNwWiUn7BDMTPzz//rNevX6tYnHffYf1p8Ydu8FZ8/v0eDYIccJlI05EYrDV75ejoKHKdvQUa+8MBrTPlKYHmubOQSaTDoBuROe7DI7K8huOf2mGPWLgu9C4wgM80fYsaAqIZX375pRqNRkTkzs/PYx0YaZTRZZXf80L4bk9TxyNvfJRBzUO7Tyn+1PPPW/yU8keBDQYDdTodDYdDDYfDCAf6ZJOrCYgsl8vRbJ881lKpFO1MUkDobCYMEo3+uSeEK2VtETAWqtlsRoiH/n3OoAEU+W7YNuYPYUZoON/XN03qgbExqtVqFE+Vyw/tgU5OToKxSHPLfL7TdcsLIabgM/18nmfkry/SSOXWQYm/J8+BQhak+bPBZrKZveUMigBHyA2NrzlKtlQqhfJDbmh+DzDkPGdO+ri/v9e7d+9ij3AdngmG1UNDrKv3tcSpA+TC1hcK86T8crmsdrsd4Jdr8RwuI85OSNkCKy8o4zsIJ8MIbG9vB6vRarV0enoax3C6Yc+TyzxFmK5/6nxgQJ3pWRTnKtWzLpvpHk3nxfekF1z0+329ffs2mvVjNNPiDkAYHSVIV3E9SOcJ3oNzvbGxEWxjofBQM1CtVv
Xy5UtdX1/r9PQ0o2/Is/O0GUmPwCVOnzR3Gr1wz9fXnS/Ao/SY6YLZZw/wXnS0gx2+B4DuLC8DWWKvHB0daXd3V6enp/r555/DSX0qVcWJnFQO85wTt6OL4mCl942OkPToeX0fO7Di7/V6PWNjIIwoDPWC0HQ+uC5dRRjD4TDknkgq9+j6mUFUrNfrhYx6Kz5sMveP3CNbzrhL2eJTImnkLHueNro4nRvYYde16FLshT+/rwsOnM+97ynXo7VaTZubm3FYxurqqt6+fRtHdvt6+rrlrXFKKnhqwafI7X/oqNM8w8BNPAVM0ht3wOBgDk+cqnoYnFRIZ7NZhhmikhKlI83ZA/8uhAchodDo6uoqct9cqZHf4ovJ8yJYvV5PpVIpFLMreowCeavM0/39feQFesjVBQRl6Ke4pNenoAXhm81m6nQ66nQ6AbhdCFzJ5THjeexTKoQoT3cuWMePycHnGnleWjon/nrq2TuDNx6P1Wg01Gg0Mr3pUo/UvwPDhOEDJNKeiVxrTkCDUfX5z2PQW61W/I00AdbH81AxqCh2Dx/d399HGI1/s9ksQqh0FvDrAFrYh5IyCppiGeZQUuSJ+fx4lwKMOnPa7XZjr+AQOrhlpD+nMpu31r4PeI48UPA5R2pwXSbT392guvx6Tn61WlWz2dTe3p4kZUL7/BuNRlFc59/rupcCUpwqL+RYX19Xt9sNuSMEz+cuLi6iGwCGD90PiMRZR5e4cU6NKdEA9LGDHXQzbdyI0DmohIny/UBU4vb2NjrKsB4MgICzzIAST+X57W9/G/UP4/FYJycnGTbQDXWqh1I7mUcIuZykwOpzjadIEOkxGGHPpaTW5eWlnj17pkJhHpHiFD26N1DwLClOFEO3OVlGpwfykLHprtMJ87PG7qj1+321Wq2oDUD3o59Zez7j+kyaF35BaABcIcBweNxJXllZ0XA4jDA8XYewCx6h8/sFsDMvbptT0Op7i1xX6nScNDg4OMh0ijk9PX0E5sFwrLHLgevap3T1x+T2owxqHmDJA61pSNknJc9b5H+UGACQiXePHaBIriXhIjwUZzgxOn7v5FD4tTwsgqBKWe+az0DHw0xVKpXI10DR+ZxJiqa3ftSeJ0Jzrw4smAfmxwWXa1OEAus2mUwyOZH//u//HukOTwGxPKPsQsf8eTEC88SzpErRHZZFGQ6Y0w2aAgAHQS7r/j7WjmI7PGEa36+urmo0GmWYHb4LBYUxR85xjLxIxFl/ZB5WFubfQ5Ce2+ngFvly5sgVA7LE3/w5J5OJGo2GqtVqGGNXSsyNz5c7hXl7wlmLQqGQCe+iHOnjenR0pMlkopOTk0whWN735znOqf5JozHp5/Icmc81UqXucvwUW+FywHvc+LpsYDTRbZeXl+p0OpIeGzj0YrVaDSMNcQBQw7FPGXv0Z7vdjgMlcLwxktwPRTDIMevH9yNbDjJcf3kBCAbdHUcP1yPj6FB/7fLyMnr58lnm2gvFAOeAY6IQnCBYr9fV6XRUKDwwXJzw5v05ff1SW5nKQyoHvO5z/bmHy6aTIK4HUx3if7+7u9P79+/1l3/5l6Ezp9NpVJfDaEJMYY/92d3+I8voY9aK93Afvrc8vYmDIcrlhwMkKIwGsDrxhI7xPeC4Al2MY8V9sA/v7u4iv5UIGjaAQkT0sjtDkFTgjdlsFik0bk/YM15DQKSZ6BX35Xv4+Pg4eljTG9nnjnnjexwTpDjDbQ/z/rH0lI826k9/zlOO6QZKkbQbBh7Cjcx0OtXe3p52dnbi+DAAJ0AL7wIw2mw2w2vgGjwsVHSxWIyKfYSVfCyAKK2k+Bw/8wwAZ98QHCjgyo3rYShardYjoUURI2gItyvLPNDoc4kHBStWKpWi7cXr169VKDzkrpyfn6vdbgfYzmM3nwJqeWudvidloxZt+L3lsWNu/D3PzgG9K1VpPvfX19c6Pz/X9va29vb2AgygvCiwYy/4nLkT5mvDa1I2sf3q6iqMGrmvKD
kKX0h4dxCCQpIU7yOslIaR0vWeTCbBclG45+yWNAcMrpCcifX59e9h/xMiJrUFwEGOJEq4UqlEQWBa/MD35zExPpcM9/QdGLj+WoSR6tUP6d48/eA6ifUB2PHaeDyOLhNbW1tqNpvqdrsR9eFzfpKdFwZhfAHB6D7X3aSbwAoBGpAngKDvN4Cryy//Y4ABujD6nIPucsc+8NxAnyeXJZ+rnZ2dsDV5TrqHjN3hc6BEigwOHo7gv/7rvwYjx3Dd7PfjOsPvO5UFnIBFAKhSVg59+BzmvWc6fTjVaDAYxLox0NHFYlFbW1sajUZqt9uqVCoajUYZXYQs8jnkgygTLHrqxDKf9Lkm9W8wGGhzc1O7u7uq1+tBOCEXDpJ9v/Gcri+x1w640d8p2PXOKJIyNorXvSiLaNx4PI70Kdh+0gJ4H1G3brer/f39cFTdCYQwWFtb069//evIRR0MBnFPeWuap4dcBhwz+fufGp8EUN3rcWDiN+mvp0qUn1MmwBE7OZwsIvlNDAQ0DWeibL1nJN9BGH9nZyeS1QGyXqlKQQZhAO4fD8OrVDHak8m81RT3x/ciIP8fde/15FiWnfd+cGmQ8Ehbts00ydGQwSdKoWDoRRF60H+tB70oFCKHRtSY7pme7jJp4RNIA3MfcH8L39l1sqoUvMHCPREVmYUEDvbZe+21vvUtsxFgV54O4Bm/LyYGAIFxwURhoRgJH0kKwH56ehpgiRDHU8ymK8A8RZj3N1+zNGyTAtoveTnw9PlNc8t8bhzQ+nMgMzAmnsuJQ0AHiGJxXcnO+d9+D/7xXcitlAVPy+Uywv707EWe6NpAyItCLTeckoKRwmvnJw4fBt7H53PmbJED0ZS1dMcTJ84VI+9H2XFVq9U4cIBiAJiocrms169fR5SC/QfISXWNj8nn12Xevfg8QLttjhZrwZUqd5drf7+vDfI6n88zvWaRLViZ4XAY9ycc7kDTUzQAojgZrDFpA2nkoFwu6+rqKvR5tVrN5Fi7HLoRZd3Rr7yfYi7ABvLCvaQN8EkZHZ872Fo/HtKZVi9GcabK5x3A7bUD7B3GQiN/QFVezcNTNjMlfFwP82ypXf3SVwr48qJS6YUuJMUCW40OdT2EDMPo+Vp4lMn1O50l0lZVrJkz8t4KbzQaRX1Jo9HIdNjh/shuStKljovLBjY8rSXxeWL/4CDCtIKV8gqeVqtVFNN6egz3lrKnkPE83Aec4vtpuVyq2Wzq22+/1Xg8Vq/Xi8MoWJNUJ7sc8HseW+3jeer6rBzUPKHzzZX33qcG6xeKSFKwksvlMvLyqHIGEFBcghctbXqRpcwRoVYWeDAY6KeffgpBg+Im36NWq0XCP2wTrVR8U5AwfXJyEidHIVAO2lBc/vzQ/Cyue35OwbswuRAypxgCChNc2aL8O52OBoOBJAUYz1sHN2yuCFIAmjonHjL1e31K4P6tLncY8lgH3yD+jClg8Q3o9+52u2q1WrGWqRGBWUIm+VwqJ5xERqUvYIBcIJTYarVp6u/5eHjRhMc9bI9s7e3thVFE1iRlFJdHCgCSPl/OJLvD5HPIfkYeiVrwHoAxn93f349cXM9JlBT7kV6eo9EoE6ZzAJc6HNzfjQWXy8S2gVK/nOFzfes6QMovlmLdAPawpUdHR6EnG41GMKBe2U+o1Qs60UGp48I4yEdFN6Lj0FO9Xi9YeWc7U+YJMChlw5KunwDIMETOXDJ+3sscup7FcUL2kREHCMgotoA9RqTL9XHqKKROBXvt4OBA3W5XNzc34Vz62uY5Xi6r/ro7Yn6PbbhSICLlp6W4zvWLXFJpc0iEyzoXYezhcKjr6+toUeURG9IBAHd5Rcpc6NtarRYHLiyXS9XrdXU6HdVqtcAeqX3mQi4Yr5NT/j3gnN3d3ZAtB73cwz+HrfDUgpREASy7w586Cg74Janf70eP39VqE6Vm38EYt9ttPX/+XIeHhxHJcxvrutT3Y94auwx8Sm4/66jTFHSkwpUnfO
mmSj0d/3+9Xg9v2D0HKvTxaFw5enU1XgCsI68BPAuFQhQI0EB5MBhoMBgEO8XC0cMR6p7n9DDsYrE+E31nZyf6tvI+V+IsdLownuCcKh0UpOe3piwAQJbQDmkJ5XJZrVZLL1++1NXVlQaDQQD9lEVNvfF0XZ4Cmr62/to2Gnqf3xTMSNkN5MAmvVh/aa106vV6pliIz7vjwO/pPpD0gcJFZnC6OEaRda3Varq8vIxzqQGdXH7cKF4vCobvpfCJcQEikXmYXG/s7Aw0jqTPoTM4PBPMFyFlih557jznwJleQDfgpNls6vDwMHTB9fV1RpZTcJKuf8qYO5Db5uspxf0xpzHVq6vVOk2k1Wqp2+2GbLgBhVHi3g5UcQgoGgKMcjGHDw8P6vf7odu8Kf9oNIoCrFarpVqtpkqlEkyVF5b4HsPZ4Z6wqZ637DKZMjIQE8g7e4K/e2qB6weAgveJBUBT0Yw+QG+m+h97tLOzE2CBPbZcrkP9zDOfy2N6XUbTNXd9kqYhfKnLwYqzv+6Upgyhv3dnZ0cnJyfRuYRcUIqRHbwB/ACPkDBgBc+dhiwidzW1bYwPvcvaV6tVHR4eqtvtflAQKn3ooKQYR1LIPpEl0vxwXpB7CAJn432u/HAA/36fe5dVOhulY/W5RzdS4Z/2AOa+yPjx8bH+8i//UsvlUj/99FOQgKkj7b+n85Lihk9dH026YnCp8KcT4w/D311h5FUsMlFe+Yiycm/HK9pSA0f4yhUrORiwreRa4A1UKpUIIUwmk8jpY6xeSOAelysmL3KRlFlwqrH5ex775hvEc3K5P4o1LRwDJKNE/XOwBLVaTScnJ+p2u+p0Oup2uxHK8/nP83z4jqe8W1/j9DXem4KEL3U52Ey9NebVX8sD5T5X7hD5/6WNoed1DxmhNJ0F8jlEoVKdj1IuFDa98+7u7jQcDsPhcKWFM+VK0cM2yI576ISMaNcGIHFWyT+DLHJxD54FBeYFgc7iMa9p2InxObgmPOoVrLVaTcfHx+p2u7E/ncXLC736ujPPvMbapuHwbbnydKq/njI3yHEaNeB9HHZSKGwK6Who7/uAFApnQFnjXq+n8XgsKcvCk9eMjmFcd3d3ur291Xg81mq1UrPZVK1WU61WC9CWNul3dsdTRZA3HDlkYDweazweBzHhcpE6LOwT75Pt+8XnmZSIcnlzbLA7O55v66DJgX9qnGu1mk5PT3VycqLT09N4BsbquiglLlwWUvvBXnuKUPi3vlJw7Xs+HSvv4/+3t7cBIl2vlMtljUYjDYfDOLUxjfilbCRrTfQVgOq6ISW5isV1AexoNFKhsD7lrl6vq9FoRGQqnWfunUbG0vf43KDjPCLm+5N9hI4ql8sZAO5OKfoPHLVarTK6PXVanTirVqtxTLyk+JzPC58jVfLs7EwvX77M4CHG406Er3u6/lypLcy7PivE/zFww890UdiceUAhVQgOOAGod3d3ur6+1uHhYWzKlHnywhSAAYn0ftIJ4QCKLm5ubjQej6O63xWJK1mUGiF/vo8whOfHsTmoWvVeeQhJmgfiG5awGr0tYT/82dl8AAH6DzYajQxILZfXvVEpfCDfL3UUfE3TtUg9dX8Of1/6mW250vACisSNwVMgnc+l3iCOCkn33NPl0cMf9Xo9o9TciHnzcN+onrAOUz+bzeJYW76zWq1m2ElXnHjSzvg764C8vXv3Tnt7e3r+/HnIOnLEuAEQ6T5GJr2332QyCcYAhZ2GTh24F4vrPKjBYBAFU86q+HPOZjO1Wq3Iu3ZF/RQwTQ1D+rdtAqV+5bFkec+WEgWpXgVMkcrk/aO53Mng2FucYjeU6BH/HCB1f38/ui8Q6if/Hydjf39f1Wo1Ck343jxg5YdP8Bxpnikn/HAQgTtl2BCXXeTZnz2dc8D8crmM4j3m2U+cckDi4/KIm+97/t5qtXR2dqabmxv9/PPPsdfTcQB83fin6+06DLv1pS8HNA6e8gB4+hqygNPR6X
TiXoTe3WlyBym1WYVCIdo7uv7hUB3spH9usVhX5lMEBFnAYSKwnshUum5EbB1P5DHb3vUH4Hd7e6u9vb2M8+FrSv62kwypnnZmt1QqhfxKG6LBL8bvOayMnflJowTL5VKtVkvHx8eq1WrhLHhkIk+3puvD5ev/1PVJgOoP/pSSTJVpnoefh5xB+0wWC9xsNqNxrbObPlFeDe/5QdLmmEffJLTnobKd74Xah71JwRhetOfmLRaL8Pbw5EulUoSvyD8EsCIAMenGrvK3xWIRFZ70ppzNZur3+9Hmged2z+rVq1choO4lVatVHRwcqN1uazqdBtPgnrnPD8/l9/B7OpDKczq27Uplzo2dX+7xpc/vXmyhsGklg3whR4AAin4eHx/DoLnXC1AsFDbMosu/V0VzP8KsGHwKTag29ZA8F+PkXt54H88X5ouqZeQbZQ0oQGaQB2TXK8IXi0W03mIPwBQw58j47e2tJpOJptNpht0lJOWy7fPebDZVr9d1cHAQRuSpcD33SJ3C1BndRsdK+jBnLwWivh/z5Jq/LxaLyOFlfQqFgvr9fsawe4N8B//OzFPZ7Penmt7lvlAohNyS4lEur9v0EI71SBVjd2fIQbQ36uf9sJq1Wk3tdlv7+/tPAnR08N3dXciks/DudBO58+JBPpM6ulyEOVutViatwO0HcgzgaTQaarVawYp5wZPr53Q9P0UefOnLgYk75Xnyy+XOAzppMpno+vpap6enGRvp+fnIJ/d0cOf6GzDLHmBM/t2+X3q9niRF5X69Xo+xOpOe6h72itvWNDogKYDvwcGBisVi1IxIigJAHHmeA3vOGNM97+AyJdpcZlxG/Pf0b2l++97eXtxrf39fR0dHOj09Vb/fDwbYnSnume7vVE9/DknwWQyqTwYPl4eEHUWnjEYa+uP/xWIxmJP5fK5msxkKiSPyPPnYc0hS44wAY+jwHDDy9Dbz3mP1ej08JFcUCIYvOkUbhMZgB2BLATkImH/eBdYBPEqs1WqpXq9H+BUmDmPuvTeZu263+0GRCaBiPp9H7h69OnnuPJDmY0rXLzWUKSBKDf42XMyrsxHS0wxU+vx+uQLjH/1BcWAwqhhq0k+QeWeKOGsasOu51Vzcg7ALfSpput5oNOLzkjKFT3zenwsGwZvot1qtYCr8uVFuhJ5ms5lqtVoAWAAv8wqg9EIaB/wAbJ6Jimb2BYz/fD4P483nUX6cAlOr1cLJ5PlYI18vN4z+njwFum3ym8prSgR4aJrLx+/GCSaR6nxJUYSKMfbwpOej+n73vp84Ss7Qu84YjUYajUZhiPf29lSv16PjhDvoLmsYZfZsGoJF5zKWtJ8qY/A1Tm0CcpM35x5BcB3hebc4WcgrDsBPP/0UBTX0zHR9LynkvFqtqtVq6erqKpzSFGAwpjznKY1M8tqXvpzJ+5hsStl0FeQaOzWdTvWb3/xGg8Egwsk46JIicuh7IgWlyI/bZD/gxPOx0eFU7h8eHurw8DDqVhzwgVPciXGSwx0fJzictHLbQXSBRvncj70BIPe55Lk9b5X/s1dSMMu4fHzMDe/ns47bWBewFHv5+PhYb968CSyUkpIpQ+5jTx2sj12fbDOVB1Ad+aYDQAh98/gmTQdF9R05bP5w3MMbTsM8eVjfj/FisvzMaIRqsVjo5uZGo9FIt7e3GQHCM6KqWsq2V3FDzP29kIpFhoXwkBMKF6DsvdTckKIYUXzL5TJyt05PT6MhPyyAA2uAgaQAPZPJRN1uV4vFIrwg1sDXkvHzf2cVUjCXekOpp7Ytl3vYT8mxlG0Lwue4fL4cTK1W65y8brcbBVG8x2XV7+XvoUjElSfzyudgtcbjcTD1pdL6eFU/cg8ZduUGi+//l/QBSwqodBn0tUcBcs74dDrV5eWlGo1GMAAervVxuOGB7eJ35om8XZxQZxY8YnBwcKBWq6W7uzu1Wi3d3NxkcrLyvHee3V9P/58azm2R4XQ/+R7MAy+p7DogotiSNC
VJmTxM1tgbg/t3A9p87ugK4aCS9+Jgk3pEkQoAERlw0kLaROqQIXfqUr3K7zhw6GHXt1I2P5b78JxuxElvSY02n5E2+tHHWShsjiK+vb3Vn/70J/3+979Xp9PR8fFxOJJ0AvCaAgquAF0+vynIzksDyCOIvvTFuqU2Pu+1VKZTHfz8+XOdnJyEQwBIQlaRYbAAsgLg9wb5/j3YzjSlabFY6O3bt2o0Gjo8PFS73Q484gCOz7jO43nyWEK+n7nBsaeuBqxDFJfnQLcxH75XXCYdODsGSuUXIAsZgqwznouLCz0+Pqrb7WYKKvkeadNPe2dnR8fHx+p0OpkTDX29/Xt9jT1iwNg/dn3yJKnUy+Fyjy/PGKcMRZr7sVqt8/lqtVoUieCt8r2A0PF4HAwAAM6FjuO4JEU+Jpsa5sYVaZqPmQoq3+FgIfX0/DxgXwhAo7MKhUIhjghst9uhAAEJPpeAoFarFc/rYS8ECkFzkO2CQnNsjD5n60rK9HrN85jyPBtf37wQv//chss9WJdDN0K85u/Lexbmt1QqBTijN560OUwChwbZJuE8VcDeJ9SZQt5H+ghsvcsKXjc5Sx4m5/+eVsJae3heyioRWHled9CAkua4QTx9mv2jgOlf7MrSlZYDAO+J6nlSjJn74Lju7++r2+1KWhdNvn//PhjYdI19L6Xz/rGUgFS/fckLJyF9jlROJT35OhdMnp8FL0mj0SicAtj51WrDPLvRT3PjIAb29/cjT548aRxrgKn3bEyBHwbLAYAbWuQX2UaW2bvsN5xCjD7y7/1IeXb0t5TtPewkRJ6z4gwl42R/nJycqN1uR4ENJMrFxYV2d3czJ/tUq1W9fv06oiJEF/zZn3KY3RHLk/dtuJ6SUdY9BSZ5DmOxWIz0JXQrsgPBs7u7GymB3De1Y5AJyAPFb6RnOXbp9/va2dmJ8DWpKX7fNB0gdSj8vf4sfBYbQaqVp0hx9HMK4PKKrXkm0leok+H7+IzLPoDcI9LSpp1Vu93WxcWF/vmf/1nValXHx8c6OzvLzDPP3Ww29fLlS/3xj3/U+/fvM2SIywEX8+SEoc/Lx67P7oOahh6eulIvifenibT8dI+B9+KB45EvFpvq+Bj4/7u4FI+4BwKgo+iK8ZLnSd6fe2QoOcaAAPtPFni1WmU6AOzt7cWRf9VqNaMAfUMCKrzPJPOLgU/zObxllQMHP3kKhsHnnp6EVHoDbBxEuTJMPSDWJ31fqhxTr25bLn8ufjLGlJHw9/v//ae0OQ2E+fTP+fPf3t5mzr0HaPnaOoPI3JOLidGH4aQ9lANKAFxq1N0RdMcMgOpRAIAHwIS/+/O65w9jKimzbwuFTcNzd3KkfC/f9QLjReG6InSFu1qtoqK/0Wjo+vo6E65N75vKqOsO3uOf2abLcxhTQOcynWcEUodsuVxG4ZqzMs6e7+/vR/W0M+IArXQP0CMa/Xx7exsNvBm7Hxfqzj8MIs/pJIc79bxGNADH5vHxUfV6PY6/ZYw8F3uKiBH38pxXCqsA2X7lAWa3C7Tccvnm++r1ul6/fq3Hx0cNh0N9//33GgwG+qu/+it1Op3Yg/V6PbqsvHv3Lu7tP9M1TW0vcuDj+9KX7++UrEidRF5LwSxy6tgAvUNfU8An38m+oOjZ5TcFj8vlutBnMpmEXgM0wgpip/nuPNnkdwCzpJDTvP3pDHy1Wg0MwIl6k8kk8veXy2U4lDDtHpXlPtzXCQn0PiQJp1V5eiRj9LUoFArRXeK3v/2t/v7v/14vXrzQL37xCz179ixyrCVFTQBFq8PhMAP2P4YfUnn4FCnwWQwqX5ZukjxhZCAOSNN7MZlseO41GAwiH5VQpOefeu4TINPPP3ZhZYEIRfV6Pb158yYAL6CT4ilJEQ5zRhVGAJbXqwcp1sD7gbXEs0aYve0DxV8AHcYDOHBD6vOcFlv5RsBQeP9NaZ2viDEnP4rcW/cG2bipsfa5TL1cKev9pB7jl76cMZf0gSy6Ek
mBgIcA08/wXuYxBQ53d3caj8dRvIHc+Pd7UQoG33N5KNTgpJQ88OGesDOlrkw9ZcRZBRip6XSq29vbTF4qz+qA2kEFv/NdXOm6e+oHz5UqLFeU/CM6QX4kfRGlTSECe81DbnlA05m4VGflKcttutygpoZE+lCfunz6XvXiJRwCL3BDf/Ad6CNnPaTN3Hsa1HK5rrp+9+5dnNHdaDRCF8H0e6EQzsjHHOPUlngl9v39fbSqwpCPx+MAxTw3jk3KdLHXGAv51IBpZ7m413K5zETAnHml9oDnoyCLPtmcnMXcET1otVqRsoXOkfJTTVLd7HvJ5+lLXx+Tx3R87py4XkjBJPcFcGKD/W/MubRxytIoH3LPeoE7sJVHR0eZPr3c2x0riqUZf6lUCplDFtwh8jX1yASRBZy76XQakWRknfu6g+8FfEQKcHjcuZcU5Nl8vj5xi2jJx+SkVCrp2bNnAT45KInnXSwW4cCCWegJ7qQXV54su/76HKzwWTmo3DRlQF0A8rz3NKTmXpG0nmTOkV8sFrq6utL9/X1MjgsjoSrAGMCRBWdC5vN1mydyk/zveBIOUP15EBrGjTeeKk6+fzQaaTwe6/DwMJMT6saT05weHx+jIAXhXi43x5qm880YnI31dfDEaO9owOYgFLuzs6Ozs7Ng5nq9Xi7zkie4vqYomzwvOQ+8bsOVyqeUn5+TKnt3ElzJOvhnvfn/w8ODrq+vNZvNdHZ2FmuTMnWFwprtvru708HBQbyOTNG77/b2NuPAeBgeZ8eT5/0EEEAuPVVhngqFNYtPO5XJZKK3b9/q5cuX4TihlPPCMTwT88JFBb7njfpc8oyAeubEoyOwzjBl7XZbzWYzmDiv5qbVFzrAr085Sula+7Nsi+zm5X/5M6UMUx7gZs4pthyPx7FGyCNhc8LzFGR4CFvaFKcis4RKb29vdX5+rvF4rPl8Hh0hCoV1mx9PQ/K80xQ8+P5yUICseXcBQvmFQiH6YsKidrvdjE7ifuhQ2l4Bbrkf92RPYxvYY87AktMK88o8oXs97P/ixYvMqT6AKfYz4N3X0OXZdU/qmKS6dxuu1Ja4LX3KzvCMksJ5Jj2KlDbkFCCH3sVBwcY/PDzECV2+PqRRSOu9NRgMImVpsVh3zTk6OooWj961weUXeXGHwR0u5MQd5zSv1Lv8IAcPDw/Re5UCqmq1GvclssbcpW3YAJLIKs4h73E74gddOLni9RCNRkP//t//+wwWySPPGo2Gms1mBsOkpFAeJvy/kdlPMqj8zGMjpKzXxEK4MPokpO/f3d1VrVaLDX9ychIeOO+/v7+PFgtMPgvCItM/FAUI0HSFyOd9jNLmODRe97wlckOgzPk8CpvP+QLwnSh0jjXj/Xg9lUolvCHCmyyoLyIgnU4B/J33+0ksePrQ7sViMdqatNtt9Xo9NRqNYDt8Hvz3VPnlsai+zm5Mt0VhslaeF+lXqtzTceeBFQyZy8JqtYo0E7zz6+vrCM1wb+YQhpDvcJABsworz/ukbL4cBhvwiWe8Wq0y7Ky/n73DCSmj0ShylF1+PVXElTTRBgcwyB9eOgyDF50UCoX4DPMAUMAJfXx8jLxe1oUwGM8FWHj16lUw0zhdeXqG8afrnq6n64NtAahcqXymOjX9e7qXCSkfHh6qVCppNBrFyXrMO4Vz5JrB2rtuWi6XUTmNHsOpkDbFWIBCQGCxWIzoVsqoewTDjZ/vW4wmhhrwV61WY+0Zm6dtwZB5fja5hx4Z8JZ9AAUcotVq9UGbt+l0Gnmly+UyQvUcg+mV3aQYuA4CpEPKdDodtVotDYfDzLo62ZC39i4bKaHxJa8UUPtY/RlSsOqygT0nhQ47yXoWi8VYez8EJM3FXCzWx5unEQMcYdjKbrerdrsdkVhSCfb29jLRK2mT4gGLiLy7LHv6GLqQOeAnefXo2slkEpEsOrzQocijxYBnxxjgDPQgc1Ov1yMaXSgUMocg+Zo4geDpW+hbno014l+z2d
SzZ8/09u3bTNEaVx7e899Tkuip66MANS+nwF93YOKGgI2YCmG6wVBCVKqj1Lj3bDaLSWPhoea9eT7GHWGkCApB42QpqvdTIO39U8nVRFECFnwuWHRC9zwnRrPf76tWq2mxWETIdzgcRlsflKgrF8/BQ6kVi8VQyChhFDnvd1arUChEUUKlUtFoNAoGlzmDQcPw5Cm8z/HS85yXbTLwrrSfGmOaA+fP6YwV67KzsxPMNGCVNZYUyhED6gDeQSDyhbJA1nltNBrp4eEhiit4njS5nfFTsMQF2+Nebao82WuEPKfTafyfsXCSCp9jvxYKm1ZByCQKyguncOY819ZzqmHZHBSzDuw3WglJyhx+cXR0pJ9++ikXWDoQesoouiykLNWXvlLmSfqwW0YeOE2fo1BYV5nX6/UMm4mOnEwmGRaf/Ljb21u9ffs2jLYfg+uyPJ+v+0u6I0PY0/U7wJExwri48+SkBLK6Wq3COffUg1KpFCFRWFS+l/6i5Jd6ugF6F3lzpo99Sjs/Z5GR206no93d3ThJkAp9aXN0KmvlUQ/2Imvj9zs6OtL5+XmmlVXqcPlYn7Kr26B73Z7kkViMM03rYE1Zb/Y9TtO7d+8iSkkqFM/tsgE7Ss/vg4ODSL+7vb2N9mewqoeHhzo5OYlIrh86Im3qXBzfsLa+Hs4+8jqMfEpe8f6Dg4MguPb29jQajdTr9XRwcKCTk5MgEiRlom2pw4MsFYvFaDkJ7vH0A5cXn2NfEx+f7+n0UBn29f7+vprNZsYuuZPlOonvTomEfxVA9S9lYdLXU0PgQpe+z19jYvEkXDi9EhSvACH0Yig8jH6/HyFKAOFyudTh4WFUVvZ6PV1dXYW3ARvgG4MFprgKJoHnRqm68ZUUrKXnhBAGG4/Hurm5iZYreEHcz+eGzeQhLoAAXjvsE//3zc288nur1QqFhve5s7MTJ7+wVqkC4UrZZl/LPAW6LQbeL1dmqSPFxfz5pkmVKI4LMoLMMv/SxmPGKFLsBJiFFfV8Y+QbRpTmxx56JFwEK+vhHA+DOoPE2FCyAEdJmcK5+Xyu9+/fR9UsRwbf39/r3bt3+uabbzIsmN/bgaM3fmdOnDnL86Q9UsH7ncVNWSEYOf7Rqgu9kXd9jHlK5XgbjLyUZRfcKfY592dypi39G/2Vd3Z29ObNm9ABrls87Wm1WkWKhefcYexg4WEU5/O5qtVqOPFEoNJ2Zk4CpMae31lHlxvGh+HlHhRqsKfI+cTosv88PM+9nfXy+QbIIJfIl9uoZrMZJAl7AYDg8szze1QMXYR9gXzIM9yMy/cUr6WyAGj40lee/k+JKenDGhaXb0mh39BZgEgcW0/983W9v7//4DAPbN1qtdJwOAyAe3h4qJcvX8Y65elz16/MtXfoQb+mTCn6Vtoc/uD1BlxgFhyj+/v7KOCDxKDNHsxpOt8A1dVqFbmoHNDDuFPHHPvjThPr4gQBe9fZfLDJarUKIJymNaQymxKb/I25+tj1WQDVhSClvZlgBwE+sJRl5XPej8xzJqD1HcRhhLyHHbk80+k0wgB4TYRbYVhHo5HevHmj0WiUydnEEDqT5blHvA4gZqFQCMvlUoPBQM1mU6PRKAwBYIPTfp49exY5YA6kfRzL5TKAswtWyqSkwgFg4T0pPc892u22ZrOZer1eJhTl65sa8tQg+pqmQpdugm24PsWeOQBKn8s/j3wXi8VgZTyPmPATzpV/x/39fcgo42CdWStv7o9ThMEnVE5xBo2+2ROMk0gC70dWU0XkShcF7gwWp1Qx/jT3mygD8wKrVqvVotsAqSjcA6CCcpM2oWMUXB6jJW3AlzOApASRoM+eTPXTU8xTnqOyTfKbKu90T/papjLMz0JhkwfKerbb7YioVCqVCHsT/uPeAEzmknA1UajpdKrhcKjxeBxzhgFmj/jaIi/IAXLqnTCcNQckeA63O1sYbnTczc2NBoOBRqNRtH9jzDwbMub/+G4HeA5w3VFwJ5E8Ph
+f2wVnmvw19jqAl9QruhKw3ilrl7fm/rfUkfvSV+pYcaW2wgGRtOkQAXlCioXrZpjq1WpT+FQoFIKxZM79WF7SrsjxPzo60tnZWZBdyDxFSK6r3UmSNsexp3uRMabgG7vhhJoTSxzJSr0KDD7dBByA+niwD763yuWyms1mgGDG6QQbF+9Jc8V9/aSNfDE/DuD39/czhev+XI4J/V7uaH2O7H4UoKYGO71xuhj+mZS5SA1HsViM4zgRlNVqFRPm4NDDnJLiVBm+31MDvKEv97y4uMi0QOGz/J2z1bm4D0qZ8WOkGf/d3Z36/X4UdABIHGjSr3GxWKjdbsemTStbpbUHPxwOVa/Xo7G1K0GYA76f5xgOh8FcoER9XTBW9Xpd7XZbtVpNvV4v5jh9bypULmx5grytSlLKVvOnGyUF5CnTwXv5vdFo6OXLl2q1WqEYkTdpU3GJh8/awQZ4VSSGinArVcLugReL62rOs7OzkHGMG0xumsfs1aO8nqa+oFCYn4eHhyjKour4/v5ejUYjnEYApoe9XA8QJpvNZpmj8dhnPLezv5PJRHd3d9Fdgnn2NUhBGEw2hVKwc6lj4WN0/SRlk/dTZ3sbrvRZnvo7hgM5Sx2xSqWiw8PD0EXNZjPTcoZ7SBuHwZkS+k8TPiSqBXuKXiPyUywWI5Lkp0axB1lPB2J8Fwwpe8j1bB5Q91xXnmU6nerq6iq+g3QcjDHPns6py4uHN52hZB/7Z5h7yIiUMXLgzXnq7GPABQz31dVVxs75ejpQ9vE8JR9f6vL1TfeylN13eXhBUtgz5svtnAOe5XKpXq8XOiAFruhQjgonpaVer+vs7CxC5rD+gF8HfIzfHX7+7syiy6ez83RacabX78sYuRd9hNHFPBvf6xdzwxzymuMa9qfratYG5tdbuPmec+COPXPZBUR/++23+sMf/hCtptJoVrrefjnOeOr6ZBW/CxKvcXMm2g3+pwYBcIT15HdH5rwfxUNIiclFoWDwU+ND4j/CztGIvtFhqTxPBQXp3++esINFFn44HEaxibQp/PKq12Kx+EFOSgpqEJ5GoxGg2E+ySHNWHdySb0v439ua8J3cg7wyilRSpZGuoQuXv8ffu43GPs3jSzeKrzPvSZWptHk28t5OTk4iVcOLmPiMGw9kAOZE2hThFYvFyImmcp81QwFWKuvTeNywufJl/9ze3kZ4yJ0fWPmUbQGgYlyl9R6bTCbxWTeiVD2na8z9UXa8/+rqSkdHR5koCHPu69FutzMGDbl39sqVsL8uKZjUNF0ldUjSNfb3OjjZFtn1K29POZDJi2hJG/ak1WplGGqiSjhIzo6jOwmPHhwcaLlcRrGnpEx+P844bCiODaFZX39ew9HxlA7P3+M9KcvtTpfbCuSTFJqHhwddXV1ljCy9qUkRS9l8B4HO1PFd6VwzPgANedteoOPgmy4XpB2g16vVapA0Tzn4qd5ymWWdHSR9ycv1jOvR1Ib4T8YPDqBBPnOLLLKWrMvu7q46nY6m02noIPY3DglOOznyq9Uqclmd9GFsANXUMfIcZRwVJ3PcljiGcV3qDmCe012tVoMQwAHs9/uRF+42H/l3Rw4dzEUkK30mMAMRDtInIeo8TaVQ2ESXSffxfVcqrTtR0G2FDgqsa0p0pfgitcF512cB1FSZp0YgNfoplZ8yGExoasAKhULmnF1XYChPLhc+qplXq/U50GwQFtlPICHvByCHsSexGsHxRGjYMsbowrVYLKIpfqVS0dnZWfRCbTabGc+LXBWUGM/PnHru08PDgwaDgQ4ODjLHr0oKJcf8kBdWqVTUbDYz4/NNIa2byANQuZ5iGP15WdfUI/N8nG0z8E+xpPzOe7gclLoSwcilPQ8x1CgjLp8P1knKAjVkAkOPzLoiduZxOp1m2jAh9+4Ve+oA9yTnCoXiIV9aC3GP2WwWyvTw8DBjaDw1BnnyvDdkmnuPx+M4jpW9m/bhY3zFYjGTVkDOK/PggBh586pbX9
M06pOucxrheUoWvuTlOjMPbPv+k7Ismn+WQglvbQPrzolHFEFR7ImMMJePj4+ZYgsMKOFSrw3ggnVJ2/9JyuT08ze+J89Bdt3oANcvDnAALD88PERUCSKiXC5H+o2DE/abh0W5B91j2PMejWEuisVihsVNAXihUIiuE6Ql8HfyqBuNRoYwYE1Tti29HDRt0+VymUYyUpvB34vFdXTo5cuXajQams1mAeo9JY2uClJ2rqktoaMP80JLvevr65hrl3F3Np5yUp1VLJfLYd8d+LlTwn09dO5pVo6R/Lshy8AxzvbX6/WMruf5ubfrNaIGjM27BPFZ5o3CM95HHjfryFxJG1DqpGC5XNaf/dmf6Q9/+EMceyrls+WpLf6c65M5qGlCeQpafWJSdi0FqwgNibxUCXNREMIELpebJvcoC8bjrM3Dw4PG43FM2Gw203A4jLAqChEBbbfbYeQId5VKpQhbcV/3Alw4EEg854uLiwi5jsdjff3118E+wFagnGC0AMcIgi9qsbg5icWFz8fgm9wVGILp1c8uaI1GI/JQ/XOpwf6Yd8N6OnD317fhShkJl0GeO5VZrtQJgR2SNvlhFDZ5Lih/BxSy9rT78HUG1AJs7+/vI/+I76NoCbmkGpsxuoeO0vaWaDyHyw25WeTpodil7PG47DvkBsaVE1iYDxQbModBTlMHfO48moFxAUTzPfzdewbyvfSZPD8//8AZY60ZW8ryuyz4/nbAtw1X3njz/v8UOwXop12XF/Ysl0u12+3QFeQRTyaTOH56PB5Hu5o0ZQRWC+Ya58hlkrZ/fJ8zkgBMB64pmOHvvjYOSHCIMJYHBwehW2klhMN1cHAQ84B8e/0BY0Kn8/vd3Z1arVa8xjhcr3JQAHLvoWOey9lqno09h8xSO8E8OoDLA6dPGf4veeWNKQX1/ru0kXNycjlhy6OWAFS6pniKigMt7usnSz4+Pqrf72s+X/fpxVGgXRRr7alz3JfIAkATrOGyAHvr+9OdqTw7iUMEk4vOBqBKm4OAeD66UsCo8h6vfaB+wPWA7y+vHeA5vebFZZZnZH+5zAPkp9OpGo1G2BLG6WubR1a6vPyrGFS/HIQ65c1recKXNwBYERhHwJi35cHA393dxfs9ZML3Scr0NEMpTafTOEKMe5KDeXBwkGlvA81N2yp6kkmbljl+YVw5cs2VMOwA1DyK39sK8XycxesL7qwUQkz+Bx6UFxcw/7DAq9UqWg7d3t5mPB6YqW63q6urq8h9cXDl65xn2Pm/Cyvr7H/fhsuZtKecKpSMGwP/DM9F7i9OhzPgrBcbk3ZNGMY0B8lTNVBkhNb5ThhE1t5TNvhelCwMg6ej+HN51bQrp1KplAHBxWIxumdwTvjDw0Mw8nwm7WuZMpzuYVer1QAEDjLd05cUbXuYawcmGCNAuzNqnPfujpz0YRu81Gv3dfbv2hbZdRnl//7T35N+jgtj4Sze1dVVptE5z41MNBqNYKH8CEfuDbOIXgL0tlqtTCjRw6bO3nvonvv5/VMnkvVHrjz9Bbl2kEray2KxiIMEPBTpl4cr+U5kcLFYRE9eTwfg7+hAUqYKhULYGsgInuHu7i7m3B17dEu73dbJyYnevn0bPTCZN9/PTvb4Wm+b3Er6YKz+91TnShsSjKJigJizf/6TdXe9gkM8Ho9DDgqFdUR2MBiEjLCmfI6IgqQgvtwWkLYibZw7fqakEPdAzztL6naDz6VsOXZmuVwXnXKYisvd3t6ehsNh5pS25XKdGujEh8+/54GnpAX/d30sKSPHDlgZv+9DOg04wOVKnWp+dzvwseuzGvWnit0HkgdgnI3wvwHiUGDkhuDFMzE8AErC3+MPubu7G2fYSoqiC8aBQqO1UqPRiJwfwAV5L51OR5KCGcPrByQAGCeTiXq9XuSNpEp+NptpMBhoZ2dHrVYrBMFzO2g1VKvVMkY1nUsYLpgoOgD4xmYevR8cYTk8SxTs/v5+9PFzJeBr6wLkcuBGMn
3v5wjal7xSAJJ6cykYdyMpKeaXdAtXkjgwKDaYkuVyGa1vfI15/+PjY6SVECJkPyDHpKJ4fqeHfVI2FTlz4w+QLZfLwcYiE96Wh7QRmDEqaHHmUsDu4Mj1gMsNoaPf//73cVqLj5/ehN4SRdqET/lez6lFtyDbtVotw8g5cPMxu9FMX3fHe1uudKyuT12fYCylLNDCcfHUEy+g43XyNjFGyODd3V1Et/z7IQ/4HGuJs8v4nPVEN3kEjctz2jwFxscJU4X+Z+2ISLFHvH/1/f29ZrNZ6D7IEIpP3TnH0HpEiYtIHN/hRYIA9nK5HPnUyCDjJFwKu0s+Lk4mBWUOoF1fpbopZaPy9t2Xvtweuh5NsYATHYVCQbVaLeZG2hRwAkDdcfEIITbQe6TzfnLrYbv9NDXW0v+xnnwnugY21SM62NmUnEOXcA8YUgerrlN9P6S1BpPJJFMUzr3u7u50enoa3w0uA0RKiugx0S4AsKcduoPOa0QeUrxFVDBNuyDVh761eU7IUzb4U1GrTzKoqSeUGnF/zTeTL5rfi5+1Wi0YTRQkFxWjno+XPjThGmjt6XSqwWCgQqGger2u4XCodrut+XyuXq+nWq0WAA3WhXw8lJikqEJ11gug66eV+Jm4bBxeA2SQBO8hUhQ/rBjC6Wwoc4THVKlUorrPQSqbh3lKq6cBItIG7MImeP+yVOnlKUcfoysGV+gOXL/05aGlPMCdpzT99ZRZq9VqarfbOjg4CObSHQQUz+PjYygEWvlQbezgrFgshlzDWnHEXafTiUgAzBZznecJM07AMODOnx3QuVgsNBqNtLu7G+woTuBisQgAQhEYPVk5nQ1jzpgBuF4AiOzwvqOjowi7Is8oYS8ScZ3hPS9dpvg/gNv1BEqUK88B8bVO5fxTyvLf6kqB6VPK3Z/X388ep+MDOgo9iePv34UcM5/Hx8chSx5ulRSFKR6OxmFA1vxEHulDZslBtpTN80fGkR9nO1OD7Ewx3TJIUbm7u4vI3MHBQQB2xoBDBrBOAd9qtQ6RUrjjsgjgQYYZC+Oaz9cnbhH54tm90M+Btoek03VJbWseGN0GvZvKLHLhNSbSh62xiIbQexeHguf305skZfL6kVEiNe5UrVarcPZ5HzYQe4w+goDCcWHc7mw5/vD1QK+7g/OUA+/A3etR0Gk8H0Aa+3B7e6vVat0l6Pnz56pU1gfxYMulDWZgv6KbOdwE/OGOnjPBkBccksA92ftgMZdhxv3s2TP99NNPGdl2OXUc6brsU47VJwGqL4Qvhi+AK1AfiA/WB7RYbM4d91wQJhJvnvA7C+PNjx8fH3V+fh7nL9/e3qrdbmdAYLFYVL1eDwUDmIV5YTE9h8WfE68JYeZUlouLCxUKBb169UqSouCA0Fir1QpvjA2THsUHIGg0GpI2TIIrQJQvAImUh1TBuVJgIy0Wi8jdRbHSmocNQON5B07puqfGMn0tHfO2ePJp+CSPdQBQf4ypkDaV9xg9TicD4EuK3CFpsx88X8p7SWLIpI1hxvP0fE3YfcbFOqIg8GYBsjyLKz9JmUiFH1SB8mF83nEAlr9QWLcTWS6XIUswT/Ru9HxUBxCu0HZ3dzUcDmM8vO7pKswdc4r378qQZ6JCtdPp6Pr6OtNHMgV1fMZD3szTUwb/S188i+fjPsUM+0/+zvy4AUpTg1hHPwwCgORGFzmGBACcwSiyZuhn2FUPmSO/gD4iAuwHntNBq4fXXbd4TijPy0+eHUDDuDlBiCIanC6iU4B5mDgKzGCU6D0McGY+YfIANe58OSvqetufDR0CSYPT5TKcrm2ec7Etl+tNDw/7/nYswPPs7++r1WrF68htpVLRzc1N5hhS1pH3kTLHhY4iiuVzCrtOhJF/ODhe7Ma6cvGaO1isK/KNHQAvuCPv6Sg+Hpdlukx4OhT1NX68MD3XeVZkkmJpl2Xug46HgEhtOvrd07YYC44Y4/c1rFTW7ewAtS6zKTD1K90Tedcnq/jzBI
oN6B5F+h6udFDF4jqBvtVqaXd3N6qjfUG8dQwnK1B5ivc0m83UbDZVqVSi1VNK8xcKBTUajQgDAtAIWyJsjItNkXp3LAK9wJ49e6bHx0d1u91Mq6HVaqVerxebaDqdRmNn6HPCUHjL/X5f3W43s7AAZD5DKIn3uFfqYUln2WDDSKj2+a1Wq6rVahll6uvN9+QZ+lRZSh/m9G3LlafMeYbUI/a8IgdEDs6RV1gdZAzHg3XHAGMo5/N5xtvlewCLnvCPd4tDQvsaH6sXCeCQkRyPofOcOfI3YQgAAt6ihb1RqVQ0mUyiADDNPeJou8VioWazGc3aAUTu6LkBLZfXJ6yNx+NMg+xUf/Aayt0v5oCrXq9rMpnEs3MfdzyeckLymNZtMfQp8EgdK64UnPg+xFicn5/rxYsXIdMYYwcOAClPq/B7Ei5Hd7x79y6jp3B4PFzPnnGgwe9umHgvMuWOjYMEZ5/QWynzmaa/uENCVTTH+lar1ThSmFQFDHihUAinDyYKPcnvbhPcCXBA76cGMhb2EiCfse7v70fOdnql9jTvebfByXIQ6j+5fOwuq+gf8k9x1vmMs94QBegx5t+dY3e8hsOh5vN5hOZJOwI4etqGA07XNWnaBzbfo1ROhHi6CkQBMp7OF+N20Ir+40Lfz2YzjcfjSNXihK0UELJHmFuAMc/qhdSOMzwXV8o6fMg1TtR8Po9UycPDQ71///4Dhyx1VvyeT8lIen0UoDp4cbYh9ep53Y27L0rKqkiKavparRYFSi4IpVIpjDpC68wjxr5QKOiPf/xj5PFxvjTfzUIAfCkMca/FaXm+J02qZkOUy2V1u91YLFhQGAmUIcrn4eEhclyZT+67s7MTBV2cUIQnjsCyGZgLjlFlU/kmcYH33FPfPORRApZ9TfJC9CkwdQHLey3dzNty+ZxKH26MPGWPnHW7XXW73UwvOGcx0/nzdh207/G8Hd7vvU8xaChY1hgZc0YUZQsY9nxCmE/Gz7PDWPIaazUejzPMEA6kO3yAGRgMZ3mXy6X6/b6Oj48zeYOe1iJtWGiUdOq9O0j08C8Gw4E/3+uVt+5g5DnJvt4OJrbx8nH5c/gcucPlcu06and3V69fvw5mZ7VahZ5NU1RYf+aK+/E5r+bnHrwHgAXwwzHzFmepEaWYE4fKmW3GCQvqxAh6nehWKhueQiAp9ge9JRkr4/AOK+688R2TySTyVpfLpYbDYYaR4j6e4gBI8XG4vEMceF4rET3em7fGKVmQAr1tuJAlX2tn431/w3CT4kAXBC/4BFxRyOnEGODKHS6IGGnd6g5ih/A+rDeOeV4hdKFQyEQA3IFKoxmeAsBnXQb9b67Xn4o2Mk/u6DNPkFr39/e6vb2Ntk7ILz2PYY6LxWJEv3wPQZqsVquIBDo497H4GMvlss7Pz9VqtUJ+PdfdQX1qS/Mc7M/BCx8FqM665YWV8ybXf7II/nqxWIy2BF5Jng4U1mcymcSC+0JfXV3p+PhYb9++DUaWBs2egEyx0mq1ijCBtDlWjc3jbM1TLKHnegJkyfNgAVButLbwHC7mxDcnLbGOjo4i/UBS5OzR9uXo6CiOGWw2mxkGje9kwefzecbr97wR3g9r/TGW5inmhudI1yzvfV/qSkE0v6fghSsvFMF6EhJMFZqzKP4Zz/kBaHnxlLRxhij0QL55D4ZeUoRlcGDSMaCUCGWlBt0dTfemd3Z2Anjwvfz09ljkQPF5BwKz2SyK7px98NNQHJh6bhny6syCO1OMBT2EoQBcef8+B9N5Oip1kFkD1n6bZJc1czDiRIAbAo8AMX6iSp1OR0dHRxkGCZ0obfZImucpZdmdtNAnT0eMRqNwSGBpPAIznU7DoSA3leI91784Qdyf9eZ31hynj+8ghYp5qdfrwUayf2jXNpvNQjZXq1UYW6JNDkKq1ap6vZ4Gg0HkSGIH6BSzXC4DYKZ2AL2BXibdhbnGIXQ2D3nPi2B9ilD4klceSeEESaqPpU2Bmu
eCopvcIUJ2vGjO8QnfxTg8xY33Swomkns5kPZ7ucNElEDKNuKvVCqBA5BjB+jcn/nA4crb0+5osxfBLNK6Lsd7StOWEBDKHmK8yNz19XXUu3DRncVTFvgc84deJXrrQBl7QISYHOIUpKakgL/uOOhj1ycZVPeU3ej54qVevoMBfw2lApOJ4sETgmWhfySNxF3g8IgODg50c3Oj5XIZIXfainAfvr/VaoUHBqDAQ/IQARPnhRi+2LwPEMmz8V2eg9Hv9yP/YzKZaDAYRFoDi40Sv7+/12AwUKfTyYAQjoF1MENVKfdxz9TTG5hPDzGxVu12O45nvby8/ACcp0xNuoYp2POf23KlQNTX2F9/6v8+ByhRL5KAsYH99O/xpH6XG+7PxnclhazQqxd2x50JlDLy50qUy524FJzy/ew3WEjG4bmDVNZXq9WQK3o+kkwvrcEQeeCueNkLt7e3UTjmoVEHU1wYdEATCpfcJ18DTjD56aef4uhe38M8q89JnuPpa7AtF/qBy5/JQ4L8LZVZ2J7j4+N4nRD7bDYL3eE57ehdZKpYLMY6+x5HdpF7BxeVSkXj8Ti6PyCz6DhJwey6HDtogXH1VADGJmWr/d0GSRvniddgaAG5FP157iw1AnyfAxe+l3oGKsz5DmfwXY79mdhTEDKsITLM8dawh/RU9Tlxds7X3x3abbnc4XTckAJuaTNHpVIp6kd6vV4AIWxkKuvOjvuzO9iaTCaSlAF12GL0M31Gi8ViRHFZR49M8gxOkvG9HplzLAEO8VPuwCWMydNjfP7S7yX9ygkIJ6Nggz2yxzin06nG43HUurAv3FFkv/laOeuN4wauoH0itT3ff/+93r59+4Eedb0LGHX88Dly+1ltpviyNHyWegB5oNTvQygjTQxngd14QkPD8KC4mMzFYqHr62s1m82Md8xEeB80LwhBqHyBAcEYVqe5eTYUoyskGAXP3aAi+fT0NMJB8/lc/X5fy+VSR0dH4SVdXl5Gj7br62uNRiPVarXom1culzUajWKzcYQq+Y/pQqNQKRJbLpdRtUgHA77bFaQfd5YawHRd88KjrkC3CaS6skw3RDrOPIYC5YVDg5NDSoYrFxgQB/seCoe14btg+2B3Hh8fM+sOQGCsjM0PWHDmD4+d8bqSgW3nHg6c0yNYyYUjtxVg4cwC7CUy6LmL7A+fc4C3pJBHFCSOHeNBmcH0ediVsQOIut2uyuVy7I/UAHJ5/p/Lra8767QNl+tQfuY5UnnPwlyVy2UdHx8HmyltCh5ms5kajUYmzxcA9vj4GGlHi8VC7XY75s7JCl7zVAz0CmCYsLpXBXuOK4bU9Q4/0wKrdG08akFnE29xA/hkD/GTPFPshbRxXr16mv+73DnpAYvFvoZB4vlh7Rzw+7phJ66urvQv//Iv6vf7wcZy5UWoUkcqBTRf8nqKGeT/qaPuAGhvb0+tVit0CU4Nn00LdySFvsXms1asbUqA8Q+2HL2Tpl5JG6IBNhd77/UC/A5JxedSsOqgEhnhdU9N8toSx1h8TlKmFSBzs1gsYg9QFA7xUK/X42AN5Fna2Kvb21vV6/VMbjr7zyNlyJ5HBW5vb/Xjjz/q/fv3evfuXdQjsL4uF6mcfO71WY36UwCSKksm0oXP3+ODZZKm06nOzs6igChPOeFpe0NyDC+0Pyd30EAa0MWiEsopFApx1J17yZ5rgsEFeLhyTJkW33x+PGuptG7x0ul0Ise03+9Hw10UGUdX7u/vq9lsxnPANtEuhQ10e3urXq8nadMRwL05PDQ2Jfk2MNVe7LK3t6ejoyMdHh7q97//fYZlS9nQp4xkut7cY5uYKPfauNyo5xl8X1deKxTWleztdjvSN/K8aQAksghYpMIcw12pVMIx8T6UKBautAWaV5R6dANnzlkwckh9jTykwzWbzYLt5XUPZxWLxUyBzP39fbAC5FeTx+SsGLKAAYc1TQ+o8PQd5pp0AkKvrAnGiDG8efMmCrk4nCJPBh
0gpA6Xy8Q2yS4GwY1Yugf9GVK2GGcjfU4cVz/e1MEuDhCRJt8L0+k0Ijz+XYT3q9VqsI3o6IeHh3D6Xc6QE2TY9YyDOwACr5NLyElX7XY74/SwB7jv3t6ebm9vc3NWKZiSNmeXI5MYfO7paVTMGe+bTqeRruDjcIaTeSBkO51OdX5+rp9++kk///xzpEA4aPd/jMVrGNxx2AbZfQoXpGRVCqZ3dtYnwwEC/cJ2LRaLOOkMkEVDfmfNWUcnoVICjaLVdrud6ZmakhlOQDDvRF15Hz89+sMzuc69u7tTv9+Po1aJkjJHaR4s3+n6X9o03QegUrRE32r6phM9YVykqICHYI0pxKWNIN+RstbsPxwASTGP4/FYh4eHGo/H0dEoBfsOcn3uPnV9MgfVUXXKlnKlg2EgqZfPwtRqNT179ixCd/5ZBu+Kitw//o6CAoDN5/NoZgu7AtCEcXXFBGuIYHneFKCQRukIKguFJ8JYyEMhHIZRoGpZWlfSkn6wWCx0cXGhvb09HR4eRniDTeKnaFF0gLcHte7rwrz6s7mS9GT95XIZ4PfZs2f6l3/5l/AOnZFIgShrnDLn/l4HONtwufyl4+Xy5/VwI5/DyLdarWCS7u/vdXh4GLnDJycnYZDdy+Xe5JxRmCEpAJ+voYcPpc28k5PHe1l/5MRBtLTJkUJG3KCxxp4f12g0MikJi8UiWC+v+mevImdEM+7u7nR+fq6rq6vI96JIhuevVqsaDocBPomikIOeGjUAt+ekwV4j0+Rf7e/v65e//KV+85vfRB9kLgBNqmxT3eU6axsul13fW3lKPZVndAhpVJ7aUyqVVK/XNRgMdHV1FUeAwjBJG7kjtx5j7Hogza0kdI7suU7vdDqZ1AwPeaOPPLrFensVMnI7GAwy9QvkxhUKm4gEz8ueIY0B3c33Ie90ssDwUk3ubB0MFN1PkCl3AL3BPOvgEQ1PXfPcxK+//lrPnz/X999/r1//+tfq9/uZXF+XBy5fq22RWSm/2wC/p8ypyzgFz54r7eFq5L/ZbAbLTFEbf3OCxdPy/GIOOV3q6OgobDxOGRGy1WoVTorfm3vkOfsO5vKYcI5adQfPow6uq1lbZAHnxlMRYFKJdGHjIe/a7XbI2sHBwQcH/UgbB8AZUs+PdrznnQ8ODw9VqVTUbrf1q1/9Sj/88EMcYOQ6FbnI01Ofo3M/yaByAzf0eTd18OqTzN/c4MMuIlgIFRveGdXd3d04cYdN3uv1tFgs1O12A/SS6+bFE7SXQgE5O+X0OP8H3ELtY/BcWUoKAXEQAbNDGx3Gz+kYkoJ9wzDf399Hrh8eHEwq3uDd3Z1Go1E094dV9hZSXtXvTAqbhz6H9Xo9wNL333+v29tbtVqtEPJ0bRFYD1FgNFPPHsHbpitl8p9SklJWCfE64J3+fLAgksIL5fJQJTJHeBzWCofr6uoqI0vz+Tza3hwdHQWT4LnOyCcMLc8EUOU7AaA4SqyhlO0PiLOE7M5ms2AvUHYobwAhChTj7i1bvEsAeU97e3shzxgUFCyKLE+XMO+ABlKDnMViXf7jf/yPevfund68eZPL3jh4Tw0l79lmB0v68NhedGsawWC99/b29Pr169wwImCJdYet5nVn/rxgCWPJKTO85jK1WCx0eXkZTC0Oi4NZSaG/AJZczmQ7c8szlMvlTKoMcu2nDwJe0r2NQWfM5KICFmDlOV6SgqjVahVOlRfq+Pg8auBOFGAKZwzHC31NCgZt3TgUIzXmT0V9eK+nHnzJK7UReRey7PqSloc4GbVaLaKEecCc9zlQd7zhrKp/H++jO8NwONTh4WGAuuvra+3u7sYBP45fUrDrbKszpehpnD72SUrGkWoDVnCHyHNSfa5gOff29tTv97VarTQajWKs/X4/jtZ1R69UKgVWSPWHyxYOFTKVOsb39/fBPDMnlcq6D7fXFPE9zA/fkTLBjleeuj5ZJJXewA17CgA+tZkKhYKOj4/jBKlyuZxpRZJOmD
Oov/vd73R4eBjA8Re/+EWAvNvb2zjHGM8aAAEd7orDQwJ8J20ZSCBGic1mszhrmga/eOgYf6/Oo1cZjG8aqqcJ9HK5jDAvaQE+17AOCJjnNQE+YFiZMzwynhVmmWptWnmMx2Mtl8s4SWgymWg2m4WiTwFoupbujPjf0s98ySsdex477O/Jk/Xlcl2d22g0ggn1+8EekUICC48S3Nvbi1AkLPXFxUWsG59fLpeRxM738BnAHeFtxs4GJ0zDKVSELr2nIkrO2TDApUcIUCCEIReLRRTv0aIHjxznDXaLllqEoHZ2dqIggVPf2CMwtqenp/FcDlhxzMbjsc7OzkJXAHSYa/oQj8fjYMZSxvEpOfDXnvrbl7xSZjdlZnzcPvZSqaTT01N1Op1gUqRstT86jL/R85M5w9GfTqeRF+2Mto/B6wk8ssR4vCLbgQvGzY0fOs5TWRwAAKhXq1W0tSqXy8HceC6dlHUoJQVLB1B3cDyfzzUcDjPpIkQ6fL/4viE8jEyS6rVcLiMvmpCu56dizJnPWq2m4+NjHR8fq9frRSqFA3p3slPHZVtSq1Lb4TLqQNGvQmHdq5ycxsViEeuYRrWwhR4BZU2kDV5Ahpw95+8+noeHh6j74PfT01NJm/680qabEY4Se87tADLlTg9Aj8tBLM43zG0K3H3vMmYcHRwtT+djb/HsvV4vQC1FVqyHF1IRWeDYdhjd9B86lRoasI8z22CPlNFOnWmXh09dn1XF71cqfH654U8VPoxUuVwOTxIFwIA9x8eZut3d3djoKGAA3Ww2088//xxn0wLs8KIQDMbnhosJJkS/t7cXCpnn57QRAIek6IcpbYCCgwkYSQSb5G+UmrRuG8F80TPTvRpXQqzDaDSKE6Dw5glbcTlDJilOh0Dh3dzcBED96quvNBwOdX19HUDKgUq6xj5/zsqkMrMNlxv1lOn9lIPFnFer1Qjt4yw4uy8pWHdnMj3cT1gFlv/u7k4nJye6v7/X+fl5OEY4GihOaVON7euQznlqrKrVauTcudMHs7ZarTIhWZQcoTZnlsrlsk5OTiJchMMzn8+jTcn79+8DpFLskAIgwA0GnVCXV5eyh9iTi8VCnU4nI9s4XtImNMURtN1uN9hl5iXVUy4P6esuH9twpdGqdIwuA74vd3d3dXR0FE61lD1GNAW0gEd0BvdFBjxv2pudFwqFTD4xsoPuxZDi6EvKGDb0FIa9Wq3G2uU5yOR14xRxAhV6mdQBZ9g9T5/3Pj4+ajKZaD5fNxpnngGyFxcXkdPXbDajQwWpENgX9pG0Kf71vY8z5+AU++UkCc9ar9d1cnKiP/zhD5Kyra5Yf7cLDra2zbHysbLXngKKpVJJzWYzdAUsJp1M0lA5/wqFgobDoVqtVuAJSUEeAeb8s/4T+0UdCNEeDgby9XJg7QWXPCf7i7+lANWBLPK5Wq1Ch7rTx7Mhr9yH19CjnibCca7MFzYfBr9er0cLTu/wgz71kL7rnHTcHtFzoIl9JA2RqAz3fEoX8x0fuz4KUH3S0g2RekTOUGFg0oVEoUmKoqflchmUPRsZJC9tPBlYQwCBtMkPQsj8/r7Ag8Eg01eShcAIE8IhlIuyxjuFPUVJe7sQ/iGcFAsQNqVY5eDgIAAgDAUbi+MDYTRhtVDc0PMYE4AveXnME0LMEbLtdjs+SwgY4Xz+/LlKpZJ+85vf6PT0VO/fv48WJ+maunLhOd3Q5xnSbbhSFi0N46aesJQFMcgSCkzanOohZav0udgbvK9cLqvf76tcLuvi4kInJyc6ODjQ+/fv1ev1dHt7q9PT05Bxlw0Ho4zHx8W+gvWBwUHWPFzr4Z50vZBbmEtnw8itQ26azaaGw2HkgFcqlQj7crlhmM1mcQLcwcGBOp1OePaEjHEKWR9kjD6TzD1jThkp8qtgvrywwJ0OX3f+lqawbMvlY8wz8lxetFEorA/iqNVqAdiRQe6V7ldJEaZzFtZPmYHtge1HDpln9AyMZrPZlJ
Q9M50xACJ9b6LLJEXfaweYfI8bbHdEkOter6dmsxlg0g393d1dxpAT0ZA2gAlmzk9Qu7+/19nZWdQkYIe4B/dHFhkv5IETIf43XwPA6cXFxQd5gk8xUNKHqR9f+nIZTcfDertcoz9Ys/F4rNlsFsVET9kY9BWpU9gsHChpU+CZRlN8HtEJRCUlBUOOc8EegMEHC3Bv7DEOF1EBZ0U9fUb6sPOCt/cDc3B/t7vIjh+ww2mE3oLQUxxov1kqlSJCUCwWg6Tg99lspqOjowxRwDid9R0Oh8FwY6dI3XIyhOdzOXDH83PxwidzUPOEzQXRv8DBq7OQ/A3PkbxMPPZSqZRRkG5UPA+FqmGMl7f3aLVasYCeCyEpWnrwz5s9ExJ1j81ZRGljEAHCToMjuG48CNkjuKVSSa1WS4+PjxoOh5n+a4vFuvebKzU+7zkhXnhDDheggudxMMLcAjZ43t3d3cxcDYdD/fjjj2o0GhoMBpmcM640F8XZnTxGZxsuZJOx5xn4vI3CaxhhHBscKb8vYXiMkAMFNi7zzwELzWYzmCA8fRStV+s7+ETG/OJ5vGE1zhagks/xLO6FS4rPMj8oGuaBXFRvul+pVHR7e6vRaBRV9oyb9kR3d3dqNpva3d2N3GmPLJBLxRwRSfEQMRW7yCz7xYuqHFhIiqNU00pgdzpSZpL/f27I6d/ycmZFysorBgQgxt6HPfHT6dL8ai808Y4N6Z7xaM5gMPigYT9/Ix2AVAxkwnP/YV1LpVL87ufYw7rASlar1Xg/wMPZXdeNFLe6g0+x1HQ61XA4jH7PXuvgETvmEuZTWjtXdH7Z29sL51LagCTmiPu6AUbfAxqc/eN96Ol6va5utxvHR7quSfUX3+cAblv0b/p8eQSAjxlgU6lUdHV1pYuLiw8K95BJfrp8+r5FF9OhwgGWAy6Xq36/n+mCgvw1Go2MHoEwcqdJ2thibPpoNJIkHR8fh3yS1ufjZz4AsowPecAGOOHBOJB58mgp4iuXy9FPF8xD5IFUNJ8zP2FzPB7rzZs3evbsWThWRAt8DmGskXWeZ39/Xy9evNCf/vSnT8pI6px+7Pr/LMSfGnr/LA93enqqr776KsJPvvH4PKDLN7lvVHJG6vW6ZrOZbm9vtbe3l2Fa+Cxj4LscEOM9UMzhnhGX54syLgd/KInU+DEGP+Pa8zV2d3cj2X82m+nm5iZy/er1elSqEjKVNiCZuS4Wi5Hz5wCVcSOwbFpXFp73d3h4qLOzM00mE11eXmbA91Ngztc8z0vahoux+UbKG38egHWGBAY6Narkt2GgU4/dDTgFGC9fvozv8sb1pIMgU87g85oz675f8KhZc+SACAXPjgJMAQnMA4DC5wZZT+eNk0uIPGBQKIDq9Xq6u7uLAyHouUmKAIATFuzm5iZYB77bm7m7UUD2JQUDS3rPs2fPdHV1FfPgDpV79qnB5LVtkV0ul6E8HZP+JD9vPp/rX/7lX7S/v69vv/32g/CclG1TlhoKN57FYlHD4TBCh1K2SIX2aAcHBxG5ocjU2UPWH0MLc8M9HQQTgcIBJO2E9zIHfozlZDKJvQSw5HcMLu/l+9kn2AacsBSIz2azcCIZK+3ZOJCFfGiM/mq1yZO9ubkJlpC9jQzz/bQbRL+kTlNKGPCag9YvfeWRVnlkBu91J5ECu16vF2kVzprzPieQ2BvoNneCcKTRc562VigUojMO7/PCImlznDTjWCzW7S7JDcWBWCwWGo/HkjYpWQ8PD7q8vIyxdrvdYFhZJ54F28HfYETTnriMCR0KKKZ4lWgXtQD39/eq1+sBZklT9LQIyDDyxIvFonq9Xqb3sbSx/ThvzAksM+mNz549i5QtKVuU7DbSZeBTjtUn20z5oqYekW+i9G8pWINNgRVJwc3e3l4kDacPgDDivUrS+/fvg5mp1WoBLv273dvyvEDfGMvluhAm9Vb5HcCGR8GFsfe8o/T5eZ3QGMpvd3dXtVotOg8QpiMdIGW6Ur
Dt34Hx8bAnXRJICUBAXJCZ52azqW+++SZYXN/4qcFOGUdXSP7/bbhSOfQNJ33Ymy0F8dKmhRhsoaQo8CEM4kxRaiRcWbVarYxx5m+ezI9Rp4DC+8nxdw/de2sfSWEAydFzT5z7rVabk6xQNCgJ5srlyY2f9x7GuUoT9pkzPsPc0SJFWjNT3rifTh3kgUlrNhTP39cH+QfEVqtVNZtNTafTyJdNm7A/BUJTZ3tbWKg8QMpefwpMcjmovLi40HK51Lfffhvy9vDwoPF4HMc/u8OCwyttogCkUdXr9ZA/BwqHh4dqNptx2AdzmabBwOC4kQMkohNhwnF0CIt6BTyOIWNh/43HYw0Gg2CMUvvjOhwZd5Baq9UCILpOQ49TNQ2oxhAfHx/Hezjul++gODUF33yn63ppcxwyoNxZvzwZ3hZdy+VjzLv8dRxOL0r+m7/5G719+1adTifuRyTA9dByuYzCUPqoI0+ed8zvzjZLipQ7fie1is9IinQQyAV0tbOcfJ7x0wbSWwNSaOo1BpIiLC8pOqgUCpuQPdiBZyEyhXx7qh/6F5DJvWDm0cmk8NXr9ZBPuvu4XLEnnbwAlO/v72eApkdRDg4OomWmp+MwT8hAmhP/seujANUV4+duDN7nCqJUKkU4kB6IqXeIB4sRZNPj/QLqyuVy5Dqh2PCwPUfKPRM3xoAKlDOhdB87C+P3cuPlbVVINXDAmOab8Fye68JGQJhHo1E0PGfO6vV6nKTF2BA+p9+9WAHmiZxD1svzoBgvYyDPlTwgzz1z0JV6VM7s8azb4MlLH2f6MVTuXDkoZx2r1aqurq60WCx0cnKiRqORcdrYFxhO1sWNG/LcbDYzY8Hz5b0ASRh7xgA7yvewhqPRSMvlMnP6CkrUwTgKBkYJ2UZ5Oij3HFeUHvuE5yZlgROn2F8ehnXlPp1Oo/UQzimV/RTWkNfN98FoMb/MRwoq+H+5XFan01Gr1VK9Xo+eqx7NQSa4X56zvU1XHuvkaUdPOYN7e3vqdrtqtVrq9XqRx0v+JPpsOBxGoVChUIhjldF7dEbBGalUNseY0gKIvH3G6YVBdA2RlAFckiLqI21C5a47/XUcLVgqByHk75M6Im0Ag6+5O2qE23lm5KndbgeLRqseuk8wJ+PxWDs7O9FyCicOW+BFZZKi4wBAjGppd7Jcj+7u7urVq1f605/+FIWsPHMKVLnYL9vgXKVjcdlN2TJssjsjODsObvr9vs7Pz3VychJzCJt5fX2tg4ODcBaQGVjN1F4xHg6TcD07m80ixY2IlqQM+cD4GTs2GIef95J6yOsPDw/hqLDe0+lU19fXUbDnrQOdUMHmYyM84sH7ILNIm/QiVQoB3WGEFGAvEKlwnIdMAmLfv38fBxs46bVabYrfvYUoxALPk5JqT9nn9PooQPWwpQuae6SpEnWB4CcLSSETk8qmRnh5MGdgCMfDXCGctDuACi8UClFJTQjWvVMUpy+WV1NiNKVNb0nCOw64+Tx/Z0688T+GAOFFmaH4KRBhHujF5r0oV6tVhM7I6XKPitNbvABC2pw+MZvN9O7dO5XLZR0dHYXAO0PM58kTrNfrur6+zoQWXSG6wPnml/LbNH3Jy4U/NeauLJ2d88+hPAn7XV5eZnqfpvKNR0vBECHs6XQa/WcxfqvVKlqqoFwmk4larVbcS8rOKcrg9vY2Cko4xYmN7+xoynZ5LjcKyB2yxWIRY2NMPJOD5tVq096M+WLfEGYFnACap9OpBoNBxsFkzMyPe+TuNDBG5g8jBEhl36OQveAqZfVTRs0vd5i/9JU6+KkDlV7+fLDM3377bTR9R/5w9j3cTL9eZAU9g1MyHo/V6XQy53ezd87OzmJNpU0nCC9G9QJT2Jf5fJ6xBbe3tyoWi2q1WhGex4i7c+KFGDhzHlWr1WrxDNgWgKm3nMLwUj2OvGB3aEeFw3ZzcxPkSr/f197enp49e5Zhz3AiXT94+BNZJqyapgRBsLRaLR0fH+v9+/
eZNX1KNj+Xifq3uNCj6f7z//O7k1dpJIr0DnSJ5zyTtoRTge4kLYS1Z55xgF2foGf5/l6vl0kl8Ojl6empisVihM2lTY0An2FvLBaLKBBkPdFVi8Ui0hIBsbPZLA4cQT8zRvafkyhus3gftob5lxQY5ebmRldXVzEWiL9isRj6m73OM/i9nf1Ef/gFqcie9EMwUvzgxCUXcv+x67NC/Dx8HtuUXm7gMcAs0nK5LjSp1WohdJ5/AZJHkZKgTj4lv1MIwOIh3BhLZ7Y8BOrhPsbqRR7erN5ZU8+dcrZQ2oSwWFRpw1r53/g/xpSCFl7n1CkWez6f6+rqKoQDEOtKkYIYNovn4jBeGGLm2NMUMCAnJyf6y7/8S71//17n5+ehKPK8dX+elF3dBiXJ5TKaF3ryjS8poxw8/P/111/r7u5Ol5eX0cLH8zfdYLIuOCIoslarFe/BaOLQYGTJc3Pm0E9PQ2kXCgU1m80w9IRtUDCEmACyKTh1L973pY8dBS9t2g0RZoPlT50/WgsBiChOwZmTNsVMMB79fl/dbjdCtnmOsP/fL8A6jhUHTvzTP/1TzJ8rVGclHPj779sivx8D0M6kudyxfoAs0p+c8fd8YvQv4NH1HVXsAC/vHoJOJn/dW/Iwx7u7u+GswEB6WE9ag0mcCezEYDCIpvU46hhzScHCAhyJclArwH5zu4V8w1yiL/m/60cK9pD5y8vLD1jr/f39iF5QL+Dzz3dKCkaZ1wEO9Czm4plOT091eXmZISVSB9r3hQO/bbjyHECXWXc4uZwV9P3KM2H7PNqU2jZYwOFwqNFopFarFSknfAef5TheZAwdDIHmRACsOfYfgIfeQ1970alHAGDTi8Vi9LaVFPJ4dnYW8sCcMBepw+z7gNexPamM+Twi16T8kcqDA4YdgUjzufc1a7Vaurm5iShwCpALhXVe7+HhoQ4ODtTr9TLjd7yAfHxOxPWzTpJislzJp8Y/BX7+5cXiujjo7u5O33//fSgkwnp4RX4/FCfeNGEZGEruT44UDfX9FCfP4eM7Gbt7NjwboAEhccYWYUOwWRSU5mq1OQqMCj4UUqm0bjDdbDa1v7+vu7u7yA3xnpVHR0eRT7VarTSZTPTzzz+r0WhErh2GaTgcRugOwUuLWqhq5R8CS8jOhbDdbuv+/l7ValWTySTm0D1h5CEFp24otyHUJGXTEfLGnwIA3zA8287O+ozo29tbXVxcBKDCQBJO53J5r1ar+vnnnyUp5NVzjqg0PTw8DPn33o7cH+eLdSDfbzqdBjNZKGxCtMiCy72Hj3DKPDrge88dGGQXBZY6XOQsck/2AS19PHRPOJ8WKeSBkc/rLYlwtADLeOWMkapVP2aW50QHjEajjEMtZYtLUjYnNZxf8nJ5dSCaMlHSh8UmKcviFfVu5NEThB6Za58LujGQRw+IWCwWIbeDwSD6kMKOD4fDTBEhBpg2YKRoeacSrwzGoDImUgWktewRNUPnEqnCsXfmzveztA677+zsZPoPY6i5n4N6eiE7AEV/ujM5mUwiZcDXx8Ek+240GmUO9+A+t7e30e/bHV/GkurdVDd/6Ys5lDZ6lyuVZydMWF+iiv6MyK4TQNzHo5TF4rrg+eHhQYPBIN5D2h/3RH97azzehyMFgeCnWTFGHHF+ska8DxuMrLBmpVIpUp+w9UQEXrx4EVX50saR4Xdkx+1LaneQSeaJceCoEi2AWSYVAPnxQwGcyGLOy+VyFFf5d6MrDg4OovUgubjuZKby4MTLx67PajPloNQ3hbNQKZWbKs1KpRL5ThgXvHYP6TPo8XgcHi+n2HA0J+/BKPX7fbVarWBmmRwUloNZB14oAhQFgoln5WCL3BTmBOaT1iowUp4jAoPFZiCN4PHxMdqqOBuFwsc4EL66ubnR/f29jo+PI4QgKQoXAAfuwbMuftoJa+FdBVDG4/FY3333nd6+faurq6uMofM1RvEzD+l7tkVZpkbCx+//eAZnxvkcTAb5T1
zOSOOgII84OuToIWeuuD3ETfsxZBPGyfPfUCCVSkW9Xi/GJa33wXQ61WQyiXArysrTUXh2QK9HNxi7e9duIN2IA0xpn4NnX6lU4nPI9uHhYShn9gAgldwzD6v6fpQ2pxC5wmcPEd7jOcvlsl6+fKmvv/5av/71r8NI5UUCeN1/3xYWSno6bJvqWX/NGX13thyUpsY0/Q5vZTcYDOLEP/YKbW3m83nmOEqA3sXFhebzeUSxkDXkZDgcBhDwxvqAV8gH2HoYUi7P68fJI+8ag87vHkVD9tmnvtcBz8iXh3557ul0Gvnn7BMAUa/XU6PRUL1ez+gCD+GjY7FXyLSzzsvlMpgnwC72hitlm54iiL7U5Y5/Cibddjhu8BQ4Lic6sK1ug1yP+/NTsEZaUSrr6BmA4Hy+6QzhIJne6sigpEx/aIq5OaEJwMpzICdcpAwAhGezma6urjSZTHRycqKjoyMtFgtdXFxoOBxmUmSoDeFZPDpKGgs2gkNUAJjIOgQHJyLSCQk8UCwWM8dxM/9p6kDK3vsYRqORer2epM1piB6+d0zIvLpD/dT12W2mUiY19ZR887gA+QJxJBbKD4DFgzrDyeIvl8vwNj25HMBHs/Ozs7M4KxaWqVarZSbTQ/7FYjZPle9BwXrLncViEd4PC8B9/PSTNBzkcwboJUmZ1hYeVqVX3+PjY1TDAU7IyaIhuS8ySf28xnwPh8Ng7xBcNhCVvAAfcp/4Xn/+PGPG/1Mgvw2Kkisdd57B53K2gvei3Cgw8ZNqmEfPzcHJkTbhPRwlr5YmMgB7QEsSHDNnM3FSCoVCtE2Zz+fBcsNOUqXp6SjOHALy3NlEuXmenLNhKLfJZBJ7I41KINvL5TK8a0AN91sulzFeQMOzZ88kbRq3M1/MOSFfQp/cn1xGQAXKuFgsZnoAshZpJMCZRw+n5cnHl7rQHegrZ7ilTYTKnULeT55n3nv5v/ShbocJl9bgFHbcHQYMd7FYVKPRCBDQ7/ejU4UTCORl0n5KUuhkdCHvRW5JIaCNE5EBBzDeU5J6gbQK20GpR+wkBagtlUoBOryIyoEUqVQPDw/xvThVjUYjmF56+8LoSpt956wqaQ0eGkXed3Z29Pr1a/3yl7/UaDSKHOKUsfI0D9ZyG/Ruqmd5Npe1FJDgYKL/3JkGlHnqmrTRO743KAziH/Ph6Wp8FykBtVotcqQ5VAcgyvg9mgq54M8D2w+eGY/HIWvupGCXOZHP7QF/73Q6Oj09jfHwHNPpNNJKeHYHd66XaW22WCxCF0rrHtWFQiFaBF5fX+vo6EjdbjciEmlUhjn0KAct09DBrqNw4o6Pj/XTTz9l1tnl4P+GEPjkSVJpmIEvS9kzF0guPkvFLgU5k8lEb9680cuXLzO94dLEdiagWCxGn1AMJ+979+5dCO1oNNJqtU5qH41GevnyZSYlwHNRPPeVqj0P7Xj/UVif9PLWOigqNhVMFqGbQqGgm5ubUPC0L0JpIpT0dGTcPBdH6fF5BIcTXHi/A3AqwDHYMGYwtLxPUrQCYiM9daXelG9W//1LX6ksfozhZd2dNQGgMmdUPMN0+r0JnTjgc6eIvnUOGEns93Pq2+22fv755zD05JLiHWPY+v1+GFEqQFEi3uUBFt/DTs4sOcND/zxkmb2CgYT5cqaJf4SRafVGqgKOGHuDfQYoODw8DKAMkAAsw9ItFuuDLMjjhanFYHhYEGPh504z38iAlH86z7ZdKRnggDPVyc5OORPEnPJ/d/D9s1yLxSLSMyjAc2MIYPJ2ee/fv4/vxVkCbHrBHjKYspvOWPLZfr8fxS+cQkbuIE7f5eVl6LDUcCPXyKUX2HkRLmMkXxbZQe4I+ZZKpUipwhGS1hGWo6Oj2N/MWQq4HJC67vb/o2/IufSQa55RdzlIU42+1OWERcqYpc6UtGlnBJiTNmArfa87NDjFECkeKfKjkbHFyAatnlarlY6OjqImhLxjcIi0KUqlHsZllfA467tYrNuQka
6HLd7d3Y0QO+NGFnDUxuOxbm5uMr1V0fXsR0khT67XkN9CoRA95ovFoq6urrRarfTq1asolhqNRhoOh1osFpnj0ovFYrSc86hZXkQGp9WLLyEWyEut1WoROXPdxVz5Hv0cvftRgJoKvyvMVLmnX+aCimGrVCrqdDqx+QeDQYBHBFaSrq6ugoXhs/V6PdNLjDDPZDKJCdnf39fh4aGur6/DM2o0GpmmtyidxWKRaflRLpc1mUyiAa60UZrehw+B9HCTh+lhejlEAE9IUrCcVF7zHvLAWGDuyXsI/SJYnpztrCwKj89yLi7VoW6U8R5ZNw87UXDiOWl5If9UmN2QbtuVKncpC7LTzYTy87/zGcKa7u0DyLyoBOXiLMhqtQrg9vz583Aq6JGIM0c0wB0j9gKFVcvlUp1OR3d3d+r3+wEEUD7k73njdAAsytJzTtmLsFK0b3MgCugh5cXDk14ZjtzjoHn+HgADtlWSer2ejo+PQ2+wpzhRCF3kjdAZP2sDg0hKAvOeB+bc4Kf//9KXO37OlrFmqcHnQm4x+twHefUiIp4V/Q7TvbOzo3a7HfvAHR/kArLh5uZGy+VSh4eHca/z83NJylRPO3sGCy5tQJqHusmDdwYch9qPZiSPlT2I7iOCwT090oU88775fB6nA3777bdBAlSr1ehuwHs53teN9c7Ojo6Pj1WpVCINgD0gbRxfnFf2rDuGzA+6ttFo6PDwMJjWp+TDr20Ap1J+KNj/78DUSSaAmbQB414kxpq5buD7YE75P+/ztnt8N/Jyfn6u+/v7iOiyz4hCsUekzf5wjFKpVEJXU8gKccFzpfn20joCQZ4mhaLoUqJfPK8zwchyGhLf3d1Vp9PR8+fP43RIHP9SqRTyVq/X9cMPP0ThV9pvvVAoRFoEedSOb5hD8m6pe3HMwH2olWGPY1Nd7zqe+JTsfjLE70LD707VpoxUHnBBwfFQ33zzTTQ+lpRhQ5bLpZrNpiaTSTykJ/f6JqAlSbvdzoRZl8ulut2uVqtNNZ60URSATqhzP+ubZ0bA6QdIiAqvBcFpt9uR0+JFVaPRKD5DcZNT4gj/arWKgg6fK99gy+UyQgeEQ2lm7KddeRK90+6ch06z+NVqFX3aOFuXjX58fKwffvgh1t9BqguZbxT+nycPX/pKAWZeVMAVqm+mtGUOc+G9DrkvzCDADmXiuUDIxmAwUKlUUqPR0PX1dXzXarUKA4fsYQjJSUZOPLxTrVY1Go3ieFA3gDwDBtqLZTACAA5PasfQ++k9XrwFYJnP52q1WjFuwLADDvYmuVQHBwfqdDoZwzEajfT999/rxYsXmWpZ2hyhTFGeOG+E6xxwYzx8LE+BVJfjbWH/01BYHtvgz+M/b29v4zQeKVtwlrJ1j4+Pwd73er1MVTROhbNMFBbBlp6fn4djgnzt7Oyo1+vp5uYmo+dYB3ro0oeRdlPocwclrithzpE5KrG5v//k/ezd0WiUYeWQ5YeHB3U6Hf3qV7+KfsLMGZ1VAAqsyWg0CsdzZ2dH79+/V6fT0Wg00nw+1/PnzyVtwHgKUEiBYJ35m4eEu91udHVxfZoCPfaIOwFf8kImnwIdriu5ptNprLuvGfrWC5I999Ll2QkF72NK9ApbjazBXkJETadT9Xq9CIMjI+7g8WzIA1ErCAWiZg4iSQ2kyPD6+jpk7ObmJlMXgo1Cx6Z7frFYBH7iWF/0rtskQvF8/vHxUY1GQ8fHx/rjH/8YdQ1EbOnPTQ6qOwBcTpJ4+oOUtadEwZvNZnwmfQ53Ulxmnro+yaCmNLv/e+ozKFcmvVQqhUKB1udMch6SiSHHjIpPCk7Y7Dwc+aeLxUI3NzdqtVrqdru6uLgIhpGCI5Qd7CmKAkNIUQoTCBPG6VYsAs+BooVtdO8JQOynhiAUXLzXQxMIuxsaryglxIZBh0VDibpCZB0QNEJPFxcXUdwwHA7j5AeE/quvvtLbt28zRj11NlxJ5oWetgmg+lgYf5r4LX0YTu
VzHDnrlzNZfJZ/3ugcVoWCIAp6OJBBWiudP/zhDyqXy2q327q+vo719NAovfIkxWkekiJUORgMMuer84zsI+8v6MaW93BfT/sA6AEY+IwXEc7ncx0eHurVq1fa29uLdASO8cXrhv0CFFDhfXx8nOmjeXt7q36/r3a7nQn9klfOHmWuAV+AHcbeaDQiipDHmvLTnW0+uw2XEwKpwfS1cNbfmTgc5UKhEGE5T6Hivn44AuspbY4h9SM/h8NhONyNRiMML44++ub6+jrey35L2SVSPEqldfEchVGSYt0cgDholfSBHGOUAdfSJrxLI3Tu42zcs2fP9Od//ucxBxAEBwcHIbvIvOuOt2/farFYnxA0nU6jc8RsNtObN2/09ddfB3AkQsc8np+f6/j4OMaYAgIYOnfIeFbXTS4TfG5bLrcPec6V2/FyuRxt8yCM2u12Bri5zvDPu0PieffIM6F71hViiigDBBaEQbfbjSJk9gMykx6DzphIAfBndPA1n89DP7NHvB7EiQHAr4NcwPfe3l6kflE4lTovHi2AfOA7aGEG2UbNS7fbVaVS0dXVlVqtlobDYQBMdA3yh2yuVus0BIgPdC5jlxTAl73rDPn/DV74JED1m7jizEPAKRuVeu1HR0cRhqvX65mKYTYjwg1bRNidogru696JtDZu3W43hJ68IISSyjLyPG9ubiQpgKQn5ZMesFwuIwTLxCJk+/v7mWbrFATwXZKCumdMGAn+xhgx9K1WK57X5xUQSwWz5y8CgFHSvk6A1lKppFqtpvF4rPPzc7VaLbXb7WBOATW7u7uhbN1w+/3y2Mc8mdmmK90cDuLzQCyGxQtHVqtVhBhdQXpuDc4DAG25XEYKCvegDQohGNhvQt8oNc5ZxtCWSqWQd/YG4JdiP18bmHh+T9cFgy5tDqYAzJI3hZGWNn0zvaflwcGBXrx4EWc37+7u6uDgIBwp7yQAq+by2mq1dHR0FM/kuawwFADY8Xis6+trNRoNNRqN0Bnp0XuvXr3SixcvVK/XdXNzk6sA03VP9daXvlIHMGXHPKqROorValXHx8dBCHhqEnrTi3O8ShoZgw0EdGGIJQWL4+kv0saxr9fr6nQ6mWIS2FVkG1ZWUoAICAHYH08tSxlCdzTI26zVasGQkcdKYSsh12JxnULFGE9PTzM60wFhqVSK/U1FMn8bjUa6ubnJpHzV6/X4/G9+8xsdHx+r0WjEfWFkaf3nTdyZFwfEVET786cOtAP0bSEGfM28iMnlVNrYFD8eFBnkIBAnuorFoiaTSWaN+D72LeF2cpk9n5U1HAwGMd/I+F/91V+FHqWvrbf2A4cAyKSNs0FKAIfs8ByMme+RNsXVFNehCwHK6FUIM1L6kFlqSbxFGXuXOYfdJSJCSuXe3p5evnwZeaiLxfo0uffv36vb7Wpvb0/v3r0LZ0Ha4D4ndQD6MNueK828koaYEloeneT6FPP/yaNOn7pJHgrO2yy++chhkDbH3blxYYIRaCYgj3am3QLKCcWKd393dxdA0ROlyXmjMtsLOYrFzRGSntfJc/lpLBQTrFaryBfCsyfvzgWNpvoYX4AoBVUvXrwIAADTzHO7kqf/JQofhgTW16vrYFKYX4AHxWYwa55gXq1WAxSk4YaUecpjT7ch1OSXe9R+pWGUFHTzLFSws75U0sOip3m6zAu/4yCxthh7QB0K9PLyUrVaTcPhMMJU7sGj8MmVJlRFCzZkm1wkKo/xpqXN/kRxkidVLBYj7y7NS16t1v14kTvGQqsnkuORVdqo4DBRxEf4mLUgH6vX68VJZzByvhZ0ESgWizo8PFS5XNYPP/ygr776Sq1WKzx4gICkTD9Kv5cb+dR5/lhU6N/6cv3H/1MWzZ+BcQOmMJL+OeYXmQWw8twAskJh064GZx4WH51aLBbV6/W0Wq3CCSI/+uuvv44ev4BWaQOqZ7NZAAj2F7qLCztQrVYz6SKuW9DLRMvq9XoUmvT7/ehEwP4izMkBAY
1GI9pZeaoAQAiHDccJxp7jXQn1o/8pvqLAbDAY6OjoKNaLuafIDHDC3q3Vapl9zufcAUYWXE7QNx42/1JXXrTC9WsKTlln9mFKeqF/wAiknrhMcD9s493dndrttsbjcSYPmt63OOA4RCcnJ5HG5IDLxydt0vS8FVulUlG/34/9MplM4jhc1i3FLTwL9pye1r72REZpnQlm8j7UzK3jLVIiGAe6gCN8OQKZYnJSJGnp2Wq1NB6Po3gVHOSRC6LaHHHMeFhTz6NlHl0uXJZ53o9dn9UHNWVnuByQoATzPru/vx8eCQwT1DXKks1ILgkPAZXtdLO06YXH6xxFBnBE0BBab2ODQDM5sFje1Je/0w7I8/Pcc6FiGUWDd+GtRrxiD8+FvBuEgD56Ts1Lm/wt5gilSBGOF8UAOBAucmdgWvGk3r17p7Ozs1CU3ni60Wjo7OxMv/vd72INfS25fN39PZ8SuH/LK1WQ/hpGwA2+MxKwzzgqgE2KKpgL1tmbPHN/nCc3rjDm9DRls3NAAiel0SCd8QFa8aBRgNPpNMYBYC2XywF0kVcPgeP0sFYYbNgs5KdUWhd+jcfjOI6S+xweHgZA3dvbC88d9pm0BgDRwcFBOJKSAkiQ0sCxvrBcGHCqownNIc/D4TBSAfhOALwn77t+cTlIwV4q31/68v3lBtkZEx+7s5VefMOcOCj1e6cpH6RYXV5eRoEcjgCFJTCUR0dHGUN+enqqg4ODKAgqFAqRygVYRMcwxm63G91NcG6kDYGB7uM7kGU/fpfnG41GGo/HmZAqRSOc807ECMOO3cF2kb9I1xhJ0dqPHOp2u603b97o7u5O796909dff62joyPNZrNw/sfjsX788Uc9f/48imZZm16vF3at1WoFi4gu7nQ6+sUvfqE//OEPAfTd0ZL0ge7aBtnNc/DygKnLL3vUu9NgT90hQUZhOpH529vbIIzQQxROesoS+8hzsmH66vW6BoNByAe23p/HWXxexylCDz8+Pur6+lqj0ShIMZc1cATRWSIHsJFEMFutVuhVB6BOhOBQemcBSVGgyglvdPFhvoja0TsbHDUej/Xy5UtVKhVdXFzo66+/zrDB3LtSqWgwGOjx8TGiX+he1hOnzyMlPn5/jk+RAp91khSbwY23ez2+eC6ULnAAs0ajkWFVmUQXZADnYrFu33BycpJB8pICiGKMPRHdm4BT6Q97ybW3t6fBYBDNa53OJ3RYKBQiDw9m0wWbXDc2B0qVDQLDCUtART2eHZ4v+RqADObKgTCKGKEmpPDw8KButxtAwAtG2KzetBiQ6gVArCcFNx4CTL0krjyAlzKqX/JyIMLlQIXLN78zjJ4Ij1KEZU5lEdkB0K1W68I30koYB6F7wCyJ8nRNAATv7e3F95CG0ev1oqCPNQawjkajUFAoLcbNuvkhEx6+SkNGKPvHx3XDajpRwEIhN0dHR6rVahlGDiOMsnX2YT6fB0vEe3DucK7m87na7XbcD73CXPCZZ8+eRecLxg0QA+hi9D1U7M6Uh89TNv1LXxhR6cNTpVJw6iwaDjKgE53owMANiOt0dC6vkxOKAQYIUJAHs8icNRqNWDs+x1jQ9zgqrDd6l7WiklnaRL28Up+rWNyc+02hy2Aw0Hg8Dl23v7+vVqulRqOhWq2WOXGPf8w1PwHQy+Uy2lrx/DhbxWIxClZhq96+fRt7FnnGFvT7fTWbzegQQEQN5p8xub3c39/PVPGnKWapHKfE0Je6UnY3/en2wlk/cixJb+A0LVKXUqfSU+VI20NH48gSPULmmV/Wvlwuq9vtRoEf95UUjo0XZXFPQKazvMgLOfhghUKhEK/x/ejjlDFtt9vB6lOL4myupzR4wR+gG9KDMdGhyEkabAPvw/npdDoh241GQ7u7u/rtb3+rWq0WEUD2+87OjrrdrobDofr9fnReYW5J14KVdpnIc6j+1QA1T8icffDX8wbk4LXZbEY4PWV0uCcKjQIhFozvxdDDjNKb0t
mm+Xx9RjmLsFptcjzwnlgslC7C7p6shwClTUNpGASEQFJ4Sx72vb6+DuAxHA4zOYUoZpgH5gqPGjaBZ+cZnR2hfZQ7DR6mQIlSCY6ChNVAkZO7JSmqWbkXG8nX+Skgui2evJTNO5Wyssvf0/F66ASjyvGDKCDkx5WNMwGSggXFKLvxoVIU54W19rHRX46UC3LceB6+l5Zk0pqdx2lBqTAWr6j0feb5Qx5ZoOLUGdF2u63Dw8MA0OTSOUPncgF45ihMZAo23wEojNWLFy8yPUwxFOxHSTFfy+X6ZBTYDnJ4AQX7+/sBElhnXzs3klzbIruMMzXyzG26H3nN9ZenDzHXGDMAD3PjLKoXcwISvXLdU5/ccJIfR7jSw7NEhRgj30dOH2txcHAQEQAcGV876cMCFHem0MGAZQphPYLgYIG583Z9zuYhw7zfiyBZE4gOgDL2ydk9jr5GBxMleHh40PX1dUQi0CMwYOxhdLDPA8/ie2kbrlQmJWV0I6+jO+g+wpojX0QCfZ/6/3G03OGaTqfqdDp6eHjQ1dVVsOEQCqxloVCIOpPz83OtVqtoAYmMsn+QVY8s8ZzuSBLlqNVqOjs702AwiM8ho8i0y1O73Q6CiWLPlGFmXpB98BJ72xlOHEP/To8oI4M7OztRB0RNDc4aNUIPD+ujoyHhIOdItZCyBCV7jPZbntLFvKcEwaeujwJUBMsNMV/oi+PCmQogkwFr0mg0wqtMw1H+D2XhLA0XAFVahydhTSVFPy8EBo8HL50iFe5D6JR8JIwpY8JgIlR4XIvFIk6f8FAqIQKA7GKxPrVpNBqFB0YuE6wridmwV+Q5wQZkFuz/NQKcGIEi3dvb0/HxsQqFdUI3PS1httJ5LJfL0W+QNYM55r0o6pQh5/0pWN0mBjVvLClLlufNuWcMuwmY9zByWsgG6KGqWdr0vcUDn06n6vf7khSpGe12O9jD2WwWOcAOJjDevAfghrLb29uLoo1yuRzAF3A6Go0+mAdXRsjqbDaLfo/k2h4cHKjRaKjZbMbxos5I8A+HslTa9JGkz6+H2vgdJYsCPjo6ilAU+x8FjPGGrUBJunyjp2jqf3p6GmssbRQi3+mRiW0BplzurLjezZNpdBPPAAGAQ8Xe572AeNdpRGVub28jZ1hSFI+SLwyD7cVvrD96aTqdBvMobQw3+gWGlOdD1zprS44grfq89RBFUJ7zSVrA/v5+yKoXouAEuaPIPHuaAKAFhxGjLClSXQqFQswPYVi+D6afOd3Z2cm03PEIVqFQiOdE3n0/kX94cXGRkdE0NWXb5DePRU1TbBxHYAdhQSlmchvsz+nRAO5FCzR0E+AMHe3hZ09t4oSz1WqdapKX8w0ABKfQV5rL92naaQgdXigUQsezZ8gNdeYdDJFGT5wI4H3ShhH1NABqDsBbdEaoVCrRL94dRA4YqNfrUZkPDgAsO0ZgbJ7L6kXhy+UywDn38s+5DU4drrzrowA1ZUXy/uZfkgJXhAG2kHAiRhnmhQlzZgllQJgyBRKey0bIx9v3uHdVr9dDKTw8PGRYAknhfaPwULjSmvXtdruR4+qeAgwFwkL7JgenFEWh2MvldVsNWFsq6FD4MGenp6eR1I0xdmaZAizCyAcHB7HZYKlWq3U7CKpCAVvQ+Mw77BkAu9vtqt1uazgcZhiWdA18Y7g8bMPl65+y++7RufPlf2POvQjPK5BdWXoolTWWFJ4s94UV5z27u7vBWDI+qihdXnAqKpVKpH6484Uycg+ddA1nMVIWwb1hWglRwb2zsxOFJTC5XujE96MgU/Y5Zaf57p2dnXAgS6WS+v1+7AlvIM24YQP9bGev3Cc0jUyThvDrX/86Mya/0rzLbXGquFyJp+kJaYSKn/zdq4mlTV9oZ8ulDUvLnHpOPAwfub7oPk8dYT9QiEHo+/HxMU6mSb8fOef7Xbeg03COYTXp/LBabUKwkiJ/bj6fhzHlFBuADnrc97q0CZn78yILpPLgCPqe9a
4ZODkADsAR94GMoSjX5RW7WCqVdHJyEmuKoSc1gVC3kzN59jiViy95PZUjze+ub7GP5J6TYsE6U1SGrgAA4QijGyqVSji3nt8JweP9VV2OJpNJ2EP0jkcFcI4lBbnkOsPtBuNBnpxNxBGRNk4Wn3HG1Pc6e0va5JeiB1OnFHmi1gfyA6eSv8Om4iSRK0402ltsORFAOg1jZi8dHBzo17/+tV69ehUOIe0qf/zxR/3P//k/417s8dTR+lcB1DS0xO+pknSg6oOQNqFDeoJ67pGkOM7TcyzYlN7/0UHAcrnUcDgMg8YZuAguihDGZ7FYRKI/i0S+i7OECAuL4ADQQ+h42BhplCleEvMAw8q9odUJqVcqlchNQukdHR3FQrdarWg3QgiEcTvArNVqYZQoTnBA8vr16wAFx8fHGU/Rlau09oIODw8zIDfdjB9TkNtk7FmzvPG5J50yoavVuh0UHiLGjyIiz0PKC/VzHw9N4ygQciL0ieJCPmhNAzj2vUTeFEwYYRZYnH6/nzG2yCNy4esOQ3x7e6vpdBphSliCRqOhVqsVhQOMD+CPc5IyOrC2ruTu7u7CIfMDNcixpT0cFdkobtiylOnytZIUDCufJR0CNjt1tNMx+5xty+VMkfRhsWL6LNJ679KneTKZBDgitxOGB0ObRsUwIoAEZGQ8HkfVM9EDnHwYe9qiAVjTPFrSDyAC6IXK/oA8IJLFM7ksIXPePg3DiG71giP/xzgYg7Oyrr/pMblarTQYDLRYLCK1h1oI9j6hUUkRkndjzO+pw+GsUsq8AVxJH3Pixtd/m6JVXCn4SB3VFIhJa3wwnU4z3TvQr3SzYb6cBXXHhp7gw+EwqvUBq9hx5NbljTUjFcTHji6RNuDU6w48ygGJ4NEBcIMXIvva8RxOkri9cuDqF3bLuwkwHsA4Ywf005ISh4C9zf7zuhPPd/Z0IJ7Lbdz+/r6Oj4/DXlBsXSqVYt/mrX+qcz52fVYOKpd7oZ6LweUb0elpHgwFlzcBTDjhcirNCHk7O4tBlTa9zzDmfIbTOGgE7UAMo4mCYTywsa7QYA8kRW9BBMGFnbwhiqEIH+AJIQj1ej3YCcI8KCsUP4nGbDqe28eFQN/c3Gg4HMbzwqjBKqBAYZIRXDd0jA1D8tVXX2WEiGd0AUuZ85Sl/NJXOn4uB5P+d/+Jw5C2v6EtGYoNppwrL6rgecuwk7COGDHWlyPiPDTFfZA1QkywpJ4YX6lU4mAJmHYUJ1WdDvDYS7BhxeKml6mDU29vIimUJzLLnvYCLWkT3sU5ZY64OEXm8vIyogqed+5MhrNg7jAVCoVMfpZHbwit+vr6e5AT/7kNV+pQpWNzWXUjgIPurcuIHtHOxnP7MKzoEpc3DDpgd7VaRRSM//M+l22KezBkyKoz/oAGd/pJSyAKRFQM4+npYIDM9H0wuchJGtFx5on3oA/Zk/TDZt97PiTEAY4qqSw4sxAi3Beg5N8vZQEcYBsHjshbs9kMEOZXnkPhMvElLw/D+5UCEScJYDp9X5Mz6lXqyBPOxWq1OQace7pM4wRh76UNMwmAc2ecezJWB2vSJhXFG/YzHi9MlhQgEd2URpi5eD/fBfZwvedOFkwy40f/8pqkiHB4a0NsACQV+poixuVyGcQZ8oWD4ON2khCC5fj4OLPezJ8XYfMZ/7s7nR+7PglQU4POl+WFmvw9rmCdWfXQU1pFz+swJbAxvlDz+Vxv377VcDiMBF73xKG3ya8kNFmpVOIcXA+7UETEBXD1HCq8fT7P3wG9VC8vl5t+bF4gg0KmxQOVrIQeYLo43eng4CCTQ+MCy/ySN+ZV4ggb4BSBYrOnCf4IB+188PqKxWL06XQAxhj8s/yeAr0vfaXjSJ+buUzZCH5HUeJ8oGzY+OPxOEJTXjQiKaOEUFQcpweDyhh9f6EQiAzgrDC/vBdlRKsalwtSE8gpJUKAMiV/DvCYhp686XmamM+cuPFH0SP/yKUXL6EMV6tVpqqego
jXr19rf39f7969i16n7969C5C6Wq2LBw8PD8NoF4vFYANQ3PxtMpmoUFinS3A2fApgfa3z5OVLX+nYUlnNiwwsFuvuH85O0u6F3FLPM3MdjiONrqBQhJAhxtznz+WcHtQ4xJ576vsNPeV5rO7sI0sYRF6nCwsONe+FCYKh9fnzvcUeYB87iMWQ83/SWNChNEsnZ9BDxkQyLi8v1W63o3iQub27u4uoCK95FGW5XPeCHQ6HYatGo1HUVaRpSq6nPDKwDQ7WU/bfWeAUN1DQjBx4jQT6yu+JHUvBILLAPgCk4lTgbHFv7gdJ5MAQUMe90FnoXtclXteRAlFPV3LH3AGu2yHGlKZ1uM11++9kE44g78OZhCRjjpgvjxbT9tILColKs3ZesOhkX7FY1M3NjSaTiV6/fp3RD9zLW3T6vvwUOJU+I8TvxQR5bJRvEmdV3XvFKAKouM/e3l40sAcMcR9H7O6dINAAMWlzXCMArNlsRoW8C417txRXpSeeuDLg+2mH47mBbCjCXzBuJO/DSgAWYTLccHMfqjnZCDTOdUaKOeRiExWLRX333XeRs8sFmOAZOp1OgBbPx3GGY7VaV5yORqMACt7SgrVIFea2KEi/2Px5GyEPpLhy9TA9BhDghdE8ODjQ9fV1hBR5fo8OSJvm31SZ4uFKCoCF/N3c3KhSqUTl+8nJScaL93xtV4AwoTRkxquvVqvq9XrBTsFyYZA9YV9S5MUS6XCQAMskZfOveF7+UQVNThQGAOPj+cyz2UwvXrzQdDrV0dFRnB4znU5jfr25NEbbc2mXy2WwTxiQdrut09NT/a//9b8ya+xymhfS3zYZRi64XLnzfwc+gBlJmXC+996FSWFOAJJEVyRFn2pkn7QMB6joaM8t9pSMlG33feBy406I7zvWykOvPD861PcAoDNPB/r+cbCE0XbHy8fNHLN/6JrhaSjUDHQ6nXjN/05LLmyctHHamAMiBrDFrVZL5+fnmRQff7aUIPocQ/9vdbkelfLH5joXwgb5QIegN5BpdJqkaFdJxAV5I4yOoyZlQaCzjLxG+yTGClHAfLPeOPrIPXbR9RkyCKnjjiRYCbnwlMY0ugRwfMrJSgkCACTOkLRpVcZ9Pe+UPUfx+Hy+PmELwsP1ubO7Ds699uD58+dhZ9kvpE7c3NyErPNsKSj/2PVRgOqgNPWAMIT8/tTnfeKh51GasI1porArLBhOWspMp1NdXFyEQuT7AYV4u3t7exoOh5kWVRhbBBTDx/no3uLEaXNX0AggzJqHggiVwgwAujlalDGz8ZgHnhUv3RkiX1iUH4aFTcf8UvFKThjtVvCkKNaaz+eR71etVsPbury81M8//xwdFlKWxsOqfrmMbIuRd8YpLwqQevK85h4vQA6gj8wiAx7CYK0fH9fH27bb7djksOmEWFGszC+yslgs9Pbt28hFdSUkbZgXBwAYWOQCJQhzgLLnNWmzL1HMKDkqSovFYhhHD8U4G8I/cp89J4yxIMcAXt7L78wrvV75XL/f15s3b9TpdMJRIlLhgD2tnPaCnVar9YEs8Du6IHW6t4VFZbwOTt2IoS/SsBl5+0RyiBQBjpwlfHx8jDQpnFc6OGB8Go2GBoNBJrqCfnO9g65Ab6LnkEUMNoWd/oyei+eyzngB1rBVrB3vSVMTkDmADfrcnWoYWkkhcy7nfI+DZ+4NGGY+sBEQCxSJFQqbqBmn+tACzsfqpAnfUy6XozbB9ZTrr1Rut0F2n9L9qSPl+tZlgveg4zza4uD/5OQk0wLP5Y7iNvKGSf3wKAAyiXPg/3fgKCnqBIhMAULd0XLQmqbQeWTYwSi6l/lIbRX39PliP/nloDVNi2DOfF/xOu8DD3FIDCw0z+mOnJOHrF86H+xFWlWl5GZKBn0KL3wUoLph5/+pV+Bfyhe6Z8EAUTSem9FoNKKdCPl3vtmq1ar6/b56vV5MzLt373R9fR2sKfdHUOnrSZsbSQH6AKtUBy4Wm+P0eB0W1M
P8kiKPj2ci3IRQeHsSxoqyAwzy3Mwlng1V1H4KC0DIN7eHIXx9MBgoY97D87ARCTVRyffixYtgWgqFgk5OTvTzzz/r5uYmmMFUIeKY5Hn228akMmZn+6Sss+VA0Q0+4U0PI6Os8hLPHby1Wq1oGcX6DofDyGfb2dnRYDD4IC2g1WoFwECxMlZAgOecpUwVQABHbLVaRUN79iDhozQc6rlLKHEAJAoPuXa2C4bNiwNRxJ6OwPeUSqUIH7OPfI3K5XK0nPLQG3uUvdLtdsO4OxgGCHQ6nQi3OnBOQak7Jdsiu67APazHWD0y5M6jG1eKw9wwEq72inqP5OCQ0V4JtpW0AN7H/Zh33lsul9XpdMJocXnUyFNCkENn2dzIEdbHsUHuHKC7fPp8SBswT6oD8sVnPB/PGVpnqQHikAUAIcbh+5NTiXguiA32CGlnOGc8a8pYMVfufDkL5bLh+mtbLo9a5YHqVB/72rDPvW0de5Y1os8mthqHAp1N/ik2HT20XC7VbreDSGIdPTWAGgAfG+NyuYC9dyDqDC3O4FOOOzLpl+tkB4QOXl1OeR9jIkILKUCE2tMmeD5sD0yq64mU9HAA7fvVgTgtCulcQVoRuIS96Pjuc3TuRwGqg1EGlIeGuZweTtm3i4uLjLLh8pOlmCAEwjc2Te97vZ7u7u5Ur9czrJW08eTp4cgJTs6muGAQhnQvmZ8pO4byljYJ0yh1fpeUuTcgHINPRZ8Dk8fHx0xrK4ApbBtpC6mHjaKdTqe6vb1Vq9XK5NouFgv9/d//vTqdjr777ruYSwTUBRPQUCgUdHx8rKurq2j/w7P72qdhG5+7vJDOl7h8Y6XK2+XUvdSU0XFlx1zg5OBwrFarAEUYc9rz3Nzc6P7+XhcXF+r3+5pMJsGM8p2ADcIipHLg9fM3PHTPxdvd3Y2TStKWJNyD52Gf4CSRZsAclMvlMIjk2LkR5VnZ18wphjh1WN1LlzZ7k33gwFTasGt8X61W09XVlQaDQeiCdrut6XSqVquVWTfGBJDd2dnRixcvIuyKcU/D+j7mbWL/U6P+lPzmARMiL3QKcaCFXDN3pCzRRgmZQu52d3ej1R1tfDC4kALVajXSmgBVGFFkAPlxpopIT5qK40DEDST7wUGBp5zw08GIr6eDdC5nWfkMe8bDxSmYcOKA6JR3APAoAzqAqJS0bi3o6QfsOfYpc0FaAd05pGwhpsvvNundp8aSEl38o+gOHYuuQO866eVpGciT5+tKCluNXgRbNJtN7e/vR+QgHYu0qdfw+WUM4ATGwPM6gHRn0h1njzrl6SK/F+Niz7hz7UAY2XFHj7x+aRPx9fsDrKW1/APw2d+OTQqFgobDYaatn68bxBzjZX7L5XL0osaO+jy5/f2UY/XJIin3evxL+FsK7FI2zTf9bDbTzc1NsB8YFO4JYHp4WB9RenV1pb29PR0eHurx8VHff/+93r59GwIDoGNjS/rA2yWE7aECvgfPF4MIc0qlHqwESgFmAiXLax4eYJE5exwm0nOWXNnQdgtBBHx4PzcUlnttMKWwIRgmb6XV7Xb1l3/5l7GWzWZTq9VKz58/18XFRUYpLJfLKOJ6+fJlJkTH+jvT5NT9tijHj12p4+RgSvqwL1uhsMkl4r2kpADy3Yg6a3J+fq5isah+vx/ydXV1lTGqkmJtpWw4BjYTBYSyLhQK4RG7V05EAEDJWnkKDoY2bdsjbcJNyCEheffUHahiGDxMhVF3/eAOHMqT1IObm5sMe+TsH/u62WwG488577T78jVNdUiv19N8Pg+wkOep82x5DM+XvlzXpqHRFEinTMp0Oo1T9HBapE2aFXoGlop14cARUoA8cvDw8BBV5eyJvb29SM1wZ4PxEJ7n/ewT5t2jMC4fOM4exncWHRlMwaxHjfz+bld8LpzVKRY3ubHoWdIUJpNJpqApBd3kpzJuZwYx/MxVsbg+SKJYLMZBEqwJP/neFy9e6Ntvv9X/+B//I+TB7e22sa
ZSfgvKp1gy1xGz2Uzn5+d6/vx5yA+YwetSuCd20eXghx9+0IsXL+KeyBDrViwWI0KFXEJCIQfYVOYV3e37UVIALmkDJHkm9CM4JrU3vI4+hqBIU1EWi/UJbq1WK2QfmfVomOsJbFCptC58AruwB3Hy3AmgSb9HtXiu1Wrdeu3du3dxMhs1K+4YVSqVqH+AADk4OMiAcO7r0YrPIQQ+yaCmStHRb/rlvjkZPALAA3P+LRdGnbwFlMD+/r7Ozs50eXkZ3lG9Xg/GiIdloTzMzaJgkAGGaY4Lz4JnkuZOeSgA2h5BjgksZ9tgwIQ1Go04UQWqG/CBMFIBikBSzY+y8wo8N6SFQiE8Hubs5uYm01C+Wq3q6OgoIxAo5t3dXZ2dnen29jYTRkVoK5WKXr16pV//+tcaj8e5no4bTk/n2CYWCnkEgKfjc1l1eZAUsuPHhi6X67xllAGKhrUnh5M2ZsztcrnM5Pa58cYz9tAJ8unOCJ4xcpoXpneDj/wgux6ZkLJV7ShU965dIfOedG8XCuvcb/Ls+LunGnjuF5eHfMvlcrD13iGA+cMb5/OcfMW6ekoBY6xUKvqHf/iHDwAo85TnVH+ON/9vdeXp2/R1/s/lrOPt7a1Go1GmP6ykCNHzWWQRvYU+oo3aw8NDnHzmYHVvby9T1DYYDMJRcpbFQ5nIehqRY3yprHHxGSmrZ1wm06gU9+Z7Fov1aX60K/QonjtefL+kzF5I04DY8y67xeK6e8TR0VHsZWxSsVgMG3Z4eJhpIO/kBc9BNIN2hGn0Ko+924bLCYz0ygOvPNfe3p7Ozs7i6E1aHKVgnLQVaeNk8PxEnrClnmMJRiC66I6Us6HMvX/eHS6uvPA3cuV6zp1H38/udCFLTnohWxQ5pT1FIZaQXS72uXeYALdIitRHGGv2PcQC5IfbO8iSx8dH/fzzz2q1WhGd8egr38+z+f5P5eJjcpJenyyS8psiGExiytKwKO7twVKOx+M42QDD5ZPN8WSeNE4fUzb0zz//rNvb2zDuVCV7KJFEaWnTOgUjj/fgISDCYIDdWq0Wi0p4l+fzZ3Svh+8mv4vFAwwDdrx1FYYAhegeIiwoz8F4ycuB5S2X11WiHmajKrJer6vRaGTAiXtb9Xpdl5eXH5z57MVnzuyyofzKY8236WIDp2ArBS3Sh+EVlBSGgSId7lsoFD5gj5irg4ODONEHhUv3h5RZkhRMO4oUJ4Y1Q0ZZXw85sd98TwHwUBKeZ5cqZFfkPJuzs9wbw4zzg0NFCxf2jwNrXwfmHHaO/qqLxSJOCyK3zNsMwaSwLszLaDTKzAXKEgW7s7Oj8/PzTDjMDf22saZ+pUDEDV06dne+vEn+y5cvI6pCuhBODbLjzfNxeHGYvUctThF6qVarZQogkFlvX+X6gp+u41Jmyv+WAln2JWvtOhOGlMgYRIR/BzqY1AfmMH2vV2/75ZExjyKwNvf39+p0OlEkhfyzJ1gj9j0pKs5ESRtQXCwWM90T+LwDcuaF79mGKJavZToetxE+x9hanHnSjDjBy0EOnXkAr9yrUln346RombQgUobQq7Rgc8eB73PW1NMNmXtkhbQUJ7a4sKNuJ1PW250saVOg6qF55gvnBD3tNoZxIb8OjCFL6IkNEEZfoC8ZL/dER7CPcJTAGtT3eOoQ93MibT6f6+rqKqJlefLxuZjho1KdejxOVUsfnrHryt9RMrQ0guUJ0Cg1WnEwcby+v78fRVLeoopjSWEdeX13d1fNZjNAIkd+pa1vMGhMMoqRo8mcTWWRnSX01jwsDJ9B0fAceClsiru7Ow0GgwyDyjhYeHJRESBnvwC7FF9J61OzEA5ycqRNH1fAi68ZfSVZPzeE/O5AyhmplK1wOdmGKw2LPcU+YWxSpY+DwAlLAFHW0R0Q5vWHH34ILx+l5p4/BSg4JuVyOY7S9XZVHi4H0Hnunhs+aWPwvSUPcuzsEArXWSMvhPJn8/A7MgbQJWcRxcc+wlNnr3O5Yk
Q+/J61Wi0UtBtjB6TO+AJQyc92J7larWp/f1/ffPNNJizrhiJPdj/Xo/+3uPL0LsYoJQ18vy2XywjtSetn43AQCACcDAwWjgLyjtxCKiD3Dw8PYfB2dnYi39rHQ1Ef+pQogwMx5NbZyJQVdSaG13lO9JgDSlgvadN+j3Ehs17AlOf08Dv2ieiIs7vuaDlDhKNEXi3jJC+dvQLTRzcKN9jMQ6m0zgu8urrSy5cv44x4xubfz5i3SW7zmDLXv6mTVSyuU+TIhZSUsZ10RZE2jgL6yXVZvV7XcrnuXjEcDjP6lxP8eN3Zar7P2Uy+K8UIyJOP3WXP9xV2nPE5q45sAjyxDdJGl4N1+Jwzk4yPsTN+yA90so8Dfc//vWDx+Pg4ngfb8vbt23gf6WYQMswN4/EaDdbiq6++0unpaSYP1uXj/xMG1ala93DzhJLLhdHZHXJqCLOkuSVMIpPvHjgKwHNF2NRMMl4Dp3uQV8WGL5XWZyM7c8uzFYvrxrZeyFEoFHR4eJgpiHIQwOI5iMwz9k6ho6Cur691dXUVhxQ48JU2ht7TAQCakoItZePyf/JRj4+P1Wg0Pgh9sSYYCyohPbRRLpfD8PjlrI2veZ7wbcOVjuMptjcF7bzGOr97907fffddGERyiFIH7P7+Ps7QRuESbr27uwvl5yHpTqejo6OjTOuZlKFxtggGFm+eUCUyyuuACGdAXfG6snIw4bKBPHueLJ66h+197zM+H7ODf8bCHHsoCVBAX+SUXeL+OEwAKmct3IBx/Ct6JAV9/HQDui1XqlOlD1tl+XhdB65Wq8g19/uw1z03DzldLBbq9/uhqyi8xDHz6IukOACCA0fQ4+5g+Pe7XCGv7LE0B8/1Ds/E+iHj/M1Zf4CozyEOCk5dGh7nb8wJz4c+cCbMQ7rogWq1GofFEF2ZzWYBhCEZ0MM4mG/fvlWpVNLZ2VkA5rRdz5s3b/SP//iPMTcUIvJsqb7dBmIgXTu3ac64+XNMJhP1+329f/8+jihGJkgZoWWU52wiV852LhYLDQaD6ITinR+cHScChIy4nKEHvaYF2WItUobUnQdkJ3V8Xb+6foQpLtaCAAEAAElEQVRM8IiVYxyP7uaBZcbG/nH7wfsBj4ybkybZi8wHQJNC3263G84ba+WdjFLsgs04ODjQ2dlZFAM+xe6nEdm865ON+pnkdEHS9/mm4X28FwDEkWJMOIP3fA8m2QuPyEfz8CyfZaH29vbUbrfD89jf31e/39ft7W2ca+whEr7LQyecEiQp8kgJNZAc7GwOwoKwIWDkLhJuLxQKmWNQJ5NJCA/5HQAKnttZBAAQx+zxfYRY6/V6CF2j0VCz2cyMjctBPRsCT9+PXKVAyxkx34R5cpB69ttw+VhSBu+p9/OecnndMufZs2dxVvxoNIqN744bG/vo6CgUxldffaWrq6tQGO69osAODg6iOTLyQDiSC7AGMyRtukikMowS5Hn5Lld+/qwe/kZ+HVQg727snQ0ilYHXceDoouH//B48ByFXTixh/p8/f/4BEyEpinpKpZJOT0+DWfbjO50F4BAOd6J8DVwetiVMypU6Ten+43KQDWg6PDxUq9XK5OXhADjAkzY9pDF2hPSq1aouLy8z7c1wTijsBEDkgX8nJ5zo4G/IAXKJ/HEPl0MuvqdYLEYhqzNKzsL6vHmeXxoB9LQUZ7Y8LaFY3PRGJXI3n69bGs5ms0iRYK95a65msxmvcyTr4eFhnL7T6XQya4jD9dd//dc6Pz/PRNKYg/8/OFj8TOU41UHL5ToH1cEQeo1Wke58oUucNJI2vcpXq1XUqXiXBGkTiazX63GEuEcQXReg5/Lk2wEr9hT5YfzcJ03LSIErMoiOd8DJHBYKhci9Z7/4uNDRyI4DZPSy44j5fK5Go6GHh4eoqaC7Ae85OjoKuYWJBi+4vsU+sSbT6VS//e1vo17IU15cbtm7n8ILnyySSoXKJzov18KNAP+nLxa0PQ/pTfI5WhNvEW
Hy/EifCDwPFo++hw4gC4WC+v1+pA+w8AiFT5a0YWE8yZhm4OQQcl+ACZuJ/E2ekcMCCBXxjxAQp5J4HiHP7+DbGS6EFmOzu7urk5OTyF1xNsAdBPdwfIPgNXkOYL1eV6fTyShGn09+unBtk2H3C0OYtxnyGCqfMwzN8fGxDg4ONBwOYw79JI5isRhFf8yppEx1r3vDLp8eikWRk3u2WCzivl45mYaOPMTuCgtj7LlHHu6GheA1D9O7p8+4yL3yZtSkKnAABCAgzygwDnJjd3d3dXx8rGJxczKQt8WizzGf84t9VqvVouih1WrFXqzVavr+++8jr5JncObF973rrG24HGw7+ECWnRFO96I7+s5woEtxtj2vstFoxDx6+spkMon2X0So+C72gQMBZzqdPWPsKZtGhAnny51Int+r6x1AuNH1MDyf9UiZtGHCnJxgLzgxkTdu0iB8P+FI3t3d6f7+PvoN42z5a5KiBSBjff78uXq9XuSfc5EbvLu7q++++07Pnj3T//7f/zsjG4zTZWIbiAHXsy677Gl3PFyXnp2d6fDwMMgljzbt7Ozo6uoq7ul6CSKLNSN9AufFv4PPQzZQ/IsOY1xgD9c7Hv73VDsp250C8Ox5xykwY6yu+wF//D0PhPqeT/VCSiC4HfLiWhwnTi7jeGLGSC9Tirt9XOCDWq2m4XCobrcbzm21Ws2sW6lU0t/93d/p5uYmooN8Rx7Z9VGZ+tgf3UA5Kk+FMP2MX6VSKSqbPWSIMe50OlEF7AvCd9Brj15yKbMDe9pqtUKRcg8o57QqmmfBu2CRWVBpc9Zts9lUs9mMgiO+N/WwaWnDaVDSOvQOCB8MBur1epIU7CrC7Pk0fsqUpwswJoBAq9XS8fGxms2mzs7OVKvVdHR0FOPkmb33JXPgyn0+35wqVSqV1O/39fd///f605/+JEkZ5e+eYJ48pMr9S16poUzlyje3/50NBiN6cnKim5sbrVarSE/JM5bkV7t8Yri9JZXnIlFNyT7z+YQpdMY/dTK4fONjfB30OoghRJZ+ntcdBPNMnEzGmKRNuzbPa3T5Yl68Tx9zIW3yFekBjNy3Wq04RccdRRgVaXN6Cu+fTCa6vb2VtGZS/vCHP4Tcp2EkX283TNsku9IGNPNPyp7hzcWaI5MUVhQKhUxqFCFPclI5aILT0YrFzZnaGGGcafQHJyPV63W12+04AGW1WoUz7s6YO9+pM+vy7c+cMky+V/1v/p6UcHAH3+Xa/w5Y9XWHHHADj53w+yGv5OTRSqpQKER9AfKcMs2+t4+OjiJ87c60A2x3KP1ywMfv23K5zpE2TqbrXtaeecNZcazBa8gw+IG0NNYUXfvw8BAHogCe/DuPj4/jlC+IMBxmT6HLwzWFQiHSl1weXSc6wMSGeK6myw/jdkbYP+/35XLnC3nnni4j6XyjdwHlfgQqReHoaSKxqZ0nDaVWq2WINv/eYnF9+twvf/lL/ef//J/17bffZphx3sPPzyEFPlkklYJGXvcFfMr7ZILK5XIwHABVQF25XNbh4WFQzu45l8tlDQYDDQYDjUYj9Xq9zP0RYpQtYIp/sDCTyUTSJnGd70+9fs/xYGyuxDzPCdCIIabnHYYThg0Bck+P78dL4XUvsILxQEEBBgDBeDmeXM94APt4P/f395E/cnNzo/Pz89jIp6enYZAmk4l++OEH/fTTT2o2m1GJnno+eQpxm4y7X0+BMunptAQMC8fE1uv1aEIM24KimM1mcTypy4cbZDxw7gsbxRhSRUd6hStDvg/D6eEwB6e85pffz5/VvX4+j6zSU5e8ReYRkOhgkb+5nHhol++4v78PB4uqaiIOOJqkDDjj61EPd95gto6Pj3V+fh5N0SmIGY1GmTlxNsLBqcvKNlyuE9K1dJl1lob9XqvVotemOwpe1Mn8o7Nub28jpcW7lrAW6HBAQqfTieKdNMpFJMn1KfdDvpAN5ALwzPx7Wgxr7Yy/5wc6EPbLHTRn3rh/Gk
3i8rBtup8w4F4jICnTZxsbR7oE43fwwZgoZOz1ehnnFnA1nU7V7/czQCOVUeY5ff4vcbnjkLcmjiM81YyDdPzADwfqnU4n8AP23h1lwGOpVArAic2U1s7w/v6+ms2mSqVSpkvIcDjMtL2DlYVx97xUB1pOKjju8TQmZx/Zb4yXnGPPD01JCccjfL/vG9e9zjq7jvAcXQAz1fWNRiMixOgJ7BL/52J8y+W64NB1C3uy3+9HV6But6ujoyMdHR3Fmjupk9rJp67PquJPvdzUsOcB01S5ErZwAeQBAZMIqCf/khtBKIUiCpp2U82OEkAYWCw/htLDjUw6wM9zkFhwL4JCUDwMiReGYaDXnRt5wLF74Xksgp/s495ksViMBvGE7TqdTvydLgFeSdrv9yMNgLnFy+d9y+UyvCfWsFgs6ttvv9U333wjacNcOWBwQ+GeUx7Q+9KXr10qo3nMqhsivEWqS70ViW9y2Kg8oyptWG8HEoXC5rg+N1goTToxsPHdkXIgymvuLDkQ85SU1IHwPFfuBfszGo20XC4jFAyAYBy85sV7Dpi8WToXzwFbVyqVgoGDnUtzJl3xYmSQXfYuoPXo6EitVkutVkvVajXOlmau3Lj7HPq1LWF+B1epHvXQvAMC17kvXryIsHGxuElBcmaQe5Pbjux1u90w0NPpNMMuFovFcNZYY8AvJ/a4g5ESGs7w+GvoXGljvNKwoLSpkgbU+r5JHRGXed8PDn54v887couMETb1inCYIvSAn8C2XC5j7tEjzJ+zXegH0se868Visa44/93vfhdHIrvt8Hn19f/Sl8+7s4ap7vHxM//euoi5wi56uyXSJJAF9BbsvQNU7ttqtQIjuM4sl8saDofBBjI2Z34Zp7TRrw74UmaYNlaub1zunSxxRjV1onyvegQA58xZ2LT/sJSNOrkzDlAfDAaqVCq6urrSjz/+qLu7uzim1J85xXhgHu9es1qtou/yarVOc7m4uIh8bZcPj4h8zvVZOag+Sb4g7oEyWH8YSQEgX758GR4RHg/MDjmU/E7uGN9bKq2P9Ly4uNByuYy2PAABTunwDcF48JbSHn2SPuiJ5lXIPK+3+PGiIQQd9jZVRORulcvrCk/GA+DxBffwKMKHQsJjZ95Ixk+f0wHUdDqN/BIM2Wq1CiBBbiPPjrfP93O0LHPvRsKFl7VPWbltuVjrFHg85WA5y4G3jWKDbUy9c2kTGkSG/D7u1eMgUGBxeHgYMufG1hWtt3Py8XpRgLMNABJpw3L68+Ik8jecuNlsFgVbAGTYBkKZPp/pv3R/YAz4fmmz3yqVSigz9hDOn6RoY8U+QolzTKyDEfYuocLFYhEOWq1WyzT8Zr583lK52IbLQ+N+pc/hyr5QKISBWS6Xur29DZ3nnR7cWOE0oG/JF6vX67q4uIhQKbJfLBaDFJA2eaXoK1r8tNvtDBPrIVBkWlIUfSAvyC86jTSS9D6sWwpyuVJgymfzAJ7bKuwRe409nEZGiK4QZWg2m3p8fIwjNOkX6elZPK87rsgvANUNPuFX7yOZPpfLw7YwqD7HPteprEoKu0OxKA6Ag0h3zCUFUSVtAB8gs1AoRIEUERbYbSJfTooRoeXAGvQVpJHjGbexqWPlOMijGg5M+TuRgtS5TOcoBXKOh3Ci2IfObkI4MTfU+3CIAPJ+d3enTqcTfeVJVXGZQvdzMSfMqf8rFAqR3ghoHo1Gmap/x42fC1Q/yaAyYPfYpQ+P+HKPl4eR1uGPw8NDFQqFyHXE2yGMwYakmMlPjODvg8FA19fX2t/fV7fbDeXLyQaueD3nxyl3VwLOkjpghUlgkV0gAaxU0lcqlTgmTFJ0CwBswpL5ePguDIezF4ALxo13CFjhNTajtGl0zHyh0IfDYQASV/iVSkXD4fCDEAKM2NXVVRSapEyNlM058flF4LbBk+dyUCd9uOnd25eyoZSXL1/q66+/1vPnzzOnZuCAeCoHxXkc7IARQyYBZsgBitlZA2
lzglS5XA72yv+5rPL97qn7uLwdmu9TWgTBqnGIAH+HrfDPIm/sIZwmLxzhe1H8zhTwu4Pq6XQa+acO0gE50lq2z8/PNRqNPkip8VQYQDUhJgpPcOz4TgfzblRSgPOlr9SwSxtZdf3k6wqb/9VXX+lv/uZv1G63Q6dgMPi/624iKugD9AXpGIwHoOZdAJz95nVYcCcs3FHhc9zTCYIUlKeFJO4kMn4AjAPNvLV0EOrziu3hnjDHACXmh73mzuRgMNDh4aHK5XLm9C1Jur291fX1deZgGV9Hjz6whxxowNA+xSb7vGyTzpWyetZtg1/oif39ff3qV78K0gY58p7Rk8lE0+k0ikcBtEQPsbOLxSIAKukryD3AzaNbNP6/vLzM7DOeQVL0tsWhA/TxXo/G+L7yNcHu+vxgWx28+f18zpAPn1PHNR5pg4hC7nhmLpjj2WwWDqm3qHz37t0HY767u9N4PI46BOwh6T2LxSL60KJHyuWyxuNxYCKXCyc5P6V3P/skqbyNzeZx5ZAuNBW+t7e3Oj8/j1ZQu7u7uri4CIBFhShKb7FYaDKZaDgcRn7kYrHQyclJKAJyW/k+/rHR3VvgbG+eBaNbKpXid4TZ8+lQYNzXc1+4FwuFUvP+ju65o1zJLfVeraVSKXOGNkIBYGVjsLD8TpuT8XgchQp3d3fq9Xo6Pz/XixcvojcZzOloNIocGTZEvV7X/v6+Tk5O9MMPP8Rz+2bA6KSKcZuMO5dvBjeMKYOWerBcw+FQ796903//7/898qSRNWQDhidldiQFCzoYDCLVolqtBntKc3q+00NYu7u7cSRdo9GI3ON0P7oH642eGQfAT8qGkzD85Jf6WeLsR95H7hThLHd0yMnjwvByspmnk+Q5NuVyOUJKKFVJqtfr+od/+Ac9Pj7qF7/4RZyyRU9OTtRyFtBP2aIokbQYv1xPue76XI/+3+JiTKxx3rozXmesxuOxvv/+e/3zP/+znj9/HhEcdxYcaDno4e/IjINKL2RJnRcfx+7ursbjcUYmpGzbHdbD18UZVsbhLJyzRtzHjbt/DzLr9srnEmbewSzfQRtAb6UFkHanxlOqAKfj8Tjm7re//a2+/vprtdvtAE1E+WBRYeR2dnZ0cnLygR0cjUZxihd73EFKKivbAlKdKEpJLH+dlIivv/5axWJR79+/zxTUsG70NIWJ83aI3AfdwFGd7rDh8CMvfBZmularhSMB2MLBJ5rqfaodNPK8/A05ISrql2MHn4sUI7DO6E0nq7D5npbH31y2If64D2Oi4NzbWUnrA3smk4kGg4FarVY4T4wHPELfaU95wcliLzh2Ojg40IsXL/TrX/861tR1FxjjY9dHASpf6BucL+JKgWnq7VYqFZ2enkbi/m9+8xsVCoUIb7LZAWIoBBra03an3++rUCjo2bNnYci63W7muL7U2NOnDgbSnwFAijJFYXj4nvd56J/P05id0DjHuO7u7kbeDB4fgIMLYcJAYHQRHHIWCTsgmJytztgxzuPxOMATxme5XOpXv/pV5mxonu/s7EzT6VS9Xk+vXr2KM+RhT7z1hF8uYCnb4c7BNlypF58q8fT/LjvIZaPR0OnpqarVaqwPewF5Yx5QGoCxt2/fajgcRgEEHr8kNZvNSFHBYPm8LRaLOOLXwykOpFOm3xlGV6aEthyUcg/CPzw3jhuy6nnd7rCgvFGGhHfZH4SGUGQAeQwWBoNxc6Y7XQ3ow3d2dhby7iwsxYF83gsOONrzzZs34fzx3X6lQG9bDLz0YSg/dbQcFPr7O52O/vZv/1atVitYfCnrOMGA8zosPuvIXAHUSqXNKX/sCykbNfP90u/3NZlM1G63MzLh4VO+l3G47KfrJH3oaOEcIUcuWx7xcQAvKWPYXZdLChKDfH9CqA6eAR3o5cViERXPyPBisdCf/dmf6fj4OAPIPOXLGTT2KIDh/PxczWZT8/lcz58/zxh9HzvzlurhL3nlySV6xR1Cj6acn5/rv/
23/6b/8l/+S9g0abNWRF3BBPzNSShkkANmeJ2C7NTR85SXWq2mN2/e6PLyUs+ePQvAyOUklbRhVB34cV+PRLImKbBNozW+tx2YOtEAnvEQPmPiM94daTweB+GAfmf/7u/vazKZqNVqRRvN6XSq8Xgc3TmkjWzxTN7aDx2A7uAnBAXYxefEZcKf/1Oy+1GA6kDNN4IrdAcmqbdEqPRv//ZvdXx8HCcz3d7eajQaabXanCE7n891fX2dKaT66quv1Ov19NNPP+n+/l4vXrzQ7u6uJpNJtH9iUlwIGC9gV1rntdG+xj04jDQC6GEWSVFtjHDAtnKMlxtNNkShUMhQ7O7FsaC+CTx5mgXkJCiMMM/pm5UiEyrM7+/vAyh/9dVXceaztClycFDE86HUPZeSNIA8Y8nlXqAD/224fOzOdqfMqTM0PN/BwYFev36t//Sf/pOazWaANrxFQuRUiTvLDtAqFNbhdJj/09NTVSqVOFDC2y9x8Xkcq+FwGMfQ4cClDJPPNw4OcuZzAdtJbjKK2llEHCruQ9iSKAN5gqnnj2GXNkrLGWEMt7d9gaVi/kn7mc1mOjw81KtXrzLpBgBvTjhqtVoREWEOAEoPDw/6xS9+ob/7u7/LyEQe65+Cvm24XOciVz7G9Cfz3W639ezZM3333XcZdtnZQIDR+/fvJUmtVity5GGfd3Z2NB6PA/CRhw0L44wOY5zP5zo4ONDl5aUuLy9Vr9fjABKXczferJXLtOccAuyc/XX2kWfkfs76+py5DkPXOvOMU48MEtLFPgGISDejoFdSkAuNRkM//vijvvrqK3W73bAjjHm1Wke7YAA9woduZu9Q6Pf+/ftcfev2KZXnL3mlBFY6TnceSqV1C8e//uu/1n/9r/81ky+OfLFGnovKT+5FKgD5wD6f2H4OBHF7Wq1WNRqNwvm6vr7Wq1evwhnw3NJU96L3HHT7HOTlSkvKfJ458f1L8ZGDev9s+v2u4ylSpDDbT8UkLcLBOZhhtVrFe+nog7w78VcsFjNF4CkedOaXCMzV1VXmsJTUof4cmf2sk6R8Mv33PCbVf5ZKJR0eHkranBWPwSP/rVQqRe4eSodwCGwIOUGHh4dxzjx5f25gfawo1J2dHfV6PQ0GA3U6nRi3h+q5B8qLRWPDkKeHAXQma7lcRrsQSRHS8YbmLDoKkNdRvuS0FoubKlkPB5GLyoVBcDYVjw6QwZygzNP14WjX8XicAUGMHwWQrrF7wena+2tf+nIP1D371OFyY4ZyOzk5+aCi0b1j5qBarUaKBYaWsPnOzk6cOFOpVKLdRr1e/6CC3kM5yC1OCMCQZ6lUKh+E1V2ZuuHlWZGX6+vrkHHmBZBBegont/CcPBPygDHnvsgIe5c58vwv3usV3u448f75fK6bmxt1u92MUfITUmCbrq+vVa1WIy0G5QrAhy3wMCP3SAGpOyzbcjlDxJWuqcv0wcGBTk9P1W63MzIqbXQxDq6kOOmFNfE0gEKhENW3HB+NcZKyxVHSpjqek/Pevn2rb775JnQcTgqGHyPGcyBbaZ6dG2n/GyykR7V4Nj+ohPugG33MrDfyDGmCE8VzAwY8Vxu2rtlshk3Bfn311VdxX3f+PfKAPEOSSJucPwga0g3QCQAAn3u3Q9sguz6eFB8Aevy9+/v7AehxkiqVSrQ3lD6MCAHkmIfHx0cdHBzEfCFvFKl51Ttrz5rWajXN53O1Wi31+30Nh8OI5jpQBCMgzwBg16Ouf32P5jlX7qAAmNkHvOb23Rlg5MQdO8aAEwUpAtNJPQm4gs4GOzs7ms1mKpfLUdeD7iddkO91B5fnZFx7e3uZji/0Un327JmOj48jdYLP8fNzZPazSv8clKRCyO/+XgbAZpvP10dr7e3tZZSaez18FioaxTGdTjUYDCJnD8GCvXTB4LtRuCguzkMHdLqyQ+mmrSbc80uVPYwrC+jHUSJEJHC3Wq0Yq+em8t2EfkkLcADMhgTMI8TL5TLOe0YwUI
TMsT+PAwnmnuvt27c6Pz/PnDAzHo+jnYrPSeqg+L9tAqfSxntPWZS8n65UYIyOj4+jqIn34WwAxhyYogwBl+RNz2az8NIpCkA5eVhS2gAJmHiUroed8gw4z4rhToENOUkUzrnTRUhzOBxqPB7H2eweavNm/N6+hNfYszy7t4ZxJQ14lRSy1uv11Gw245lpBA+IddYPgw4QYqzkSEoKZTiZTEJR891ceXordXK/5JXuJZc9V+zuMNZqNXW7XR0fH0dXhEKhkFkrN37z+TycJU9PAQDQsH9vby90rxd7SsrIMd0DGo2GlsulBoNBjNefw3PfUtCQOozOnvN5Z8iQY/IU3dFkz6Rsl5RlcWHgZrNZ7GX0MvuRqAKgwdNX6H1dLpczqTvuVDK/EA7IJTaOfcze6PV6QUpQ7MaVspT8vg0ANSVBXMc6OGUN3eGkXRdr5gWjgCpsLFGswWAQ+8JT4WBdvbUiY0DGy+Vy2F4YcU6s4l6eWuHyRVqJ62KcQhzBNAeV9zl7yk+P1gJYXX7cKeE1CCt0PmmQvV4v9KK0kRfwCFFAWicWi5vDhbz3O3gFIobf89aa95L60G63IzXObZRHLBxLfuz6JED1DZ8qx9TQumeHgeFM+8FgEJMEyCNfEyXq/UhRclTnfv311wEI/SguKdvYmQXFUy8Wi8F0sXAsqpRtxYMix9jjgXh+VgqmARzkWyGssLwILgwEfTMRDsINvlkd+KFAPY3BQbwDppubm2gfwRrkARZYtOl0qmfPnmX6qvpJKL6urvydhXJhRV624coDog6y/X0peC8U1h0nPKSfgnzAZr1eV6FQiLxo/k544+HhQc+ePQuQyjxzL+6LzLKWODXX19fhpCBbbHZnlnwtkBUPq5NSw98dsJRKpWBruQcK0uWOEDwsMYqVQitCwThi7uA4E+JsGu8vFovBSOHY+p7meVGyvk71ej0AL04tYDtPTj3qwtxvi9xyOVuR5yi67sVAez4ugB9DxFoDitChgCnfJ+PxOCIDnNDnAFnSB0acqmkAFS0BAQRp+D4vhOmFePz011gvZHo8Hqvf72s8Hgfoc8aSz3iqFXKLLHMKGfI/HA4zh0dIygB8Uq/c1gGoarVaZk+6vPP8Nzc3AUAp4nPgPBqN1Ol0tL+/rxcvXkR6httXlwd/fRsubDFX6vj5eOv1euTYp86Eh+ixvd5BZDKZZIo3PRJaLpfjtCRJmbxp/47Hx0ft7+/r6OhIzWZTV1dXmVoR7uUMJj95Vil7aIpHSN1JYozcN9U3vAZx5/sgtT2wuLx3sVhoMBjo6upKo9FI0qbvPGwyOvnh4SFSHXG4vP2UM7POHnvuq0dokDtqclz3kDLkJJbLw+eQAh+V6jwA4t6RU9z8dO9/b29P3377rf7iL/4ijsii4qtSqUTlOEeJei4UBvvnn38OL4iiEcAeD+1j8Up3jkBFWMkLdSBKCJOJR5B8Qt1DgnniOQF7noNKtTzhBhZX2jAFMJQoNwBhCjTw7nk2L5hiMzw+Pur8/Dxy+ggLz2azDMDwjQHIGAwG4akyPn76XLpMpELlIGlbWKg8BZD+zYGiM3B41SmzwWeREX7H8728vAzlSbskevERJsGxcFYeBYDDgve/s7OjwWAQOa0paJM2OU/+LMipy2qxWIwjh10JI9teSOgKn7liv/BengEZZ08iy24M+DxeP47p8+fPVSisz9CGNSDnj8sNF/d4+/ZtPAPzCchmTqTsMb2pXOY5X9tyMX5nUFI5ddBWKBSCASFfDqPmjA2fwVh5vp2Dt8vLy2BZCINieNIxer6dN1Q/Pz+PIjWegfe5A+V6lO9xneOODfvDQ/xUbjtLyXufsl+8jiMzGo1i35IbiVx5XjbFpESbRqNRPCcpYDwDc47RJ52i3W7r5cuXQdCwz8hvpW6A4kY/wMWBu7+2LQ4WNtvnPS9yISlYZ2eHeS+6kM/4PiBPl36bnp5ETUChUAh7ikPvY2Bd6f1cq9XUbDY1Go
00Ho8zdRpO9EibOQc0+7j5u7OXPDPEQ55jnDqgqRPi3STcmeS77+7uIkWB7ilpFJWoKfn779+/jy4RkHHuqDlTSz2E52m7TmLvQ8owf+PxWMvlUi9evMjsCeYmJc7yrk/2QXWPhi9IlT4L5GxHuVxWo9HQV199pWq1GkbaPcxSqRQb/fLyUr/73e/0/v17lctlXVxcaDgc6ve//71ev34dIXuYKICsezbpIlNZyskn5FsROvWmv87COvCjQtTzjwAQrkQZB/dF4bFIzAuC4l4LAIexs9HZSE7n7+zsBBXP52AOoNRPT0/18PCg9+/fx3d73g7hjefPnwfbyvt6vZ6m06nevXuXAVFPyUYaftgGRSlle9SlY2NjpAYfmfzmm2/07/7dvwvjwIEH3Jf38RpMKzJHOHAwGOjo6Ch6xHm1rjO3qTJmjckT7PV6mWcDKKbOYqGwSTNxYIkc40Cllf3kIbFvuR/z5EDdFbWDY1gDOlY4+Fkul5Hfh2Gp1WqZ/oUAeJ7BHUjyV29ubtTv9zNHGzOH4/E4oiqkKzjQS42jGxXmfltk10FZCqDzGAj2NHschxMjQRoHhU+sFxf6jn8wSZAB3BPd5Syuj9d7h04mE11dXcX9nfF3sOyFV6nh5kpBmRdxMC7CuNLGuUZ2kXMM7P39fdQl3N7eZhrlw+ohh4yf+5bL5aiDGI/H+ud//mf1er1w0MjXZo2Y+3K5rOPj4wy7hBzCZLndgRWGbHCg5M4VcrANDGrqFDjYYu2lja09Pj6OOpM0WsOa393d6fr6Ohzh1WpdbOZ7mnXiPYVCIQ5a8T62sNhS1jmrVNYHL6xWK33//fdhb90Rl/J75rLnPCecZ5A2dR2OUdLwu8soOjbNMfd7SRtWGMxBYRR7iX3lxBnO3J/+9Cf93d/9nf7xH/9Rb9++jf1J/+nb21u9e/dO4/E41oGIA46Uj5HvOTk50dXVlSaTSdhAHNzUWWReUz38gUx97I8pI+rGyScOwfR/hLNpS+ATxoIgOHt7ezo+Pg5kf3Fxofl8rn/6p39SvV5Xo9GIyUcRMWlczmjBqMC4FIvrRF4S4VHaFJuwYbxVD8/AokqKYq7Hx8fIWcKb9j5tu7u7mZQC2l/wvBgIVzp41B6GI3eQYi16mfL5fr+fGf9isYi2Up1OR69evQrB9BOpYNX4HMdbPjw86O3bt5F24Rsz9drymFUXwC99oeQ8hJ6yqKmzBcg/OzuTpGCuHcx6OA42frVaRQrL9fW15vO5zs/Ptbe3p++++y6MPMojZbWRZRQeYwHEXVxcaDAYxHuZf74bA4vyQGHz3DCLtD7D6HneFm3KPNfLK5oB4avVKt5D8Yznn6bFUjAb5JHTB/nm5iaO2GMvEI5OwSqO3u7uro6OjnR4eKiDgwPt7+9HCgzO32q17jP89ddfZ9bOf08dE/+5DReAw4GaOwr8nWcolUo6OTnRd999F30O0U842F6M42BAWhtMnP7JZKL379+rWCzq1atXkRdIWFH6MC8f+QIU0hrozZs3kZ6URllSxxwDznN5Dh7P6847utNPP0NeeD/OmrfS8kNiyBFnv9CfuFgsBnPK9zop4WwvjhNpXYTvsT10W/FT0Jg3tx/YAGndeokQNq3WXG5Tu5zncH2Jyxn91DbgWEjZTjmEm7FJAFYKnqbTaXTswHHf3d2N0DGyC5teKpUi+oTj5uPi+2EJyavGuR6Px5kIEzqW/eJyj85L9R7y4mkKfK9fKTvr8pEypT4mr9Hp9/u6ubmJU8z8YCC3IePxWDc3NwFosTOMmS5ApJSQl7q7uxs4znNv2b9EqZDBs7MzVatVVatVvX79Ojo3pYXan4sTPul2peDTvyRlgnh/obAOOf35n/+5Tk5OMpPun3UmFVZlPp/rxx9/1Gw205s3b/Ty5ctgMJ2RkTZeBJPslLkzCe12W51OJyrTnEVFcSF8ADdeLxQ2x0pKWcobDxxqnHkYjUbBUh4cHGTaW3HBAPDsgB
gMO8wpYfpisRgCIyk2qHvo9Xpd0qb1yZ/+9Kc4E9dBC+uwWCyC8YWp29vby/QvTMMQbkR8Q22TgZeyJ5i4t8ff8q5isaijoyN1u101Go0oInGZgrXz07wwSO/evdNsNtNsNtP79+/17bffSpIGg0F8ljxrSRlWinGxN0gV2d/fj/wi5MJz/9LwrP8DRKPEPb+I1z064P0B+enePTJRKBQijI+ce+EK7yPN4eLiQqVSSe12O8PUzudzvXv3Tm/evAngjCN5eXmp29vbOMaQqupUDkkXYJ1JY+BIYT9RK3Wst01muVJ2n9ekD8OoALBOp6PvvvsuTrajk0GhUAj2lLCnlC3cmc1mcSjI+fm5Li8v1Wg01G63o0sCYDJlPWDopU24n3xU5BZA7LLExThS58vf6/mu/nfkjZxl2DD2JiBB2uSRciqRExXMZb1ezxAgjUZDkqIob29vL4AAzwbY6ff74TD98Y9/1GAw0P39fayHRyOkbIqJ6+Rer6dWqxWRR4799Qims6hp/cWXvPJwABfPCaCBrDk5OcnUo2AT7+7udHNzo0aj8UHeOXYQJ6RcLqvf74e97nQ6QWZ5GN7lBxmBRLq6ulK1WtX9/b2ur69D70sbAgid4/uPezuZI2XBKIx/GtEBE+QBUo9wMG+e7kSE7vr6WldXV5lWlwBmyCrAO3r65uZG5fL6mNcffvghan0eHx/VbDYzDgHfz/6le5KDdk9JKBbXPZT7/b7Oz8/jdD+XAX+uT8ntZ8UFfHHSL/IvYLIxHLVaLSpy0xCNb1hnCjudjqrVqr7//nu1220dHR2F0cezxXPyyjkHuygrjGChsM6fOjw8jElHMY9Go7g/OVMADkKj0iZEnipZAAeKUVoLpG84mCFJGe+Gf2nbIYDFdDqN87BhoamW895vGHwqbmGvu91usFywC6wDzFO9Xtfh4aG63a5arZY6nU7k4Xioyg2DOyUOBLfpSsfjCp6/uxyzTp1OR91uNzo1sFZ81ouLvNE4Rq9UKuk3v/mNDg4Ook8n984bH+vh4K9QKGSqJsvldVslcluRTZdH9idyCgNBrrIf0esMGPuDIhNn1hzILZfLzPGCKEGObIWp40QnxjcajQJEebsogMloNNLFxUXmuMHlchnpODhw5Ov5euDkYQBub2/1008/6fr6OpwtLzp8ivXfNvn1Mab6NX2tXC6r0+nEmiMLgDWcHPLSHOwgx5yDzvGc8/lcr169isgTYXsAYQrySWkid7LVakVbtV6vFw62O1WpDpU2qTI+Nhy7lPX1z7mx9BQC5BuWlNOGON2N79rZ2QkgmdcKCh1K663b21u9f/8+Cilvbm6CMCiVSsHC0WbNo1FcjJM8X8K0RAYo2Do6OsoQGykhkBcd+lJXmu8sfciWse4cWQ77yXN4Hnu3283NuUSG6bErSRcXF6Gbj4+PValU4iRKCBd3fFx+C4WC2u122OZ+vx84hc94DqlHiPzZWQe3mw7uUsfOyQTXfT5/7BUniebzdYeYXq8X590jw16E7rae+/6f//N/4vAYZBznDxKP7+d70cVeXA5mIdLgckht0dnZmdrttrrd7ges/+dGrj6rDyqLkhd2ykPEhcL6pCgawfoiOYhEoaLcaHBM6INjt2gjgpFnHGlCNoB3Z2cnUDuFA4SCCF2iDPG0qXhzAMD3eljIlT/G2nOzUOqpwnDg694RSh8l6vl3zmahJDk/GHaPjYNC5JnpywpDBnVPXhPFY57H+/DwoB9//DFAhYfL0vWV8kM628JKfQp05BnaSqWi4+NjtdvtOA2JsLYzsYvFIhRBqbQuRiOX7ebmRldXV3r58qWkTXGQF164N813o+y80KRYLKrT6cR51JeXlzo+Po5epThgzvyz39yRQ54Acw4I3HF04+6OHM9RLG46aNC9wFlNnpMUAgoiSQMAvDLn9OfDw+dggGKxqF6vp/v7+3AWGIvn4TEunp/x0Tmk3W5nGJSU5fDw2jaB1DwQzeV7D/
BTqVSi0NT1E076YrEIxtvZcGnjYKNver1egMzHx0e1Wi1NJpNoBJ7mz5FeAvhFbjB+VNlz7KSzw14U5SwscuxsPzJF/nTqGJMSxTMyN8ggKV1+/C2GHWBKGJ/z3KVNgQppWpI0mUxUr9fVarVULpd1dXWli4sL/fKXv4x9wpxgj1zWfC2RfWoBGCvRN69VQB58Hj3C+aWvdA+lNgGmmJZcf/EXfxFAytNEWHvC8x5ZkjbOvLTRS8PhUKXS+vTIVqsV9oxcdU+D8G48rAGE0t7enkajUXRTSPNG/XlS58NZV49iYSecTOMnc8LaA55dpzFW5IpOO6QRclBErVaLlBz0Agz//v6+/vCHP+j+/j4Kw4goVKvVKEDHFpG6ALNKX1lPg0T22LtEDJwsPD4+zqQ5+Lz7XD51fRSguiB4aOEpFoqLpGNOzEGJOevHg8MuAezwPu/v7/UXf/EX4Sm1223VarUotmLBnqLX9/f34ztYZAwdoKFSqURo3gUJA+/gFIXqShODiQfo3pN7GpyYghLHI6tUKuGdFAqF6GLAyTwkKZPTweZZLpchDNwb0OonZriipyrd2Tx/luVyqZubG+3t7WlnZycqIlPvLVWGbnC2QUly+bikbBUxf+fimcgVo5J2MBiE9zmdTqP1DkbL84UJFb579y7AEewTlfx8Z2qAYQ046xgnpVwuq9VqRX4whT+wmS5v7u2yfyqVSgageWGVyzbzQbEG73UlhZLhu5EjL6rCoPCdyBlGl9NiYOj39vbU7XYzbMR8Pg+gS3jZUyOYO2cyOP2IuRiNRqrVahoMBhkAmwJQXttW+fUrdaicXS8Wi5l0KuQZ48VnYbj9MI/ZbBas4fX1td69exeMB2tPCJYiHp9LZJY55n04L7e3t7q6ugrj5yFp30PICPqU7+H/pHn4fCCDrBvvcQYIp4d0LvR2Wu1MxE9as3u3t7eq1WoBjLk3c8fY+DefzyNqQeSKOUK/esNyogSFQkFfffVVhnwplUqRg+pzzd53WU3//yUv17duJ9w2sm7dbjfyHMkHxeaen58HGMJuAoSWy037SUL+pNwVi8U4phvZ6na7Icd8N9/nkd3d3V2dnJzo/Pxcg8FAw+Ewjgx2QOkXewgd6LqV73fiDP0oZU/ZcqCKfZGyJ1b5/enxTPeJg4ODcJackUYu2+22rq+vtb+/r/39/Sgcf/fuXZBw8/k8wC3j9dxsxj6fz2O/I6uOl7ArkuKob3Jf83Ttp0iBzz5Jiss3TBq24MvxRr/77ju9fPkyzpBHOAA/FI0QGqd4glAki0cDWG/FhHFygOhjxuBxxCnFGNVqNcAEC+GMKkIIkOYZCWN5DiFgEsbWBdW9Dc/VgJ0qFosajUbhOXIfADq5NVRf0/Cfi5AtR5u2Wq2MwPi4F4tFMCKM24WYn7BihLWYF19nF648sLctLJSUbYyMonBPPGUiJOn58+dqt9sB+Ck48f5y7uWvVqtIT6lUKhqPxzo7OwuDjPfqTgGfdaXHvGEs8X4Jq1er1Tir3pkFQBvKCONbKBQybUYkZeTJc+9QSkQXuDDiMBAw7rD1gFZkKA0B7e7uZkK8xWIxcuoIvT88PKjVaml/f193d3fRqJ/xEtpnnVCQfKcrdZxZnAyKJVIgnq79NsksVx5odvl10LKzs6Pj42MdHByErmRuiCSRiwYLOpvNNBgM1Gw2o3PH27dvJUlHR0fa398PgEmhK3Lva4FT4+CJfGOcq5ubm0ivgq1yp4r9CGhFTwEsYMlcZtwG4QhiVAF/AFjX5ejxQqGgVqsVssg+k9YAAId+d3dXw+FQ3W5XNzc3+tOf/hQdI+7v79VsNtXtduOeyH2j0cjknjqJ4Uyzs1bIsOcZ0rnF9Y07KshKaou/xOVRIZfTPJIAoMTasvbkjjr4dz2HzHS7XT08PKhUKkXxc7G4riFgbQCwLmfIAAcHpTahWq3q+vpa4/E4Y/+xI6yJp/SlwJxn9RQBXuN9jAk972NwUs
3vj3N/dXUVPc+r1WpgA4gDGH5JkdO/WCx0fn4uSXEcKgQYZFer1Yp7EslGnp0McbvqeId5AqQOBgP1+/3c5+H6VzGoTzELKYPqCoZ/BwcHOjk5yXizy+Uy056J4p39/X31er3IBb28vIy8qOFwqNlsFscfpgKBYLOhXXEXi8VYNECqtGF26GtHkRNglTEDPKksdNoehSlt2FTfjA6iCWt4DglHuOKlLZfrM9wRWLx1DD9KzZlcb6/huWcoQ449Ozw8jHGw4Qkt4VUx5qurKy2Xy2CgmMt03fOEa1s8eS7fUHkg2sEpAL3b7QZT5IAL58DXvFgshiKDAdrd3VW73Q4wxuaXNkopvQBPzk6x7jCw3myaUBLyDEhBDnFu+C6PVjhbidIpl8uRq+rzhEyh6L0vpo/ZvwumifwwwNDBwUGAfMbAOAGbzWYz9sJ4PFav19OLFy8yRgJWDGYPwEqqwWq17nZBLp8n+qcXa58a0m250vGkssz6kXKRRngwMp7uw7GE6JKdnZ2omoZlIYLA95BT7HNIbicAFZDFe8ghfP/+fYRMG41GjAUj5k48hbCAPQgDyAX2Az+dFQO8MQ5p05h/Op3GfXgPeY4w+Z7nCkgkUlWtVqPDCS2mKAQrFtdnmI9Go5hXImCAI9f96BGMuhtuX1+ikDBleY4U798mJ8vH5PZPyvb3xQGCoXZiKY1KeiSPg2iwg8XiOmf09vZWOzs7EZHBhjo+cRCP3BFtRRZbrZbevXunfr8frKI78zyDs6T8DaDp5NlTbCpEkqfXOSDPA/h3d3fq9Xpx0iPzTQ9kj9RKmxzty8vLYJ3n83kAU557Npvp6OgoMA+2xA/54Pupq/HUMc/PJSrAWhUKhUjv8X+s678KoLJ5mGQp31vgi9w7rFQqur6+1suXLzOAjwdwunw6nUbvvel0qsvLyzjTGLbGK3X9e1Mv3L1tF0YWn3s6Q1gobE62wvh5r1Sa1/rkwiThxcHMovR9QVkMNt7t7a1Wq5UajYbK5XLkhQJO8epJB+BzBwcHwToDllerzbnRfA8tVIrFYpyxS0qDe+AwLLRCgUnA83f2wsGpb5z0dc+v/ZJXnuCnY0ZBsJ5UPz8+PoZRYJ0JnUjZs8AxpHt7exoOhzo5OVGtVtNwOIxQB0aJeWRNfe6c7XQmqlRaNw5nvRmLt4oiF0lSxiBivElNcIMB45t6xXzGx030AjmjwwSf597MI4AXBgJ5arVakTvFWLm/FzTx+devX0fLNs9ddTYa3TCbzTJ5lr1eL8aSp7NSudimK2VO3GBxsU6VSiVCyhSDAND8UBQMPvIxn891eHgYhUM///yz5vO5Xrx4odVqne4Ck8h6cGELkCvSqXwvsC60BBqPxxFCdx2NHErZdn8ur+h55oY1BKgi+zwfgBpdjmxDiCyXS7Xb7QD1DjZ5dnfAMOIud8wBtovcO3Kwf/7558i/44Q1D/FKa/uwv78fz8bzMj/sGUCBA29n/90mfukrDyy7nkVu6QlLD9TVahUEFc9LJBGihihl2lHi/Pxcy+UyoibMBd0dnD11efL1YK9Q6U5KFxEc9oykTATLI2GefiRlCQmfEwfs2FjPUeV3t9dgI7pQEAkD6BPtgyDg73Sa4EAeUlnoEEH0gaghjh84BjzB+pF2gW51hpX9KK33SLPZ1LNnz4Kg4Znd/v2rAKoLF5OcevH+dwa2v7+v//Af/oNev36dyTclLE0ujued0fZgPB5HgVW9Xo9QlIc5GIsn2Tvz5IKAR01TcPqtuXFsNpuhvGCSaEECCwZL6eFGhKhYLIay8Wptr8RmvIvFIs6rLhaLwVLCOCF0lUolGuc6cKJ9ibRpiwLQvr29DYNcLpcjxIkwIUTMh+cIusHwfpRs5pRF5feUmdwWRemXe6HuyUqbZ6BAirkhhIM8sFlRjNKmdQlyhdywgWn9wXudCQTsMiY3ws6KsJ+QTeSQ9WF/8Xnkgc1PBACv2L/LFTQhKZQmf2MuPIybHg
VJmo7nrLLvYO1p2QOwRxmzFgAsKtIpVpM2vVn5LtbU87hg4AqFQijxVquVAVYpI+NGfpuu1PHPi1xgMHZ3d/X69Wv94he/UKFQCBaf+fWcZkAbee8A19vbW43H4wBXtJkZDAaR04lcMGcAB+TaDwJhjG7YRqNRpCH5PdO8Pj84hTnweWHNMf7OTmJTKAp1lsfzutF1HKABo8r+wDFj/w4Gg3CU+ElKGLbt6OgoU2RzeHj4Qb4/3+9zCFkCuC4Wi2GjSGUAaLGm6Txvk95N7bBjBtaJXH8v6pMUuaG819MdaP90dHSUCYnf39/H4T6Hh4eq1WpxxDQONJc7/Xwvuf/IFmAUFpyxpaluHlFgvDgzOBPcM892SsroYY8+IAs+f/f39+HcU9iEXk1zV6fTqZrNps7Pz2Me3Anc2dlRv9/PgMxisRgnqTE/7BNk1ufNn4lIGDgNHOTYxElMx5Ofuj7ZqN8nzY1sukmcVWNDtdvtOHeenogsrhdv9Pv9AKBnZ2d69uyZTk5OtFxujiFFKBHglF1wIXQDieF1b4Z8WJhPaGwAAIwmITIUAwvpc0ErKGcQ3ICw0J7PyT/Oby+VSsH+wObiJSJQhOLYQHhXXt3qND8gAMXogs6xZb7GgIx6va6vv/76A8fDN4t/LpWLbVGUDjzSMXMB2Hmuk5OTYJWZY1ggwI+DRDb/arVudN/tdsMp8Kp138yemsH/fY8htyg6PFnvGuCfx8lj/ZFlogCATUA4wNOdNw/5wyTDahIqdzlGcdO7FMbew3MYKRQcoWaKI9zLpwCQVAVed5DiYSvPDWPc7GnGDfjytCAudwT8b9siu+4AS9nn9/fM5/NgT5ANnCryvtyYA956vV4m4sQ84URQaMe8elGQf4ZT8tgbGCb0CW0GS6VSpDQBIrEBHk73MfOdyJn0YQ9YQsXsR/Q+cs8ceq4gvxMl4d4Ui9RqtQCrpVJJvV4v5hrmCkDhtQyr1TpCCEmAfUnbCjpoocsENgyADIj2VBXsids69IWnCXzpCxnxsD0XY93b29PJyUnoBs/vRAewl2EwYeod4FDPcnNzE4fYeKoSWMFJLY9GsI7MNZ9BX1FkKWXzZ51o8H2JbKZOg+8fXzOXcfYM40NGuM94PI7jTJEriC50vu8p+k/Dokr/D3tv9htZklx5n1hIBoOxcCeTuVRmVXVXVaNbAwnzIMzTAPOiv1rADAbCAPPNSC11D1ql7lJ1Vi5MksHYFzK27yHmZ3GuM5hZwgDKeAgHCJKx3OvX3dzs2DFz80W+MxFTAPhkMlG9Xg8dQHoa13Rgiq1gbnl2xoqa78x1LpeL8ouMv88Duulj7ZObpHzAXdC8Ew5aWXgsLASExFsS9THsdBLhQ4n+/d//vZrNZuzaR2khJO7ZpEyDgydABP13D43vE87nvGRpCWLdu8ZwInQIMyVBWFQsKCbXC/0zsYTHpOUJPwAEEpthM13pA0QIuZZKpdi5yE7V6XQaNeZYSAhbsVjU0dFR5IL5vPLamzdv1Ol0wrtM554xTB2DdWzIqIfNpKWDxTxtb2/r+PhYL1++DIYHkOMhHUIevhsa+QCcfv/993ry5EmG6XSvU1qeukMfme9cbpG/3Ww2g62UlhuGyP1DofhGJZSIpwA4YygtmSkYGveU/Tp8ltA68j+bLWuUcsqb50gDHt3Y8vrt7a3G47GePn2aKeBMeJW6sWzSQ9YA0v6MbI6oVCqZqIy03NBIeSTpYYjNU1dcz61DS/VrGrVg/WGoqH0M205OHvnlXrge/VGv14MJ7Pf7wZQT3r66ugq96Qyq6837+3tVq9UABoAD33h0eHgYNSoHg0EwPqy9NAyLsXYD7kYSufd8ax8jHEx3ltg/QDiZKBVkCt/DoSKlq1wu6/b2VpVKJfQzukJansBFdRlSxCRFKo6DR1/z2ArYOv4GkLueYk05EURLQevnbKmcpviAflLGDDYaHLCzs5Mp5yUp40C4g8Sc39zcaDAYZPasID
uuMzxilc8vDr1xx4Ic/Xw+H/NOFRcHtKlTm86F6ypnxf11X1M0x1BpwzlygDgYDEJ2eSbSEs7Pz/XHP/4x2GROIUQnU6hfUuScQyA4y+kyh7zSR5zhfr8fG1udOJSWqXA4hX5Nt8sfa5/cJJX+ny4I9yxYvOfn53FknANAymY4a4QSgsVpNptxfY6NY+Fyb7xjR+RMeprPw2uu1FCQ9K3b7arVakU4dD6fR/95xsFgELlVhHbwutikwIYshN7ZADzoQqGgdrud2SXrOaV4yyi4crksSVHdgF12Ozs7evbsWXgs/X5fR0dHkhTACtaXZ5YUoVlnvAmJ7e7u6vr6Wm/fvtXR0VEItgule48OepzhXoeWspauFFIlgNc+GAxiXjhP2GXIFUKz2Ywz7AFDlUpF19fX+td//ddwGrhvCjRc2dI/z2klp43PwsIAJp2JRVl4nqyvkdSooTRROo8ZOT5DH2E9c7lclDXBoJMmIy0UE/lNjOObN2+ij9PpNOo+ct1qtRppAmm+Ic/BunWAQ1TGw6H0i13tqR5zg5nOyzo0+uX6zuXI0yNms1mATXQwINM36vhc8huAd3Nzo4uLC21vb+vq6irWMXntAEfWv+f/ScvNfbe3t6rX6xknAlaV/DmvMeoMFtfh+b2xmYPnh+Tw/Gcaa3Y6nWY2jaAPHTBRC5L7kY+HHFOC8P7+XpeXl+EQvHr1SpeXl1E+i/z9yWRRszuXW5wFz1r2zTr8Rg/d3d3p8vJSz549i02CPN/r168DsLmz5o7tKn32uVoKupBf7yNz4ak3PqfYLq+ogzPMWoAkmM0WG4shdebzuX766ac4mpM++Tg5A+55m6RAFYuLgy/evXsXKYmQDFzLK1qwFukn88v7YB5peRpbCgDdIfPXkW+O5MWRKZfL4egB9MFYkEr39/c6Pj7W69evQ2f6WuOe6GGYasdlrGvP4+dvanMfHx/Hc7H5yoEs1QF++umnB9H3n+NYfZJBdbbRF0aq3PlMPp/X6elpGBwMEKfsFAqFzPnH8/k8SiDh4V5fX+vw8FBPnjzJbOZg0Lwv/htl5YxrPp8PIfb8TAaZvngI1ZlQlDiK18NXfIbEagbf6wICKPjeYDCI+/iuT8/Lw1vK5/OxyYDQEXmneH0cYehF/AmH+FxJy1xTSaHw5/PlJitpUWbj6OhI7969y3g3GL9UAaUM+7ooy1UsrzeXXT4znU4znrWUPdkjDadRaB7ASLj78PDwAWPpzkoKngm7YLzv7+/jOD//PMwiitbBKIrdmRv672wxffHnRkad3eF+NPKJiDDgmLVaLdVqtZAnD2VRBB0g3+/3Yz21Wi09f/48HDJOnJOUUYipM4wybLfbkXfG+Pu6gEkjr43x5vcqZnJdgKobek/HcNAqKQzUX/7lX4aR8moP0nIjxmQyUbvdDgNJuB3nHd1ABRVKAPlmIfQvbBD6kfq8bPQkDWV3dzdCr0R3YLt9PZEO4wYewwjLy3d9wyFAABCA/ges08/pdBqOyvn5uc7PzwOsslaImKF7d3Z29Pbt2wDqgHJA79OnT6N8Dxt+0OvPnz+P72BnuBf2zCM6X331VSZKQyoZ5ddSUJrK7bq1dO06OOW3h9Rdd/LsfJfIC/KIw4G+gVRAV97c3ESuM+QWfUCe3Q5SjgwdyPqhVBU61vuObCIT/jqgjrXBeOB4uB1wxhWdx9h5BKDb7WYOmPBNiBAZgPr9/X29efMmQ2KAZ0hfaTabkb4GmIdQ8wia2zueazKZhCNXq9XCRuRyuXASUjIRkoIxSvXxx9onGVQHJKngucJnUgCgBwcHQf9ub2+rXq+HoiDHg0FlQwpKAiV0eHgYuW0ptS4tqX5fBB4uIpF5Pp+H4sSQAwScrsaocg/PMWIx5XLZSgUIuzNSCDBKkmv4Iuz3+1F3jBwqSmER9pEWzCj95LQHqgvU6/UQ5OFwGGcXO6PpHp4rf68L6+wiu/7wNN2Qu6JEwf4cL+hztJQlewysMq
+FwuJcaHLuYEYlBRsjLUs2MfeEOjudTjgIFxcXYaTdu077Rviu1WqFAkIRNJvNCEciz+SYzmazqH8LwGB3fblcjn77JhYpm5KDw4bswgpIinvAbLCjm/7gzPAe8oSRQHG3223NZovSchylCjiB4Ts5OcnkUOL9s7uZ1AZXhEQ7zs/PM6k07phyljTlrXwdug5Jne51aa7Iaa7gYTOOj49jLQN8cFg6nU6kATUajdC9lJpClrvdrvb392OjlM9xun5ghgCLpGTxHRxw1/XkFwMw2BGPXEtZNtVLlPGsXjvVdZCUze1254v1BzvHkc4YasZQWjpjGGEqIpAKMJ8v0nDy+bzevn0b6S2s74ODAw2HwzjJzMO5ACBAy9u3b0PfpqeseckuQBwpNchA6livEymQgmcHdtLC0SFX31MemHMADmNH+N4ZQJ6X0l7Izni8OPlMUpBO6FTW/2Aw0OnpqSTFPhMcHOad+rikeniZMyl7qhTPybpAjyCnaaqBz5WvY3Q5+twdMOzE3d2d9vb2YhzZc0IfAfq9Xk+/+tWvtLOzo2azqUajoVxukTp2eHgYfa1Wq1FpwvvuOMEjd4zj/f29Xr9+rfPz80ghos+MRy6XC+IF/SvpgR3+VMT1Zxfq90WRKnN/MChovzET6saLzlKuhpwfapHt7Ozo5uYmQuBOqTujhSLi+rBIPmkoCY5VS9klPuc7lbk2oNvHQFIIlZdZQOA8LOY5WZKCVfI8JnJbAZyAVj9/vFarxYKUFMzF2dlZlILpdrvhyXvdVJ612+3Ge362NkZmZ2dHT5480Xw+1w8//JCRg9Rzd/Cdhs7WraWMfyq7hUJBJycnqtVqwfzg3KBE+S65dtKy3AgKrlqt6ttvv1W329Xbt28zuc8OFGls1Oh0OpnzjmEmC4VC5HgCJj0lwEM2MIUYO99Y4usnVYjOJqCAHLSSX7i1tZXJK8epg6Eip9yZa+QfsOphLN9osru7mzlq7+DgIPrFc/I9cq+Oj4/jesyZ5zxhfFxXuNOSOt6fUpT/3m0VWE6BNeMIS4qT4xET5AAdAwuK3oQogOn0nHrXrTgvsFeSAgCkYwkjhB4l3QJDi1z4Dneu7TIiLY9MdX1In7Ad6EvfQc01WS/7+/tx8h5jBgCAxed/SbGBqlqtBriHlcdmPH36NHaij0YjdTodPX36NICyAxAMdrPZjLw/CBlJYZvYoc3GQ478ZWxX2eTH5OVztTTqgS5Ap1AP14kfd77c8YDsYv0C2MmtZMMmcgCzSPQWouH+/j7SDNkXwtxLijxtWHScdE43k7JrgNQ5z92Ulo4Wn3OMAsgFPLM+HPB6ihNjAJYBBzx//jzsAuPNGDQajTidq9vtRvREWsg05dWur69VKpUildEjX5BT/j8EBPPwxRdfxLrxvQ7SMjrbbrf1+vXrGFt/rjSq91j7WTmobqDTcGf6PlRvrVaLMlHsGvMQHoMrKQCqpEDkg8EgjkTEO8Kgpv3wh2Z3GkqLyZEUmypA+0yqA1cUGsKEJ+SbSByYpBsJmCDGxa9DuQc+N51O45hG6PJcLhclrtyToqYqBgGDTDmuyWRxpJkzYR4edOWP1+6slKc/kM/nCeKr5pz/H2OnPmdb5RG6Y+IOxWy2SMs4OTnRzc2NDg4OwtDBjjo7iHEFHEmK3KVCoaAPHz7EbkvAnTs9HvIB5DmryVzA2BQKhXBqkE/kMs2rJnQL8PZrporPw6VpHhX/p/nWODLkZnkqg4dlx+NxhFph2GjoAkCzpFD4pDak+abSYv1yAgxGAvljXnAo2WQGUHW59edcN7mleZ/420PgPjYUJwccYvxKpVI4+URlcKYZ73a7rVKpFNGqw8PDmHMMvbQEjzA5OFKMbaqHMayTyURnZ2caDAZqt9sBpMlNTUGqtASV1Kp2I+jkBmvbc+NhxLAZyKRfG93mJ+RIClA4HA4DSFar1QcMGsXbJalWq2lvb083NzeZsC9yyJwwLy9fvgwZ9FQyj8
ow/r1eT69fvw5QzDPyvGl063M3d1Bp/qysNdh5Dj3AXhJdRFZbrVZE+HyTWi63KFtG8fnRaBSneXlky9l2P1mMjb+kcuC0UR6QlCT6BO5w+UPfoHt8rrEJLlvodL7vKY48E9dlHJFH8BNrDgwCGUHDXnFSoEeRGacPHz5IUmAOxzissTSaICnSFmFiGQ9saBot4H6pTKArGJdPtZ9VB3UVe7qKMcvnFzmTT58+jYcA/CEwPDgDygKF+SPkiXCgCLrdrra2tjJFX1OvEUUwGo0eJFkzaJ7ryeAhuD4ZMBE+AdzDX8f4OsOGMmehzGazmPTxeBz5Seymq9frmdAnoVF25zYajQwDfH9/r3q9HsY8n89HPiRMSWp0EcJ8Ph9gn4XnpZCazaaurq5iQTiwcuPo8uDzsU4sahoOS5WBg2zk4ODgIHLGWGwAMY8E3N/fZ84khnVtNBpRsgbD44yNN+aUxU6ID+PGd7wSAMASQ8jzuZFGuaKAWXvO0KZ5jTwHjgt9hl1D9jjX3OWZNATWcqvVipOD+v1+pCv4eh0OhxFKOz09VbPZjIR7xpj74yx4ikMK1NyZA5yR7uD5YD7v/L1qbtahOXj2sXOG9OzsLJ4BwNXtdiUtjwFF5zkDyf+kXnz48EF/+MMfwtF99uxZyJuPNWkjR0dHyufzkfqBkUaunGl0/QtIxpjCaGK0kHucG54hzWOjTziJ6E53lBwsUAcbPctvHCfva6fT0Wg00osXLzSfz2PjGJvP/FAVUigkZfre6XQiqkI1BfSph0v9fydGYASJdNBcp7kMr0Nzveoy4+QGUT02lkoKEktanN+OLmk0Gnrx4kXYUL+Os4pnZ2eaThcbLwG/6BvkYjqdZsrluR1kTVC2inQ/HH7Yc9clPAN2gbnw0LjrZ5dNbz7v9EVaRiI8EryzsxPVIriXRww4HObs7ExHR0d68+ZNnE5JHygHVa/XY5MY5BlOgLRMD4PMYm0it66fffMXfYL0YjOVy8UqmXmsfTKu5TS8M6VMgrNQ+fzi2DcYKIrKppPHhOId88CwmdPpVD/99FPUCZUUrKif6OTCwSJG8UHx8wwORDHATDZMixtoajO6x0L4AADg4XafCA8XOUspLU/BoTYYCo5nweuAfveQP/1HgPjubLY4TWMwGGRofyh09+idRfzw4UMcRcizsPscwJuy56583HCuGwvlToX32+UZBYoRIc2CcCS7eDHMLGby6CQFu7m7u6urqyv96U9/UqvVCibG80DTn3w+H3VWYZM8bcQ3O9H4PGAEpeJgwgGXswhStnoAioR1CMCUlgoKuWfcut1uODYwDs4gEP3Y3d3V+fl5GFoMUK1WUy6XiwoV0+lU//t//+8AGfyWsrm6PPeq5swE1wAcp9EMl1f/bhqN+ZxtFQjhb1+PuVxOFxcXAYqkJUginQfwd319nTlZifcw6DD+6GdYPDeygAp0KSF1nCi3EfSDUC56l9xUZ5kec4JZJ6w9dL87Lsw7tgJSAxuAbLqRZe4BzYAP9Cx5iHt7e2q32+r1ehn5c7AB00W5L9Yx68lrE/tzuv1kfB2Ys+68VmsqD26L10F2XWbTCAVte3s78s5xCsjdRf/2+329fv065tA3EbNOcSL29vb04sUL/dVf/ZW+/PJLnZ2dqV6vhz6SFISOp7bRIMCQ962trWDO0+gtz+JAzVMAPYcfnYx+xW7jzHnUZz6fPyhdSPQXeWYtedoe9/CyepLi+PTr6+sgKVzGptPFBicn5ejDYDCIDdJgIGyNr0lnTMEonoYpKWqxstbps8vL/1OIPzVsnuzrC8MZQ4wPjFCz2YwB96L7lEVgkwW5GSgxlIznSDBZeKJSdnOUgxLvk++QJIzAIHPyASAu9fpgILiXPx9CmdbDnM+Xm7Lok4cZ8coAGoSCeQ6YtclkEooXAYdFJryGI8CpLyysFIy5UPP+7u6uvv/+e43HY3377beSFuErjj1zI0Lf+J0CJ2cC1qml/ZSW4N
2VzNOnTyM0iiPQ7XZjhzIyOJ1Oo26nMzfFYlEfPnyIk754r9vthnF248T4TqfTAKLII0rUHRyXHXJWkSE37h/z1P06yCQgg/v5LlaABTuXYTZw6mjIMGthOBzGhr/ZbLFJimoTyCJpQHjo+/v7sUaRMwwEFUGcBfFxc0YRHdLv93V7eyvpYWK+9DCnPjWkn7OlICT923VutVoNg0jaBQCJkkeSgkXyepKsWWkROfGT9qjTjA6bz+exYRWdIy03o8AAOSjGWXYW1fPZfG16dIJndcPG5iHGhihHPp8PkIHt8LllPbO2PHowny9LBgHoIUFevXoVDBphYQAqgBQSYjAY6OTkJLOvgM0szuKl6WApKGesiRa2Wi1dXV0FiFjlYCMHnzL0/17NWX8pK2M4Mqenp2FzkRecXdZ7q9XS+fl5yDCOMPPLoQlshIRVdexBihRrwKsBUQ2AvFXmxgvg4zCwqc/ZdnfKmV9JUY4SmUvJEOlhFRV0s6exoEupj+17e8AYgOf7+3t1u13d3d2pVqtFaTJIN7CE5wHjyHM6F0AbJwGH09l9B6bgP3f+nChpt9v68ccf9dNPP2XYVeTUberH2idD/AgdnWFCHJHzGVA4CyaXy8WZr4XCorA2Gxja7XYoIWd7CHGQv9TtdjOhVAykLwL/7QsYdsuFgO/TGHBKgLAYpGxSNKCTv3lW2CQUHSErJgvFhJc+ny/LoHh4S1oqcpK0B4NB5Ia5t5XP5yNnNZ/PR6UEN2aAfgfIbERDQAG6P/zwg6rVqs7Pz2NxXF1dxXi69+jjnDLU6xYmTVleby6jzFE+n4/yRW68cHBQFJJityQ5UoeHh8H6w9DPZovTwth84sbKN/PhHUsKOcPYogByuVysHZSOtKz/yXdZo4BKmss1MsR8eX41hhdmiFqiOGAoeHQC/XQAwdp78+ZNhJu9XA4AvFgsBsvK2gC4oPDu7+/1/v17HR0dhWPJM7CRrF6vx5xhuK6urmI9uzw4A8l11k1+08jEY79h5968eaMXL16EjmE+ut1ugLxKpRKVEWBkMBgYaqJDbjAhBEif8ly8fD4fDKyUrZsLSCYFhGoKyCRgw4+ZRJe6/KUgwAGPg1HPz8b4eoqOh+Ixkjh/6GNCqoeHhyoWi2q328E8MxZES0qlUoBSCvX7hkO3E86y0T/WEw4VDqzvsWi1WlEL3IGfj4EzVuvQUibXWVTkhhOy0IOsc2xNqVTS0dGRCoVCsNee3jEejyP/9O7uLnaqT6fTOAmJNAnmAT0KjpGW1Upubm4iYomsu82GqXVbIC03OLtuJac1jci4U+LOGE4N/+MAEokCGMKeOnuLfZEUeApm9Le//W08K3aL3PFcLqfT01OdnJxk6kZD2rhN9P46UwyYZ47RoVxnMpmo0+lEBN1tg8vJp0iBn12oP72gCx2vl0olffPNN7FY3717FyVMEAjC/g4eOcHJT9RgohEA2FvKKnioBu+AkIAn1rtykBRKGPCJ1+XG3L14lBj/e/K174p1z4L+eS4Rws2EuZclLTdWtdttdTqdKP+wtbWlly9fqtvtqtFoRCkYFkqz2dTNzY3y+XyUkJCWjIOzuD5W9O8v//Ivg30dj8dh4Kk/mAI8V5T+zD5G69hQjvTXF125XNbFxYW+/PJL5XK5qNf54cOHOKHEv4vimE4X9Ti9AgU70plflCxOjKcJ4JAUCoUAqIRrPPQnLUNQhMUJe7ozhcL16IAbTJwPd1zSxHueEcPJGvfwJmydO47eTxQnu+tZ1yhgduXSL94HQLjCc+fLlRuA/ccff4yNKrVaTefn53EqHLtY+Q6/U8Z0nWVWym4gQmcUCgUdHx9HDupgMFClUonNOpKC8SRKRMOwA6gACO4k4OSQuuTODAQBOgh9yrpyJwdDBlPu8tXr9eL0MOaX+eK6/h76F3bIHRYH27Bn/r/bAGegqb3NepEWtaB///vfB0jmuai3OZvN4jQk2FTsBHLl6WvoXPqLE8Ca4n
Mp48Z6wd74mkmN/TrIsPfRnRj+xxZhs/b29kJf+IbRZrOpXq+nJ0+ePGDewAQw+LPZ4oAIxpY8TdhA2D/WPI6LR6aI+ng5NHS3p3Yhy07CAeJ8jXqI38cAG4+ulZYOhrPM9JOTmsbjsQ4ODiKXmWtLis8Qyh8MBlH5Afkn9c/Te5DrlHRMnXiey9cPf4OdeA6XwcFgoB9//DGiGy4fq67/WPtkiD9Vkv4A/M2Cmc1msXkHKv/g4CBC2rRerxfsXXqUJiF4aVlyAQMG5e1hLDxxn0wUACWdfGBR8H40oj+XA1rPH0VQMZrc0zcSwQDgtc/n8yi+DjvnITPfPIOwt1ot5fOLHLKbm5t4drw7FCQCSUkJduShbB08osyg+ukrAPz09DSzaCRFeCQ15inz5K+tU3OPlebK0pV9uVzW/v5+7BouFosR2sfBmE6n4bUD6hh73+XrixyjPJstEtgJVXueJewpytGVmZc6ATACEKWlfDqgdbD4mBzAbPFZT2p3dgCGB2eHa6F0WC+E1vv9fhSJpn/5/OLoY0mRiiIpNvJxlCTP4ek39IP3uCdOLGGvDx8+6Kuvvoq8NgDy27dvw8FNZdTHibXgr33utoqJkpZ6mNrS+/v7Ojk5UbvdjlDnhw8fYnNErVZTr9eLECoMPQbQGVN0tDOKHBQyn89jo4/30SuwSAvw6ykqADTWEp+hpiMgzGXR14ezRdLyJCLu72kAvEZkywkAB5CsT89pzefzUYubY3gx6unYk6fPaVH0cRV5AyACKLHGcDglxYY++kUf3r59G/Yj1WerwODnbqsiEzTGvVKp6Be/+IXK5XLoVfqPve92u1E9AVsFq8i4oYeJBmD3nS1EFiGIfDMPeo0KFn5gj9tybDiOFGCVRoSA67KW+ExK5NFc5hkz5hCbwg9EVXosrxMUPMu7d+9UqVS0v7+vRqMR6xB9yeY/9H/aL9hfj2QwtzS3K5AlflrafL7YWNjpdDJkn8vCY+OStp/FoKZGPgUt/hDUeOt0Onr9+rWKxaIODg7CMLMAMbYoGISn2+1qNlvsEr25uZGk8Oh3d3eDaeW+gDsAgbOjKF/e5wfj6xOUAlkWgg8gIV/PeWLhwQDh2fM5+kAoq9frhSFxOpwddoRVyTWkDzAh7MArFothkKkbi+DilcK2ef4hyjENd/HdfD6vZrO5Mh/XvXvkwsP869RYYKuAtbSUY8aRHZyECvmsM8qDwSBOPCI1hUXpzJYfOcruSEIernA9WiAtQZizLfTDS+IwT6wZFIYbKdaAG0/3vD3/ljFinaR5QnwfsEzeFt9nPG5ubjJVJKjLCaCXFOVjnjx5olqtFoXh3cj6M/I/xw56qkChUNCLFy9iHN2wNJvN+G5a0iXVYesGTqXHDRvvkU/abrf19ddfq9FoqN1uh/NOKBWmm9xT11eMCzV4MUiARt7HiUrlQVo6R+TCkZrkefleGcLH2kkP7oM8c38HFuhC37Ti68nnGb1PCNNTr7gPjc0p19fXQYLs7OwE8CDMTN4rm2jQ/+gN7z8yjLNLhAXQXigUQrfkcovSPTgOrVZLHz580OvXr8Nu8Fxui1PQ8LnbKobX0xOYE0AVY0UKIA4D9swdH9eN/O2sIDqJa3g+NfYcOUGOXL9wLU//YA8NwNhTm9J16TbAWW93ytLPOsEH2wr5hJ2WFGklXD+Vdxx1InKARuw+e124N3WmV1VEQcY89ZA+OXB17Mcz44hRg/Xy8nKlg/dvaf+mQv3ppLjxRwmxgxbvXZJub28DWbOb1x/KvShCgvv7+8rlcmq322H0+v1+5lxmQn8oVgesXBeBY1ARFP+MH9Hl4UZPDubHhYTv+ESirN1DYXwoVp56Q+z2bjQacR9pyUBhTDDmxWIxduqR+O+MA88FI+UKDi+KXD9nxmB52YTjIf5VXs9j8rAOzZW5g5+UTcvlludm39zcRKiUcB5ePErOWVLYIcapXC7r4OBA19fXoVwdqPqJP55+4vnFvt
HCx9OfAYWBjDmgwzN3eeX70jJXD6OPjKURgd3d3VCuXN8L4LNu+G6n08kcBNHpdEKevfoFh1GcnZ0Fc8p8sT4xZr7evFydA4G9vT29evUqnEfqJzebzdjow/O7h+9yTVsHFkp6yETx20EIc8O8lctlXV9fB7tXKBQiGsMYUt8RvQXrDUBDRpBpjBzz4fd3AELI06ND0+k0ith7nh4AVlLk97EmvFi6R7PcEQacIgdpTis2xR2jNEUkJR7m83lsMvHXCGECqNhANZ1OYxPVbDbTyclJfIf1xXt+Dc/z43l+/PFHXVxcRCoR0bLBYKC3b99mqlA40+8Oxbq0dP0gI657PR2EMfaKDDyvp+151BBHCHZ/d3c36usSZeUwD/oAYHXHmwotEAjcg+864UB/AI6ua5384TX+p8/oKscqUvbEPtY5cuwVYJAdH0sHszC/RKdwpHDQHKgWi0UdHR3F97iv70Mg5dHtvIf46T/9Afuhr7GLfoIU10mdrf8nBjUdvPSi/A3Y8/O0c7lFwi4lZgiRQ7ujxPAc/FrVajV2+A+HwzhNCsU1nS7O3OVYOe7nwoDSQygQTgwwwBIAN5lMHuShOBBO80qcyocd4DpMHpNFsd3xeBybahhDmDk2EADQqYWKsiX3xI/R4xkATr4xAKZBUubYQgw5ihNAhBEgHPgYe75KJlyQ/fOfs7mydObRQSv9LhQKESolf4cSY3jnkiJPl/HjPniNxWIxwqC5XC7CTzghfl/myT3yNIQnPUykdwbLGYm0YPOq7/p4pF6tb85Ijanna/M3xw9iVNrttkajURwjiNL0jVbz+SIKwkky0pINRHEDgjkYAJnCADi7zbOwPgmTEmVhjlxfpSAr1WXr0FJH0Jlf5mtnZ0dHR0f68ssvw4nFMcCYwtgRffHcaUBauVwOpx+DVyqVAszibCMbvu6JIjgLTz9xcHyeOLI2BYt+0gz6GYbIiYAUwPg1iFRheB3k4oR5tM31Nek7hUIhUiKQs36/H/aJ5yBKSI61525zbWm5AdfXOWwdTiAntUlLx5MTeDx/7zEdkcr052xOZEhZkoAxorQjBAhyzlzzfKRYScqUjIREwimCTHJHBTbV59vHjbn18lXMC3o5n1/m8DsQxNlyJ2kVqwqATOfH5RygnuIfHDh0JgDQnUTWJLVPiYi6DudzuVwuQPjp6WlgJdKC/MAWSYEnAO2Ob+bzZY1jL6lG/8AjP/30k7rdbjx/atdon5Ldj77rX2axOQhxmnc+XxxReHZ2FkqRCQTM4bEjkEyOszdetoSjFZk4yiixg5rkYBQXAo/BcmFDMN2b432MIh69g1D3QNz7caPKpCHACGAul4tyN5wMhcIkDExI5+bmJpgPz9nd2dmJkD6LpFKpRK4vfWNOfHHhCLDJgfEmN4S8XYwAxoKkcRZdOu/OQiEX9GGdjDz9cQUgPUxTKRaLccQrift45cgXc+3GykM50jKlhFQMQh0oVFdY6bi6Z5wqPVdiAC8HLDgiHj6UlsnsqeJkDGDRWYP0C4UkLesPE67HWXID3+v1Ii0Epvj29jaeGaDEvBweHqpWq2VYEvrEvbmXp+fk8/lQjsxryiKx5tjVmjpSPu8uD+vW0mdydhm2YjpdbNIDRFEeB+aQkDt6w40m44kD47n0bOZLx8sjSZAL/rqH9vlx2XYd5TLJXPM34NcBai6XCwcpZal8nJydcgMqLfUW/eX6nU4nInWcxOa5rUSocFrPzs5i7HDSACyAC9ctyLgTCjhrRGrYQEyK2PPnzwOYuX51HeI/69BSp8rJCggmasVir2EKmVPmxdk9dA9jytzAlmLL2WOB3kDuGX+3/UR0uLcTUowprKW0BLOeWuKyzXMTeaD/rst9TLinyw6OtV+jUChE5R3AN+AeeSV9RFKsfcgJWF36Q4SlVCqF7DkjCjAFC7jz6QThn/70J/34448B3n3+3r9/r3/9138NR8DTaVJZWfWet0+WmXKhQ/BSkIpiqF
arOjo6ikXpxhPg56f0OIPKtTDCgCs88Xq9rnq9HrmCgElyi+gbIJD/+e3GPM27w6i6J+HeOX3DQ/cQCwraN3cwaaPRKHJqmXz6iwEYjUa6vr7WeDzWkydPgmHAUDNGDtyPj48zR0c6+GIBYQQAEuTv5fP5qHvmm8BcIQPOUrbJDXuqONeJPZUerx8pLUEaSrNcLodXTw3C29vbOBuenDQHkW68WczISrVaDaVKGJowKsyWKy0Pp6BQ3AFgreAw8V3ew+HAWLqygF1yGfESKs7S8z75rq5EMZZ8F8U6GAz0/v17DQYDPX36VJLiupPJJFhNPk9OuoeoMdAOnKQlY4tCZV10Op2QYdYbxoU1hb5h/lODucoRcLDzOdsqEiAFifl8Xt9++22E7pkTTvoCBMAuUZ6MzVHSMpyIMRsMBpnwM2PqhtnBJk43bDgpGzg7nhOK8wY7z+dYP4BpdJGTAr7uPBKGjksBA0w7OpTPOotPzdj7+3vd3NxEyLjZbEaqCvci1YdnPDg4yMi0s8fz+TxAJv97pCFNN3j27FnIKuTLZDLRv/7rv0YNUAf2NH8tTQf6XO0xAktanmpH9MTHzIEaMpM63p6Tz/u+FmD++dtBKvoN1s+jrOhGd9T9Psi/gzxn4Vk/PIv3NQVg3Je/paUcu+6DVJpOpwFOSclCt25vb0fKYC6Xi4N/fH2hAyDD2G+BE+rpKIwp8uopD9SqBwPBlBKR4ZlIg2s2m/rzn/+cIRdSbPBzyaxPbpJKvSIfTB4KwIc3416GM6m8z2ADWPEKUDBQzAA6kv0RuGazGQbO6/c5K8qAMqH0H0Gi7wiLh7r9R1oybu4BMyEuiExwsVhUq9WKGpA+dngsAAIKMR8eHsZ5wtvb27q9vY083MvLywjHb21tqVKphLC7gvKdqhh2FiJekbQwPNVqNRYTIGowGETKwWNeTyofvO7OwDo0VzqrPHsPv3HyCOxnq9WKXeAYNN8Y5wAM1gRvk1AzoIHQqxtj5oF144oJmUQOYLpdobozgZOWGnbPj3I59rXgLBfgh3WAMvYQrOcY8gxXV1fq9XrB6hPqz+fzmTQJZL5Wq4UX7waX9YM8es4TYw0Y6na7cVoV1+l2u3EC2OXlpfr9/gP21GWB8U7HfR0a45WCEwfSRJP29/djjjyP8ubmRq9fv1az2dRXX30V80DzIuDu1JD/5mV4PCVAWjJ5fk031oALdCHXQH5Txs91KilIrqMdtJCa5Iwa93Z2lLqnvmsegNjv92On92AwUL/fj40j7gwy9p6byj4AjL6HgqVsYX4iAnyOZwHAzGaLMD9lGYka3t7e6l/+5V8yqRV+bcYM+7NOOndVX3lva2txIAeAEbmaTqe6vb2NMeC7yCHOqzsg6BNk1x0T6pbynq9vmEn0DBvhpOUGLOQqrRSEo0e+MN/j2txLWpJckA+ARneMAb/IA8wxETe+h3yzRrkXGAFdARbjfScxSFtjfB2XOVZzbAIuwq5AdpTLZf3yl7+UpHA2sD+DwUD/83/+T7Xb7ZUy4nr458jtJzdJMfg+CK4s+RxK4/b2NgrfEnpy5ob6jym4Sz19jAd5feTwNRqNyD9hUFB6rlQQBp8o93opAcT3CSv6hHloKB1UZ6VQdniJnEUuLYESnhDMFYC92Wxqb29P5XJZjUYjgEUulwvFKS03dZ2dnWUWK3ND3907TxlkZ655D6B7dXWlZrOpDx8+ZHbfpQxByjSlbOq6NOYqZc8cnKDkOdJQegjMCLV5iIj3UWgA2FwuF0oA0MguXppXdmBOyW1FFvmeh8XJtXYnyRkyaamUXF75vpStLek53bDGrDeU4Ww2i01i5E7jNM7n8wgvl8tlHR0dhcdfrVaj/rGHqra2tqIAt8sufWONpowCz02/AWU0GNbhcKjb21u9f/8+Ex5N5dO9/pRZXYfm45PqHcaFcGav1wsABRjDQH/xxRfa2tpSs9lUpVJRuVzOhJqRBV6HtWHOyYknBapcLm
ecI0Aaepb5SufQ2SLmCTbTwSifwXBLq6vGOBngUSJ01aoQLRErnCBYsWazGay+A3PG2g+gYIMJgMkrVjhA5QfdCgCBdWLzmIMZ5rhYLGo4HOrNmzdxPYBHSgK4bl4XGV6lb5kn7D/zD0NPep+0ZPXz+bzevn0rSTo9PQ0ZAlgRnscOjsdjNZvNcEyIFHD/lJ33CCny5JhEWu4jccdeeqg/XO6cBUYG+Y6Td7zvoNbHhrGD4ACjoE+R22KxqMFgoFarFeuQ+5NOieyR1081GvrFmPCTPjvX9DSgarUac+r5qZeXlxn21ElOx48pWfhY+zeF+N3Qpf/P53M9f/5cf/EXf6H5fEFRUzZpf38/U2aKyUA40h1iUNPsmOTBer2ePnz4EMbTBUtSZuc9/UNZpSF+jDhG2gWU5sLkA+nfB5izu5ANTwCQyWQS5wz7BOdyuSj9sru7G5ukSqWSPnz4EPeHdbq7u9Px8XEcESdlQ7UsVAQl9V4nk0koWwTdPTun/nEGnIV8TCbSxbpOzUGOj4svmp2dnUhNkZaOwP7+fmZHPbJLUWgMnDNyUlapICds+CPPiOvyG6PoQMNZeRQ7ipvQCh4+90QxO7ilTy6/jEfK7jg4xXCkKTH89h3YrL/Xr19HSB/AXSqVdHV1pWKxqOPj42A26DcGyY0C8ua5tOgFro2spydPTadTNRqNTNh7lUJc5VSlzN7nbqw/5t3BF44FTDJhcmdzpAXj58Xm0ZueCgSLhDyiXzudTsgxlT/SsB3F/DHG3McZJkkZgOHOv+t/B6yAEPrrOao0PuNpV9ISGDC/OP2+9mDoyOnHEFNvmvXkKVKw1vQTGeKZAJEuY/SNaxIB8dJqboum06nevn0b56hLD+uc+v++bj53S/W/AxT+JzTt8wN49Bqn0+niuFhO8kL2nHACnHmIPa1Ugby6LZeWMsHY03+XW3Sby6ATWv6c/GYOAcMOXJEPZ4F9vcxmswcl30gJdKdEWhAdbH7u9/tBVHiqE430Po4wdXaY5+T56SfpWFK24pKPEZFBSAswGuRcKgOOT1K7+Vj7WXVQ09BY6iG7oa/ValGTq1gsxoku9/eLIwtns1kU4QXpk/vkuR2wOkx6qVTKlKhikny3moecGFQGiT5DSbNAEApAhU8UQuNetYdo8/l85sg1FJkDmN3d3UgMZywBlZ1OJ8NMFQqF8IQQsNFoFEqzVqvFd1k4CE1aE5MxcuZ7Pl/U7CsWF+V+WHSwZ6PRSJeXlwFQGQsX+BT4pkxACmY/V0uVo78uLZ8L4+xzLi0WLcx9LpcLJSktC5nDLCGjNFgTN9C5XC52WEvLI0o9xI6z406XgwEMo6To9yoD7/l73if3Ypk7WCYPFboRYM2jcAEE7HrGg2Ycbm9vA2Q0m02dnJxEGg7H8DEuruzcOXSl7mwUbDV9/PHHH3V4eKjj4+PQA8PhUB8+fMjcI3Wo3KNfx5Ya9TS8Ly3YS443dePpNRulJUPi7Bv/A/Qd5AAK+W6j0QjwgAMFGLu7u4t0I3capGXt2tSQo2/cYPNcKQDBAUIP0wCmkgLwOUOF8UWG3BHzfFQ2HB4eHgZzzKEZ0mKNcQocUTBCnYBzrksfUz3AeLltA/yzuQUHC1v0/fffZ9IK3MFOr50y7J+7uS1IbQX1OD0HX1LIrUelkN+Tk5Owdy7TOEbITUoaQAoB9pAlAJWnhUDYoOf4W1KkGBCul7Ibovjf9YvrHPrnjgzj4d/36/Ed8kQ9EoAt4rlYC6VSKTbkpqBvPB7r5OREpVIpIoWNRiOO9JUU8+IRA9eV0jLa6mvN5XA6ner777/X7e1tPKMTEY4ZU3l5rH0UoPoF3ctwg8mgYaRI0GfgvPwA55X3ej1JyxIekgKQMjAoK0LxXINcSRQZG1lgB3zC6S8/KA+MLB4YDI0DCr8OCwZllDIRzlx4fs3W1lZUInBvTFpsRAIAsSml0+lkFpJT/xx15goAkO3KkQWCkvff8/k8Ug2cwi
cX6/b2Vj/88ENmc0Nq4F3AUgOzLuBUyi4mmj8D44dSwMjB/MFIMdfIHY4PJ22RGM44+wYJz6nqdrtRSB0lysYQn0tYHuaTe9B/gKyDW/eunUHzNAJfG270WTso4NQZwzsm988Z1nw+r06no3q9Hg5nsVjMbFCiRNzOzo4ODw8zc+PrJGXiUyYqlcHt7W09f/5c0rIMTa/X0z/90z/p8vIyMy4p4PPXXS7WqaWgw5/djTWyx2sOVj3/DllxRsgjMVtbW5GawX0ATJShIt0K+R2NRsHeYPhx4FfpC5hDl0UHbtKSIXLZlLKEgwNUB9iADV8PLjfYI9+gSoTqw4cPkdqQOmx8ntJSKftPcxvjbDDRCRxcj67ww3hfXl7qT3/6UyY64zYsNfLrBE6lhxtSnSWlGg3HfSO3aeWH8Xgcx59jrzxK6ACSeyDTOACNRkP7+/sh50Qe0X3YdFIq3FbSPCpAhPT+/j6YS+TQbTDfS+2uE3D0yZ1Pdy7pm6cigCskRdlKSsRtb2+rVqtF+iSyBw7L5/M6OjoKEq1arUYutdvwyWQSdWBxIt2Zh1SjnKg7IbPZ4sjZ3/3ud3Efx43MU6pr/58Aqg9uuljSiWTXl5cCgfn0OmaVSkXNZjNyGAC2W1tbAVxRPhg3JohwlINE3yyFwXMW0cOHNGhwZ2BXfQ4QjIJi4BFGNwK+GYbPA655HwHzgwtGo5Fubm7CU0KBAyR4Do6P9ZQHz5n1BeAsGnPlieY+vwCkyWSif/zHf9SbN28ynp4zkSkATQHAKsbyczZX6K4Q/NlI3G80GuEAuEPCD+MtLZwpWBbPWXVvGXYWEEd9WcBYChJxCvzEECm7OcZZQf8NSHEFi6Je5WS6sXdZ41pu/HDiMBKSghlrNpsBtF1Bej9xuI6PjzM7aHHsMFDoEA8FO4uBjDtYlxT5vjx/o9GIXdgpg+MAmN9uUNfJ0KcEAM+Dw1KpVEKm/MhIjCiMJxUeMDAe/fHQObq62+0G+5myluhp5BWdxhzj7KeGFx0vKVKIVrEobjdWOVfeH3fEKNGErvT8ar+WM57T6TTOKJ/NZmq1WpHbPJ1O1e/3o2/j8fIs9Hw+H+sfNt/nC2DJOOHY+WdSxx5902q19N/+23/T+/fvM8A3ddCYP399XWTXdYfjB+br+Pg47O98Po9SkYw7zhVOKA4SzCDPDOAHM8xmy3Q7Ird8BrAGIAU7eIk1aXmMrrOdOCPuuLAznmdAfyFzqW51uXAyAzlAFzIO2AdKRTJ+2CLGbHt7O6oEcWQrBxAATqUFO0rqFXWoHTy7rSFCy7g4S+pYC/n0tJ8ffvhBjUYjQ66lY5eO6adIrU/moLpQpADEb85n3717l6lHRigFwaNz3W73gfdJqRMvwusbQDxnzxUSDYDg+WiPIXiE1xkh7oNA+XVRIu7Vo3QdrHCfnZ2dAJV8djAYhOLiGVCGABrYDNIOtre3Q6gkZRYZfUPhQs9j+AGfTsMzRowBYw9j6KVNHOA4g+6sqjMYq0Ds52qPhRMcqKIEMGKNRkNSljHHyDhT7qwHCgXFhaIF/DNffroHm0+YB0AXCgmZd6YJw02qAXLkzBjPDaD2MYA58rQZQkWsA0JJHvryKgHep7u7O93e3mZADZvNUmWOh+9MET+e54vcpsaXZ4HF8894CPnq6kpv377NKEjGz+UWWXAmjNfWoaX9lB7uBTg5OdHp6WnmPHmOg0XvMt8p4KTihDvTW1tbsUHVN/K5HvQT6GCO9vb2Qoeia5gvB3sYUdYazBTsvKQM6PUUFo+EuGwhqyn75cCU+/gmQzaOTadTVSoVtVqtKJHVarViTNkjAEFQLpejpjTPTZ/R26x7ZBODTx451yWqUKvVYiy3trbU7/cjh4859+fz5/e2DsRAyuo6QcC8Er3DWXKQhly5TsNm5vP5SPFjbD0lj3FC7vguY4X+dZvtOgF9RP9dv3h0Ap2GTna9CLCUsrU/PTrr88d1wUiMCe+RioDu45
6TySROe/LUG8YWQgF9y+aocrkcqX3upPtJlMwVDbvgzjHP5M/ZbDb1ww8/xPH0Tga4Duc91ysfaz/rqNP0N82BKztHv/zyywCkTKZ7B0wqO8w8LOTsCuWntra2Qinu7e2FIgRoYQD5n9e8lJV7c/TXQZ6Hxuk3zY+38+fGmEqK++G1UGoIQaBvgATPNQUYuuftNQsrlYoODw9jx6yzqDwP30Wxp3lfLiy9Xi9CAy7sFGP3fC4HCun8O8OzjgxqyjK68XImo1wu689//rPOz88zGy5cZgeDQShKHCjm0nc153KLjW+AgkKhEICBvjgr78qb4vfIGcbPN3WwPgBVKEwUhxtxX5vuzPhOZWSRvvMZZNUb90Q+MKKTySSYPM7QdoAsKXKouT4y6M1Z49RY0Z9ut6vxeBx6AIWMgZ9MJmo0Go+yoQ72UqPhY/C5W7re3JDSVweb/J/qUQ/ns6kNoOY1Zt3Ak9uXOt0YYKo20C8pu5nU++J9xRHE0Lo8p88Ba8m1IRww3DiKyEcKSAEVyBzfxVYMh0Pd3NzE+mk0GlF2DxljXKbTRX1SIn6DwUDHx8eRqoPDxziz7nznPjYQEOwHyfimm7u7O71//z6TYsXzuxykMuGy8jmbAzwpC1KkBSgkz9ejMXzXySKPpPADceOR1Z2dnczmUr67tbUVRJmnlaSf8zH0OfTUK+wC2CKXywV7jp5jrn0tuf3HniPLkgLfpOk4MJ2sQfqVz+eD1GJjdblcVqVSUb/fV6vVioiAH/d6cHAQpJnborQ8offb7TmEmkcfGXvfyPgv//IvUS7Uv5vKacrcfqz97DJTPvDOqOHl4R1TL9LRPd48nj05JwgPIE3KlgNBEJ3hg/kE4KKIYJRQQjT6ikAjAJ7j44uE56PvPA8KzgECgozCTmn+FChyXQA9/YCxYOLoNyyol4Sg377IuCdKDuWNRwkjyE5UD/ff3d2p0Wjo/fv3ur6+zhiQFNymC3CVAV0HRSktx9sVk3v0yNT5+blqtZrevn2r/f39mCOAJN44DBVslG+CIjrgIRA8dGSC8jqAX9Iu+L6UrcvrcuoerpdJcYOVRgqcbfEQFuCYzU3cL/28AwL6QGjfIxnI8Wg0ijIvyAwKtl6vq1wuazJZ5O56ySLG0je9+H2RYZgArk8fcWan06l++OGHiEZ41CBlI1MAmxrUdWjoonRNMbdU/cA5pzwNuXIYRzf4Xv4GmfXNqsgQ80nKEdVYjo6O4nXmK10HqV3w11JHlvui89HR0jKNxNOYnElMjaV/3xl2dADyAsjs9Xrh6FCqi/Xv7NZ8Pg8QcHd3p4ODg1iPHtnAsPuawvmEXCFig9zyHoD4D3/4g/74xz8+cPzTlhIF6xK5Sm2Fg5JCoaDDw8NY84wPTj4y1O/3M+kR2EocJ2mZ0kQlBOYA4gDM4awtcpPWA3Z2z1+nT8ga6wcM4mXSfIOyOzZc3/WPO8Wz2SzW7CqHEbsOmYH+lBT1uqfTqY6Pj+OgGdY+36XsFvsWcIjSKISkDNbytUR/vUKAs8rT6VR//OMf9ac//SnzfKn8uv11p/Nj7WcV6vcbga4ZfNre3p7q9bouLy9DMHK5XChOqGyK9RM+ZKIBrFyTkInnniIIXioBAeB7AENXMtIyLOCvp7kS/E/hdWeBUZQelvAQkpStLsBnpCXDygLrdDpRHoN7cO3t7W0dHR3p/fv3sfOZ52Ix+uSzSNNQibMbjDfjheKG1eDI1G63m/GwfGx43tSLT4VxXQCqlK3b6uFu+lgoFIIdmc/nur6+jnkCqGNAJGXG3r1j5oYxkh6GkAFrgM+UDaWQcjqGAFUPZaKweEYHYdKSIUDZ0B9+OwPrYTGux/rw43nZPIMT6kefuqx5pGEymYRxdwfMd456H/3+vo65NtfkdfSAtChx9Yc//CGcAJddxoc5WmVIV4VNP1dDjpwF53lYxx4mBXBhRPkh1xMdBvuJsYLpc7AoLR0Z2Bqu7frNN4owr6nj5O
vD//f8OH67nkYeAcIe/eJ+2JjUCcXwuXMNU+mnxUnLiIc7WLzuTCjA1a/l65vm6wrGySN4uVwuIm8eyr67u9NPP/2kv/u7v8vkvjIP2CyXBwc966JzXQ+5U8HYkZpEygd6xqOIjC02FMBVqVQyqU/MLWBLUmxg4prIP6BSWoasAZbIDsBNym5wQz+685rqrFSPuj5BF3uE1kGh61B+IKbcVuNYAbwh+ihNiTx5tHcyWWwARBZxBOmPg1Sew9cx+t+rIUAOgI9wcP/2b/9WrVYrM/er5NRt8c/Rt59kUFN25TGAwi49QClC4KHS+/v7yFeD2vZJdq8fhonv8UB8B9YFRVosFkMxe/5ULpcLr9+viYClocXpdBonrEgKQXB21JVTSl0jNCRlsyBgZN1bdE8CYRkMBiFwpA0QFuF57+7uolzEkydP4lo8t7O10Pw+ds42wKz+9NNPkT+SNgcJ6UJ1IJCyGp+zuXPF//y40RuNRmq326pUKqrVaqHUmDOUJiEeKVu6Jw1LOfOSgkZPQHdFAfAntSMNlSEnKB8UoedgEQVI2Vj3klGWyCpG25XUfD7PnMJEf0ejUQB5nE6AvLT0tImIwIQQ9mdjIKVm0qOFU6DrfcaZZd1hrD3dp91uZ/LaXS4d5LnXvoqJWoeWOlP010FKrVYLI4wBdvACuGJXMPLTaDR0fHycMX445eRFUodaUhihXG6ZliUpdAt9QxZTecCwkfaxam248ea7nrvJNRgbd9p4TcrOJ9UwHAzzndlsFjnR7phh7J195bzy8XgcG00w9g5CpeWJhE52oG/QF547jqEvFAr67W9/q6urq8z4eCjU5dd12jrpXHcKpIdpSek+DnSnvzafzzOy4mCM94ja+B4L9BTED7iA8eLAG0mZOaRv6GKPYtLADdhUUrJcb3qE1eXSgazLudsK/vfd+6wfHDU2Q5Li5AeR9Ho9VavVzAYs5IVSZm67XUcyZ07m0Q90tW9idXILjPP73/8+mP9UHlPyhNe4/6eiVp/cxZ82bpbeeDqdRg6jF8sFNAIKPUcC9pTvu6BSV9EnyQcUhcxOTPdIPZSFcee6NK7phhllDtPpu2O5LmECPu8bXPge13NvqdPpxAKAOXZmk4nn1Jfd3d0oHs/4sTjy+UXZCAcw6Tgi5O4NuuDAXuVyi9qVP/30U3jv6dy64fuY17MuilJ6mP/kz+QApdFohFOD/Doo9R9PxUDG3eil3ihzD2hbxaY668K84UR5ugv3WsX0uZFNn5Ux8ER3aQkk0nQDPGMfv263G6k14/E4cnK5BsrV81YxICg4NlGlZawwFOnmKGkZ7ie0Vq/Xw0FzsIoMX19fZwy5g9NV4JPxXCf2VMo6/i6rzgTv7e1lQKT/II/oPAw1Z81zep0bZHQbuhWwRk41847Dm5IBvEZfHbSl7TFHgOf2sKyH+H0TFWvLwbykjHF3B5I1xTOTz8j9PG2B5yfCQmOd+Rg5UZHPL0u1uRPlz+AMozNdV1dXurq6yujQVG79u6lTuS4tJa/oI04p4JO5cWffGXFAEsAPcO8gCnmDjIJx5b6eIsiGZdeRzEEKTD3k7c/F5/wHHe56xFlIJ3dYm9wXnIOM+HedZccuENVjTfMMgNfUWSqXy5FaggziLCGfvv/G1wg1f93p8vXo0d/f//73Gezg805LmVQn9j7WftZJUih9FKSjdA/JtdttnZ2dBUiVlvl5GDGEy2tKpqHBtC6aMwfcG2Fjcvf29uJ6bsCcYl9lrLxPTHoqMAguCswVFeyCe/l8j756fVNYVAewq1i9k5OTeB7KQrgBkZYLiEWShlfwrsiHTEEL48zxkB668s+70nHBSoXNx+pzNwccDlYYYwAXz84mCWf7UUAuz9PpsiQI4+oKF4XH+GDQpeUJUSmYwxj6fPpvfybKrrDmuJ8rGp9nlAyK2tkCGApn13luGt8hXw/GzjfT0VfkjfW5v78fHrznZmPUMSr8788CKHID504lgD+XW9
QP/sd//Ed1Op2QQWc23EnxcfBxTR24z9nStcX/blTYcEJZGRhHd2rQZQ6EqtWqxuOxWq1WMIKSIvztB4rAWheLxZB/Pk/5JOaT8cYoeyQhdd5cbhhzj0xh/GirnEuPZiDHPDeySDqKs0Ptdjs2ixGx4H3GA0BEegrPhcwxlvQ/n88H4cBpboAWX5seKfC5ffPmjX766adMjl8qB+6cfAzEfu7mds+fg1JHpPhJWWzgLdW9rHtsJ86LRw64HzrUN1nn8/lgGVMHVlLIr/fDU/TQXf58Ti64Y72KzOGeyA/rgfl2tj2XWx4f6hEECBJYy1THAWhxmvL5fByM4v3geXH23MnjOhBwzub75mwH9b/73e/0/fffP8AOfs9VThbN1/mq9rMB6qowgy+y4XCYya3E6Dk4dQPuVDDvrWKA3NvgPd+p2Wq1VgIFJsnBFQbRjTTfobYYn3M2l3Fw782ZU2eAGHAEZTwex2kknm+F0PFMbhioiVosFuOkKd7j/qmjwPs8nxt7FL6nEzjb/Oc//1mXl5ePznfKMDkQRTH4Al2HloJmfy5pOVZp/qmkAKhUo0BB5nLLDXVutDFADgqQU+QQY4hMpRvofKNKypR6nhJ9Q+54LuY3Dd+kToMrM5fHdHwA4ZJUq9VChvr9fnjy3MedRfIcK5WKqtVqRtE/9psx5BkZa0pudTodHR0dZfrsu9H/6Z/+Sa9fv45ruuy6LKRrhs+yXtZFdqWH8+dOYy6XixPxnMmk/55W5fnTXo7m/v5e7XY7StBIS8Ya+UWP+qZKZ4tSg0VzcOo6jv48xlj7PdAnzub7/NPcyeIzPg4eSmUd0g/IBcYXwIOMEMnCHnB/D5eSisVzsw4picT7jIe0lMPxeKxut6t//ud/1uXlZYawoDnIdfDn769iqT9no38p4wsbjXwxD9jP1IYDytKqPFI2z15a2kQv0+jgC8ceXe5Og/9GbzsJhYy4w0U1FNcp3rdVG/9chiDRHOzSnFDj2siT72XhGRhTxmM+n0fJQk8ncDzDfVjPjBG2gf6TIoNcc9/xeKybmxv93d/9nTqdTgZTpdd3WUhB+6dabl1Yg03btE3btE3btE3btE3bNElan7oqm7Zpm7Zpm7Zpm7Zpm7Zp2gDUTdu0Tdu0Tdu0Tdu0TVuztgGom7Zpm7Zpm7Zpm7Zpm7ZWbQNQN23TNm3TNm3TNm3TNm2t2gagbtqmbdqmbdqmbdqmbdpatQ1A3bRN27RN27RN27RN27S1ahuAummbtmmbtmmbtmmbtmlr1TYAddM2bdM2bdM2bdM2bdPWqm0A6qZt2qZt2qZt2qZt2qatVdsA1E3btE3btE3btE3btE1bq7YBqJu2aZu2aZu2aZu2aZu2Vm0DUDdt0zZt0zZt0zZt0zZtrdoGoG7apm3apm3apm3apm3aWrUNQN20Tdu0Tdu0Tdu0Tdu0tWobgLppm7Zpm7Zpm7Zpm7Zpa9U2AHXTNm3TNm3TNm3TNm3T1qptAOqmbdqmbdqmbdqmbdqmrVXbANRN27RN27RN27RN27RNW6u2AaibtmmbtmmbtmmbtmmbtlZtA1A3bdM2bdM2bdM2bdM2ba3aBqBu2qZt2qZt2qZt2qZt2lq1DUDdtE3btE3btE3btE3btLVqG4C6aZu2aZu2aZu2aZu2aWvVNgB10zZt0zZt0zZt0zZt09aqbQDqpm3apm3apm3apm3apq1V2wDUTdu0Tdu0Tdu0Tdu0TVurtgGom7Zpm7Zpm7Zpm7Zpm7ZWrfixN8vl8jyXy2k2m8VruVxO8/lcuVwuXpvNZioUCioUCnr69Kn+5m/+Rufn5/rqq680Ho/1/PlzFQoF3d/fazqdajKZ6P7+XuPxWJLU7/d1f3+vfr8f1x0MBnr37p0ajYbevn2rfr+vTqejyWSiyWSifD6vXC6n6XSq+XweP3x/Pp9H/7a2tpTPL7C49yGfzyufz2f68+zZMz179kySNB6PVS6XNZvNtLW1JUmaTCbK5X
IajUbK5/OazWbqdDp6//697u7uVC6XVa/XdXJyolqtpq2tLU2nU43HY02nUxWLReVyOTGuuVxOpVIp+krftra2lMvlVCwW41kZf55lPp8rn8+rWCyqWCxqNpspn8/r7u5Ob9++1Y8//qh3795pMplk5pU5TP/P5/MPXvexTGWB7/AcNndL4fhMbWdnZ+59SmW2UCgol8vp7OxMR0dH+s//+T/r5cuXOjs70/7+vmazWcjnZDLR3d1djM9kMtFsNou5GgwGajQaurq60o8//qg3b96o0+loOp3GvRinyWQS48ZcTiYTTadTFQoFzedzTafTkE1ev7+/l6SY+62trZDl0Wik2WymX/3qV7q4uFAulwvZHY/HKhQKIQOMwWQyidfH47EajYZubm7UbreVy+W0v7+vw8ND7e/vh3yyBgqFQjzb1taWxuNxyOH9/X30cTqdamdnJ57FZZQ5YSyQd/6eTqfqdrv63e9+p3/5l3+J+/EdGuPKNXm+6XSakXO/Zyr/fG8+n2s0Gq2N7ErLftHvnZ0dvXjxQv/lv/wXff311yqVSiqVSioUChqNRiGPLnOSQmaR21arpcvLS00mE/3ud7/T7e2tisVifBY55zrcn+/Tr52dHd3d3YV8+TwUi0Xt7++r0+loPp+HjZhMJhoOhxoOhxqPxzo/P9c333yT0Yn+N7+lhf7e2dl5oIcmk4lGo5E6nY5ubm40Ho+Vy+VUq9VUq9VULpe1u7sraSG/PKvrNPrOuM3n8/gcens+n4e9KxaLIb+FQiH6ws/W1pb6/b5++OEH/fDDDxqPxw/0kKQH+jO1Y/zPj9+D/s7nc93d3X1W2d3e3p6vsi3ValW//vWv9dd//de6uLiQpLDb0+k07CNj6TZ9Op3q/v5evV5P79+/183Nja6vr9VutzP34DdrpN/vx/yhA9FHo9EoPs/cFwoFbW1tqVKpqFqthl3v9/saj8eh4/6vjpAkffHFFzo5OYl+uJ71fknLOUa3u4zzWezM9fW1Go2GpIXc7e3tqVqtamdnRzs7OyqVShlZ5fr+P8+LXKJ7kVnuz/vYmJ2dnfh8Pp9Xu93WP/3TP+n29jaz5uiz63D64fpiFXbgfdbZ/f39o3L7UQaVxchFeRjeoxPcEGVWLBZVq9V0eHio09NTTSYT7e/v6/nz5zo+Ps58jgcaDAaaTqfa3t5WsVjUzs6Odnd3NZlMtL29HQNAX1CUPPyqBU2fAQYoaR80XyTFYlH1ej0mp1wuRz/92YvFYoBehB+B4Nrz+Vz39/cZZVUqlbSzsxMLZ3d3V7u7uxmQN51OtbW1lREcf27Gjvf8foxBsVgMheyA2OfqY3PtCtCVBc2v52PPe+vQHJDQfO6n02mM+dOnT3V2dqazszOdnp5qe3tb29vb2tnZUS6X09bWlnZ2dkIumB/mA5A2nU61u7sbMpeOTwrOfN59zNO+029fi7yGcS6VSqpWq6EI+IyDSvrl68FlHyDJvSaTibrdrlqt1oN55bMoPcYUw43c8Tnk2JWXyzZr0xUv1wK8prKeKmlptbO1ShZcB7iBW5fG3DLPGDV/fTqdqlwuq1gsxhxubW2pVqvFuEsKJ2Q8HofjdXd3p93dXe3s7IQecfniJ9Wt/HbQ6H+7/EpLQoDG55CdQqGgw8PDeI95WwXMuBcAmvsBhovFora3t8PB+b/GL5w7aemY+try53WZclliHtAH7hi5HuZ9ZNfXoz9/2v9VxIq/n8pFCljXqaXOFU40uigdM+QJMJiSHvl8Xjs7O9rb28voJ5cJ1wkO+qSFE82cuVPhugh9BFAcDAa6u7vL2AnsM/NZrVYz8pLOja8DX1upvfT+Yk/op9tk5DzVd1wX3e54zMcHPZ3eGzuAvHo/S6VS4K/UEUgdJJfVVC5Tnc1rn2ofBajpQk7BXQoSmWCUUqFQUK1W03A41Pfff6+3b9/q9vY2Y7T4Xq1WezAQDOr29nYGva9C4i50DERqcFgUPrA+Idvb2y
qXy9EnVzKATPro4MQnFhBwf3+fASoIFp5dujhQqiha2CgHQi74LDZ/XgfTAPzUOH9K4aVANHVK/LuudNdNWaaLxfvmjoSzIZVKJcYbRYRTUSqVMnPtQG8wGKjdbgerLmVBJEyr9417uwLkNb7vXihzjHGezWba3t6O9Ver1VStVkMmkF+en37t7OyEs8Rntra2ok8oRtrW1pZ2d3cf9A85S5WlO0SSgsny5/b7Mq5uRLg+3yuVSpk59Osxdq6gff79O75+HAw9Bg4+d0uBC8+3ar2jH/js9vZ2yKOvY3dy9/b2VCgUQsYlZYzlqnFJnXsfc9elqXOVAoT7+/uIFNXr9bieA5cUqNBXgE7qBCI7AETW13g8DgdIWkY0/Dv+mjOs/h23cS4/vO+Ags/BcPPcjwHvx4DnKpnnt7+W2rrP0dL+uNzc3d2F7XQbzJhub29rb29Po9FIg8EgAwhh3AeDge7v77W9vb1St7keTWWeuXUnmoYN8Ohuv9+PuSRi4PdAd7m+dkfMHR8pq89oKWhjnbhdx374GKeOt98nXZ9p/xkX1wO9Xi/IDI+YcE30r1/3MV3pa8DbKhzyc/TtR0P83qnUcDrIYoL57HA4DCGaz+e6vLzU73//e93d3ekXv/iF9vf3YzIQmlwup0qlEpMBk1ipVML7SoGq9y31SrgGwuieN31HKPk8LJm0DGdKWeHa3t7WZDKJ12CgECg8QDz51GOH3UwVE2wrwMGNLgIIsIK1cmFzYQVQEV5N53CV4V41pi7s9DVtrozSeViXlj43jBPOVKfT0XA4lLQYv7u7OxUKhWCnOp2OxuNxKE/32FEgyABzlLJP3lJnBhkArPJeOv4+lxg8nJrDw8MAmjC6fCcN//hcechna2sr5A3WLZV1Bx7+XgqQANAuR96HVWwpfwO6dnd3Va/X9dNPP2XCVSkIZez9tVQf+P3S8P+6ye3HnEmexQ018sY4zOfzcIgAqe5MwGhtb2/H+ElLUEU6yyq2I+2T31PK6mPki/AhoUl3RGq1mkql0gMma5WeZ96QBY8UOAPPsxIlGI/Hur+/197eXmYt+T2cuXMZZYzSviA/LsPT6TTSHUivISq4vb0d318FqFJigGdNQV9633Vqq8CRj52n/zjgS4mOwWCQ0W2z2SxsWrVa1XA4zACldI34OnEnR1JEEaRlKguAsFKpxOe3t7cjquuYB4dsb28vk97k8p7L5TIpNT42Pj4u5+hfdKqv1VURg9S+uF1LdaCvT95zImE+n2s4HMb6dGzg0UNPUVnljPj1UqyQ3t/7/bH2UYDqwuMDmyp/N7Cj0Uij0Ui5XE6vX7/Wv/zLv2g+n+vrr7/W+fm5dnZ2QiHOZjMNh8MMYzWfz2Nhk3OBh7W9vR3gwal2D4M5UwU4ZYJ9gFGULkS1Wi3CZHgUng+Y5vHlcrnok3tb8/kiTwXlRLu/v3/AMNFPFtT29nZGOCaTSaQFeIjEFzVeF/1DEaRGm7HyfCkfu1W/VynEVcLmSmkdWqr40/Fwz/bu7k6j0UjNZjOA/e7ursbjsa6urtTv9+O6KExkYz6fq1wu6/z8XK1WS4PBIByT1GiuAvEO/FBa6QLnO95vB2ulUkkHBwfhuKB0metVfUCZIbekM7jhyOVyway5PNLcOXLFyNj4Okeupazz5wCLeXJnbnd3Nzx4zz3nsx7SeswZWMXouMPmz7sObVU/XCaY2xQozmazjGPCHKPP5vN56CpSWEajUYDIu7s7Sdm0Isbd17z3ieZzwL3J+dzZ2clcQ1LMJYwo/U5BqtsG7pmCYEnhnLkcuaPtes4dFJclX1voffSvAx0+m65TxnB/fz+ALePsrK5/b5XBRw+sSo3wsX8MDHzulvYNG+uOpOu0lCUHHOK4ONvt6XZup/ze0nIOHbC6U0J/3O4Wi0WVSqXAMFzPnweZrNVqcU8Hw96n1Nnhs/TDv+P9Rhen13J59egsjXFBbtzec19ky8erVqtpPB5nbBtzQd
Rie3s7SBwf58fG/7Hm2GkV05q2jwJUbugKwy+cKn2M5WQyUbvd1mw207Nnz3R2dqZ6vR4PyDXYcCQpo5AAWIS6pQVzWSqV1Ov14nturLg/fUpzOLy/3L9YLKrf7+vu7k6lUklHR0fh3ftAMqEoQSadySZcKS3ZCRL5XSA8l9YV3t3dXcYD4l4odlfILqQsahiFfH6REwsYANSnDId7577wfAyZ+3Rs/fXHvKh1ab7AH/O2pcUmvdFopKOjo2Cl2u22rq+vNR6PVavVwpMFeLrylZYGd5UTg/P2mIL0HynLdK4yYJ4rhTfv6Ql44W6U03lhvfEd8pXpM0yDrzNndT1i4uFRnMtUCfKddG3583gYlP/39vYybIWP7Sqw78rex9tl3fXWusmstHpzouuR+/v72BhH5IXxSg0ijoKz/OTa393dRWoFxhzZwcD75jQfd+TKGXI+4/dx/cv1fKNrpVLJGPJVTpyn3aTr12UUQsLTW1gjyFgKTgGbvm5cpklRcfl3QJKCE2nBWpfL5VgHpVIpojHo+rSlzlO67mmrjPq6kgPed1I5mEd3OlxHp3aX/MjBYKBut5sBfikOSde4A0GPZHq0J+0nGw0lhd6muT0HoLpjk+r4lBDimqlj7H1wJ94ZUZd9jzZ5S6Mefj3X1x6BmM1mQQKkzgDPc3R0pKurK3U6ncyYpc+S6qu0rdJrn9K9P3uT1Cqq2pUTN8PIVatVnZ6e6ptvvtHp6Wkg89lsptFo9CB5nns1m001Go3YKEWuh4PVVX1zIM2EOiubGlsUKIqyXq+rVqtlQAJ9yOWyifE+2IQh3Hu5u7sL4OuGxReihw5Q2lzbN7YwVghWOgc+wbDCCCAbJtK8Lp+/FMBzfV/kPsbp66sM/jq1VQrdwSUbTU5PT4Ox/9Of/qR/+Id/0PX1dRgaaZnqgVFCxtiIwW5lwlHcy+XO5TadTweqbqSZJ2dCYftzuZyOj48jPIPcStkcbffE0/w6BziwcMPhUL1eL5gEZ+w9D8/778AEsISz6Aad5tfgbzYmcs1yuaxyuRyMlPfDDVI6t2nzz62SeTci69BSdsjHGJ1A1ROcC5c7n2d3ej3/bD6fx1xL2TGVFN+nL1I2n28VqKBtbW0FE5Uy7VyzWq3q+Pg4A1aInKX9TEErr6VA0W1C6gym4U9nNHk2mut3T+FK8xqRdU9J4zOMX71e1/7+fgYkpU5aOvepXvV5SJ28VQ7o52gfW4/kAkuKTXqQL6kjTjiZPGVJOjw81MXFReABt+MpJmG+cVhWETC8784PgBiQ6v3ju5K0u7ubiQj5dXnWdENSOh40Zzd5H3Dq12bsVuk2d/zdfkAC+r3dicvlshEy3/+Szy/SFHd3d8O5ct2bEh/p2nzMAUh1zKfaRwGqK6K0I4/diMW5u7uro6OjDAhksBAaZwdRlJPJJHZCIzQIBErXQ+J4Fg6kAJIsZibXlR3sA9Q2CgSqn/664YJ+R7BXhRp4Fl+EHtrwMeT5faecg0kqBPiOaFeQgFB/9q2tLfV6vQj71uv1lZtcUsCeep6+2Hx8eV5vGJ30c5+z+fNJWrm4CMlVq1UVCgV1u1399//+3/XP//zPqlQq+vLLLzNzzFx6NQg2V52fn8euznK5vBLw+JpJZcD77Z576owgo5Re2drail3QDmLS8JIbeNYHMoe8+waU+XyRF93tdgOkStncWO8z1/V1kMstQ5wps+fMlT//9fW1Wq1WrJmdnR3V63Xt7e1llLv/Tp2lVeP8WCRl1fvr1Fx+3bFCv0gLfXp8fKx6vR5yKilyiNFV7riTajWZTFStViUtc/a5BwZtFSjy8U8dFnfm7+7uIm+bZ6F/x8fHkb7EddHXXsotbc6i+aY+dDfj5SyRAw6uwdilus9BFQ6TO2Y0Z5JpxWIxNiuin/f39wOkfkpHpmB5FWng937MIfsczedRyupgj+iMRiN1u131er2MXLlT4ddy+RqPxyFPUtb28D1kls8gY04upc
456X84bc6kejqVpAw5kMoo90+drlXrCNlNIxHc0/Ua6zp11GiAbpqDTUmZ9eTr2+XNy8vRptNp7AVgUyVjtoo99blgzPw+6f+fkt2PhvgZ7BQReyfSGyCQABsAKqEoaTH4XmMS1hGD5js+EQQ2D3nYyHOxfKA9dOqC6OFZ9553dnZ0eHiYoeodgDo76gYj9eDw/AiPuoAjsOShEr7nWjQPSwAaeN8XXjonjAX3vb+/D/ZpZ2dH3W43M28+fw5cPJSQKkmXi1QgU4P/uRvzQp88tOfgkTmhzt7d3Z2++OILffHFFyE7af5cLrdI5Cf8hwPGAvYyM7Do0sP541q856y6j6WDre3tbdVqtQAp1Wo1wk3Iu9/HWVQHsJ5Sw3ilIXhyc8mn9t3TfA6lyI5sb/Tn7u4ulLEDfhwqnr3ZbOr6+lqvXr0KHUAkptFoqNlsZgxJusb9vunr/vypAU3f/9yNfnn/fF1NJhN1Op2Mk03tT8pHwVh5aJvrsM6Pjo5iHJl7B1wOUlNA5PKF0WOOAYoQCh4tc6BSqVQyoBHmyZ/ViQ3khP7S3NmD+aEBiLgGzC4bplLQx7Vd9/P8zmrl8/kHea+sAZ7BSYezs7PQM71eLzPXKWBlbN2upbrVSYRUPj5n83UJdqCP9/f3ms1majQams/nOj8/jzH0NVsoFCJqQomw+Xyuvb09nZyc6O7uTq1W6wHDKWVtbVqdBNlJ9TnVUIhIrMr99bUAIcD9cKrSSGkqXz4mKSBNCTYHp75h1T/vetgdRe+7g1QH+8htSn6Nx+NMNI71fHFxodFoFDXpP+UY+T3420Ep8/3/DFB9Ynxw0vf4n3IQo9EoBte9qMlkon6/H6/j0VN4fDgcZgoqY/C5nj/QKu/J+1goFDI5WihSmBkmeG9vL+5JUrwbuNRIrGKAnVVLPR4HHp4A7YuT5yEvDDCNkDkoTRVVegjA8+fP4/Xz83O12231er1gXFYJFwoxFRx/3fuaCpkvyHVqbujThSIt5rvRaMTmhouLCx0dHen+/j6zm9mBO54/So/P8Dr50lJ2raRJ++6JpovX++ngoVKpaHt7W91uV+PxWIeHh5mC/DwTv2Fy0rXqho3X/XlR5IwDcolcuzKkby6D/lzIL04T3yO8RJpELpfTq1evtLe3l3ESa7Wanj59qk6no0ajEYZu1VzT0vFM2RN0ksvzurD/Pk9SNrfWgX2pVNLNzY3+/u//XpL0q1/9SmdnZxkDJz3cQJWCAXQR6QNuWKRsKkgKSl1P8B4VV5AZwqHFYlGj0Uj9fl87OzsRcXAw4T+0lH1f1QCQ/CZnmSgWNon9Ab6muT7r2XUvz+Th4pQxxbCj31lvg8FAe3t7ms1mqlQqevLkiW5ubvTTTz+FLqbvrtdXOf+pXkjJl3UBqNJq+zIcDoM55UCbm5sb1Wq1sMU4VewoZx345mbSWFIgtioy6KkZyLqDPEkBhrmvE0SpPpnNZiqXy6pUKg9sseto/mccUmKL5rbD+4X8+kbW9LAAruXrFOzi45JG8hx4pmkI6AEnG9gw9eTJE81mi/RM0i9WRcVcH6SYwEHrpwBujMXH3nTAknp0PkG8Ppstiu1SngHA6afwUAAX71laTibGSFqWg0DYvPySC5orWZqHdhg4XsvlcpGwfX19HXlD5Mc6OHVwkbJurhC9Niqfl5YlIqRsqSrGLgX9KZPB6y4o/l2+s7W1FdUN/DN7e3s6Pz+PDRVv3rzRcDjMKFkH0My5M2y+CH3+faH5c60LQE1BNeDJF4gv7Ol0GsCSz6AcGYdisRgO1Hw+z3j3bOiRlmPjFST4HP1KmSE38M6g0T/Pc5akbrerXC4XJ5mkChBZThWfJ/K7t06qi4fTUIyER91gTCaLwwE8twzmyuWF689mi5NZ7u/vY413u111u90A++VyOZhTfufz+chhHwwG2t3d1fv37zUcDh8YIB9nZyBgVVxRpuArZU4+Z2O8vHnfYVTa7bbu7+/16tUrff
nll5lNlR6x8Y0+GG6u5c4190wjR16ah/FOdQJzRSqWM4zoqPl8WaHl8PAwE94HLDKnzlr6KWqsm5Rp8jkmZcV1wP39vQaDQeY9aZmO4HrN9Z5vjO31enEYhq8z7AN9IELmeqRcLodumc/n+vDhQ9hJxtHH0/vjcu36i9dS0uJztRRY8xo25vr6Wk+fPtX9/b2urq40Go30l3/5lzEn/HjVDikbVaSubLp+scNur1P76JiBvrJJUFoQD2kkgesx9tTsTbGQR+hcfpBjxoQ1wLP4uqN5//01HCxPQ/TnSe/Fbz6HrHH9FNvRX3dafV6ePn0a6xdSJ51vb25/vT+pfHysfZJBTf9PvTj/HMoDyhyvk4no9/vhWV9dXQVr5R65b+BgRzpCya50wBqK1xeuT1CaC8K9Ly4uVC6X9fr1a5VKJZ2dnQW1zXOllLsLAhPnu/1SAMlnWZzOPDBGntvibBTP4SEtv3baXJj9s9PpVKVSSS9evIjd0H/84x9DMaZg05mTVcY69ShX9WVdAGoqo86YOABERlutltrttvb39wN0ITfkJF1eXur4+DhT39ONSbVa1cHBgdrtdjhU9IXfHpZywODhJxQE79HHvb09nZ2dxbG/e3t7EaZNw/a85g5Tygy4QkOGUkYIcApI8NxQPuOAhvFIQ8vkeQMUuPbW1pb29vaCRYHB29vbi77s7Ozo5cuXKpfLevv2rba2tvT69etwtmirDIcDfQcfPm9pjtk6tI85fWzK6/f7+qu/+ivVarVMfhqglHniNdgRZzkAkpLC0fA8TpobIzfuqfFNyQFJmY0nzP3x8XHIfzpvyA3AErswn89X7oL3e+VyuYwB57rIsK/5lHVzx8bXLs4pY+/1glOQ7HPlzwNB8cUXX2g8Hmtra0tv377VYDBYmULk13hM968iDj5nW9UflwtIk3K5rFarpZOTE9Xr9fi+OydOmrgu9xQq1+W+tvkbGUj1sPeTa6LvPVLm9+UZ9vf3M8/r8pFiEGff3RGTlmssnWv66hhEWqwhotMcxSo9dBj9udK5WcWYOoj2+0uKVBg+t7u7q6+++krb29v63e9+F1VuUrLqMdyYstI/p32yzJQrDwdr/r4j8EqlEsdsQokzKew2LRQWpTeurq4CrLmXhKDwcISJfABWsSH0BaWReqdOgXc6HU0mEx0eHkYNSWehVv3vJ/fEABazpXFcYNz4u8eHd43idsVeLBYDvKbeHH3yfCpnTSlGzaJAWXMaEJvN/vSnP2XO1l3ldflYe3tMEbqCWIeWghTpIRsBE5/P53V5ealWq6UvvvjigXPDDvKLi4tgLnO5XCZlQlqcuLG/vx85ZqnycQWKZy8ta+c6Cy9lN0z50b+EWV68eKHd3d2QId89mjJM3o8UyMKiwSK7kwRonE6nGg6HD0Jy9/f3sclGyrJvztam0YVicVHqCFl3RiCfXxx9DGOF3nj+/HmcmCVJ7969i9IzqaOa6odUZ7h+SOXkc7eU2fC+8X+r1Yp5Yd4BYa1WK06SAsymG+CYc9Iu0BOwsw40V4HkXG6xL4Aawegu+olD40aRlKtSqaTj4+PMfLAOmbu00oSHQ7mH79B29sfHjeYg2QkDN8qpw8I13YiTU54W8GfOtre3gziBfb2/v48Usq2tLX355ZfxfK9fv87YuhTYpfLs/fTf6wBQpdX2gXEDiBPZYwOatKyWwHjj/PJ9vy76wtc2cuFMOt9DpumfX4uokp94l+IcrrG3txeHDK1alzS3N3yffnlN4hQnOMZyhpO9LZ6q4qfr0WfuSd98jNJ++TP4uvJ1ViwW1W63VavVom+lUkkvX76UJP3jP/6jms1mbKL166VOYzpmqU5+rP2sk6RWhcHSB/f3JEVYn+ZMIF47SdBMAJPkbBATxYCRm4En7YLI4LpnTx9Q4EdHR7q9vQ2q/PDwMMN2ujeRPj/P5cnvTJqzT+7lrVKwXhTXk5hXeVzp/TFG3mfujdF1ah4BB6BSM/O3v/
1tbDpJmSWf01X98PdYeKnMrEtLFYkrfGSGMQTseSgO7xqD48XNfR5QqDgYsFekV7giQr5TRyoFzd7I+2m322o2m5rP5zo6Oor36QvXSlNSmGNXbq4stra2VK1WMxtrfC4dBKDUUdzO3rsi4rXZbBbH6WGUPEeXH+7LOPMcs9ksSiEVCgVVKhX94he/ULFY1OvXr9XpdOL5WBNuZFYpRwco62TgpYdh21UyTF3IVqsVuoYT0TytiuiT1x7l+ZEp8lmRVWdAGZuUhU4jEvl8PsLY7XY7vuu/YXZPTk4esGRSNo/U07g8gkU/PH3A2VDkyMGz9wOwwphIemBj+KwDXuwNBAHMdNov1oizqmwA3N3dVaVS0d7enr799ts4DOTNmzcZI5+CnZSRQx48xWUd9C661f/np9frRb1p15E+N15OEQcHnYad6nQ66vV6GRvE34xVastSR8/lBn3kNsz75bnR7N73o9xTYso3/aU6yHP3/blTIEtKl29K9XHp9/uxrrm/EyrInzuN7vC6rKTyg/zzbOVyOcYUm7K1taUnT56o3+/r+++/D+ciHW+aj8cqO/yx9rMK9fvFPOycKk2S7PFUPcmXDvr1qtWqrq6uYkclQuJg1Q05oADF4YnsLrCu2BjUnZ2dAKT1el1/+MMfVC6XdXBwEM/gJ+UggFyH12FRYSAkZRRdClTpn3sgfD+9B/dxkCwpTidyT562yttDMJkfxmdnZydKf83nc/3ud79To9HI5OakguVGnNdST8lf87597uaKS8qOFX3HsM/nc52cnGSOu5MUysg388E8kYLirCDy8TGjgVygDHxxM9a+rvj/9PRUu7u7urq6Uq1WixqSXNMNO/PjOUWSMhENxoGxwflDhl1+HET7RhKYIZcT1w3D4TADPO7v77W/v5+pOuGA2FlfX0NsoiyVSqpUKqpUKtHvf/3Xf40Tknx+U8WY/p/K9brILusodRBTg48+wIhUq1Xt7e3FZg/PLcUxRtZ2dnYiH5jUK2m5ocqNmrRcLz5ekh4YZHR2p9MJ4gEwwr2pzez6jN+AaE+F8bWBLNOvNDrFuoIkSEGQ626MN99bpeeJTrCxivtTLcEBacp8MeY4f07u5PN5PX/+PFK+3r9/Hycw0lIb6zIgZevUroODlerZVTpsPB6HjPrcTSYT3d3dRUgZIMhYD4dDNZvNAIGlUilTJ5XnR86Y41Qnuf6ClHDdgYymwJnwPq+nBECq99CZ7rxzPcdDKciWliWiOGLUN3pzSAfHvzsb6lEPx0iuy92pSWWLNc6GSTCHrxF0eb1e17fffitJ+ud//mfd3t5m5oG/U7IidRw+1X5WiN9v5q+lCBgBZIcXu/V8cBwM+aYMV4AeXnQDRm4R4SsEEcXlAuCspAtAv9/X7e2t2u22njx5EkwO/Xcl5MLqygXFhvCws7DZbIYRJt8Jrwdhk7KG1wWdz87n82BF8KK8GoIfweoTz/W9/ImDLcDv2dmZpGVe2c3NzQNHw/MJffGk3l/qwa6DopQepiek/XJFgkJMlZhvAPLv81kvNu2GrVarqdVqaW9vT71eL4AB/UodKVd6LhPOzOTz+Sie3+l09PXXX2c2QXn/Usbd/3ZP2Ncxn3MHaz6fZ4BLmkvNrmwAOWFPXxv8vb29nfHGeU5f76vCaqQ34GBJC/m+uLhQtVqNYxHfvn37gPVzWXDj6c++ytP/3G0Vs5D2jXlCRp3tcydJyh592Gg01O/3Q790u91MTnzqjPi9Uz3ANT0ygM6nDwAMN/TO/DPPnuLh+sWb77J3OaalazntH+yph3MxxPP5shxVrVaLWps4bak+5PMQDl5izfWh27dCoRAl19DjT5480ddff63hcKirq6tHyYBUNvxnXdpjuIB5GY1GGgwGcRQs84d+xNmVljXHB4NBsPPY1lwup6urqwdEWSqvKeOeplJAtnmKlGMQXsPJ4tQzWoqJUpviTDo2fRUJlL7G616Ngs/B5jsJ4oxpeo0UIP
temXTemDt3GB1vcOIm81Ov1/X111+r3+9HfmzqVPo9VunlT8nvR+uqrLrAKvYhBTF4Q/6/pJhs8img+wGVKEcHCAyas0AYwlSYvN9MGn3jXu12W41GQ7ncgpFy8OA5fCh8vA3C+F630UG276CdTCbq9XoaDAaRBE+/vI6jh6ZQvNQudfbrsfxbwkLOUktSu90Or9DHkNBpLpfT0dGRvv76a7169SoM/yrv0IGrzzWyQEvzfdehuUw+ZvQwkNSO9N2eKWPjNW5ZxCmglxShVT+lg2sgA1J2I4azU8w5aQIUSu73+3r37l3UpaM/hUIhZCHdUU9/eBZ3lKTsySu8zm5Y33iTNvruOeUOMjHabIACpKZnr5N644DKvfVUmTv4ODg40DfffKNf/OIXcYKPz0EKeHl9lbJcJ0NPW2Vw6Td/dzqdMDqSHgV5zPPOzk4AsFwup4ODAx0eHqpUKkWur+tjfrivhw6dJSP1AplHN6BfiThUq1VVKpUHjpNHY3wtuB1I58gJhDRVxCMB7nw6UQLD6Rsi2YDI+5AEziqz8Zf/nbVL585BCnrh7u4ugBY5qb/5zW8ievNznaYUmH3utspG0AhNQzIxb6SepIcscB2PSpKPLi13368aA+SXlkYB3Y57eUhn0mn0r1KpRL9db/v9kQcHzqnsup7zsfK+IbNeD95xDuwwMscm3jRylzK03m9/X1pG4FLQ7ak44Cg/Rev09FTfffedzs7OMukEqzAC1/y3tE8edbrKY04VvAvUfL4I4RCSw/h5KDL1dqDqEdjUK/DcUj+VxvvpHgRC4icxARgAItSl8+P/VuVreG4ngpIyax6a4rvj8ThybqjHB1iFUfCxZJMCHhLC4JuoJIWx99xVBwvT6TSqHXhLvaJyuayLiwu9fPlST58+jWdj/B5Teg74/O90Tj5380UoPWTRXJkyp4QpYP99U8WqRe73cEaDufExSccKuUaOPB2Fo30xYIQSDw4OdH9/r/Pz8zj5zA8F4DrIv7MUKSh15c/6Qm7ot2/I8jHzHageisJxpF9udFjfzh5531xenQnzULX3i3ZwcKAvv/wy1rIryI+FkFLmw1/73C3Vuav07HQ6jSoGrlMxqG6ckDdK0W1tbenk5CTKAJI24bvfkSMcEJdP5oYxZj53d3cjlWo+n2eqrEjKGHoa/UsJCTee6YYkvufzlq5Lj1IBOhzIS8sNqzimbDLFkXJiwMEvfSMH0J2mdA5Z487us8bIJTw+PtbFxYWePXuWIQdSuXRneRXIWIe2CjAiJ37sLfmf7I1wx8Tl3dMA3BF2Z8rJkdSRcueF5nqJvGt0oNtwl8OTk5MHOtxtir/nxJa/72vI7+XRUSe+0JEOTh1/eBQhBdncg9/uMD1G3HCtNJ+UseDo+ZubmxjLQqGg58+f67vvvov6sC4H9CG1kXzmU87VJ1FFivZ9cbhQ+UNzDn0+n8/UisTYI1CwmjyA5x35zjuuy6SlBe59QPzz9I8TKMbjsS4vL6OAPYOXbmhiQtxjSfN93BjiubCDHkEjX6TX64WQedkeToORluEx+uy5WHzXdwCmnvZ8Po/FhhC7UHr4mD7v7Ozo5OREr1690sHBwQPnIRUof437+/Om3uG6NJeNVQwFC9437AHYJD1QAsiEs6nONEkKhgUDL+mB8adv9IN7kV8JQAXwUtbjyZMn0QeXW77vxjFlE3hW5gxwy1yivOk3ZaEAMzBiyKn/pCyYl8/x43YdUHikw1krnzsH9HyH5yyXy3rx4oV+85vf6Pj4OMOW+XzzGv1M9cU6OVirjEzqYM1mMzWbzYxOgRl0h4oQJjrIHXIAAmyeA1RPv+L+zqbAPtIgDwB6vj/A81rZve8ymbI4ngZGWzWf7uQwh6nDAVhGP7n+bbfburm5ic1UnkKGnfE1j9ynutTrtKbOL2OT9hM2lTrcz54908uXL3V0dJSRV9e9qe73PqxTS/Ut8kpomrQS5hvdK308ncRtMXLP645N0A
/MJSCPa7pDjXwirzjWnn5AbrcfGIDu9XXgZIgDdR+PVWkpbl9Ys4wd90hZXdYZa9pLb0rLGuzuXDpBRXPdhzwh6y5vjklwEni+ra0tffXVV3ry5Ekm33WVPPBc/nwfax/Vyqso3xQBo+xRihi+vb09bW1tRVkEBMuFzXfiA2YBdQgsR0qyKMn18esgeKmi42/yYPv9vtrttsrlchQMXjWIbmydok/zQN3z2tvbC4WDB85kI1CSMierTCaTKLbNM8L2oizZWZ6ZtATkpAYjZVLcaDnDUCwWdXBwoBcvXuj58+dRgD5lbFbdL1Ua62jkaanMpiB8PB5HEXnyp9NxA2w6e+dHwiEnyE7KbvvYoLC9n658kVeUDrujG41GhGS9bzT6kYIJKcsUerQBGXEQ7VGKXG7B5DcaDbVaLXW7XfX7/TjuzsFtKgPoBjZBOoBwQ56ODYrLax6nMuY5wru7u/rlL3+pr776KsB9qhT9+f1/H/9PefP/3s37h8zQb9cZbK7xcDYAjE1qXpnCwR36hnQjd6iQbe6bGhlkR1qed47e8b+RocPDQx0dHYWhS5lU7uHg2HMHXU7SnHE/Xcz1G3p6NlvuDfCxQdcSTvVyXO7MTCaTGMtut5s5utsJjdTGSQ9BAPbOgWa9XterV690dHSUsXGp7kqb65J1aOmzu/zO5/NwCLx05HQ6jQN+ALF8x0kg5GlnZyfS4NzZTZ0DQKhHRtGLpBlyXY8EEGUgTe7w8DAT+aGl4+7zyY87OMiAgz4aMuvP4/KUyozvCWDcYZi5lhNsYDT6wPpw+811HQe5DHNN1rqnauzu7uoXv/iF6vV6hu3lObhGClw/pXM/mYOahhAcnDw2cEz85eWlms2mptOpBoOB+v1+LGxH+tyjWFwUTOfEj/F4HN4OAAyPy2n9lBFh0hxIUnoll1scBQqA9JANk57S8wBNZ8B8wPG4SCFwxQ8DRr/4fuqlr7ouz+ZnEpM3Qz8dQLlwsshS0OhzVyqVAqx/8803ETZ24UmVJDLhQubXXRcjnwIyN4DuWDkYJYRBPp7LByGq0Wik6+vrODoWZ8qNkO/45Tr0xZVEGoqVlClhhqHf3d1Vq9XScDjUy5cvM+kYPKsrPL+fvwcQXcXoSFmD4KElAE+/31ev19NoNFKz2dRgMJC0DDmzJnu9XrBo5GM780DDWPhaZn372PhGQWc8WDNUp/jFL36h/f39jB5w0MLnXbb5n/uvQ0uBtfQwbwy2hNQjH1vWPWF3/i8UCjF/rVYrs2EI44bTlTLgqyqjSMqkczDfGHZJmfz2Z8+eZYxmKgvOnHJvtz/+HQfHtJTJ91QUbA71dUejkWq1msrlchwKwQYQd+Zd3srlcowTrzG2fuKaz5evVfrl+pnvTCYTVatVff3115lo1mMyuS56Nm1uh53gQH9eXV2FrcS2Y4dIUZOyuMCfleuzEU9anRKDXrm7uwsZ517OJK7a67K7uxvvlUqlqPSzyhFAzrin992BIp91+UUfuePOM+CkoSPT+QZkp6/zDMg7VWjADDhyOJgO6B1IOwtN/9y+5HI53d7eBqClPuqLFy/i/cewxypc8Vj7KEBNvVQ6nyb+OkpHcX7//ff63e9+p9vb2wBqDtYov4EQsrBrtZru7u7U7XYzxh8mxr1uKGzvS0qLF4vFOL2k3+/r8PBQT548ic86sJaUUSD+fIVCIQyCsxnuvXn9RvrBc3tRdhcIFibX8OvTN3IDXcB98Xt+H4rbWQiExcOzjDm7zk9PT/Xs2bPMUZ/u/blRSec7lZF1aKmnxu/0mVj8MFI4Gc6gcj1nSzH8jDGKkDC45xC5U5f2jf95jXqUkjKK7+bmRsfHx8FAAUzck+bHHZh0U5d79Wl/YE9hPF3RzOfzADbX19fhYBEZ8M95sX/WvLNO7vX7enY5JSzdbrdDEU4mk2BuC4VCjPl0utiQ9fLlS/3yl79UuVxeCexWAVE3Cusiu7TU8PpakxShUg7gcOPkxt
HXJzqRXEtn+pFpXx/z+fyBo+sOjDPvzG+qN/v9vo6OjuLUM+afOXXj/pgjjGzQX++nr02e1YGBj99oNFKn04nSRZ5zTaNPfs+dnZ14VnQE8u6RM2f1HIDQWK/0nXsVCgWdnJzo5cuXuri4eJBulcqAk0fr4ljRfM35/5SKopqDl+jDVqbOizv07rAyJz4eLnfpuKDXPDqE/sRmum0FyB0fH8fGrBQTpDplFVPo68GjQE6Q+BhxXY8AOKPvm/pg9cFOHt3jOk6EefQ21XmMn9sJ1mQ6t0QZvYbsbLYoc3dxcRFHWTteoKUy8f8EUAEzDv78gVLFQGcvLy/1/fff6927d2FwnBWZTqeRfzmbzcLjxmDVarWYONhXvCbPj0NxpiEWz9vjnmyA+eKLLzLK1ReIex6u6Fw5cA+ML0KG9+3eOgLjbFXKjI5Go4wSc2XGuKTK1gGP59dQDmNVXgfPend3F8WOGbdCYZH3+Pz5c52enmbCVB9TfqmQp0DsczZXBlI2L8h/eI/Q3Xg8VrPZ1O3tbYYFZLGyM13K7tQtFAqxCc6ZLAxZ2rdUtuhHpVLRyclJ5iS2VqulyWSib775JmTOZZu+uGz4Jivvo99LWjpSKGrCYih/3sdAe7oK96R8DA6jfx/GiRItHkJibBkfUnDIP/dnY2Oh56359be3t7W/v6+nT59mjiNMw9UpgHoMuH7OljINvOaAZD6fq9Fo6Le//a3+z//5P5KWeb7oJAd+7oS4UUQH+GY3Xxuuf/gebKm0DBUWCoXYVCgt9DalgXK5nH75y1+GLUhLN9EAn77J0AkIXzdSdkMgsorMcA13lrgfzg9MqqRYJ7BsDmKcmEGvI5+9Xi8ORyCVjfF2481vv85wOMykVEynUx0fH+vrr78OI8+1Vhn5VK+sQ0v1mYNp1vDbt2/1+vXrqGtKQ14Bh4xxCvTd+XWHDBlB7zlTii720yzdbtH4ezAYqFwuR6Uf1qJHLP15cTa4Bq8jLw5GJWVIBb7vIX5P1YIppr++a5/+sF58H4qH8H2tc33Sp3xcuZevF8Awc+nsKmNA3yBRVmEQrps6lh9rnzxJypViqiBXLRyE6/T0VF988UWm9iEdY1d7uVx+cAQZE1coLGr0oWBcUTx4iGJ2VzuDyHXv7u50e3urw8NDVavVB4m8PAebRpzSZjK5FkDQJxwhgf3K5/PhoRM2Azi7UsWDzOVywYTQHyYT0ICQpBNPH1nMHEMpLUp7eFmPNCRyd3cXtc12dnb05MkTPXv2TJeXlxoOhyuVjY8Df3tbF6XJQvAQYQpAeKbJZKIPHz7ob//2b9VoNHRxcaHnz59Hro0bGQ/f8OwemgTMYYhhOgF3KYBg7mDfmT83uJeXl3rx4oX29/czQNk31bnD4qeDeUTBN7VIWaPP5z2kQ/SB1wHHOJw0jnf0Z0Vmua6XTHEQIS1ZEvSBh5bw/D0awbgh21xvd3dXX3zxhZ4/f65ms6lerxfXTj32Vc7KqhDe52gpgJaWOg1dSi7l9fW1/uEf/kGTyUTPnz+P71HKy+UWOXXw6dEgP72L913/+foh2jCfz8MZwdlAH25vb+v6+lovXryIU8po/lz8n4JxnsV1DIY71YWsLcYol8uFPiaUDnBhPIbDobrdbgBm5Nzvm+aUes1ZdAGyz2tO2NAcbPMdAAkAhtQiZLjVaj3Qr6nudVvxudsq4Oa6DLv//Pnz+BtGD5uJDiB9ajZbnn7n93FAlzqd3BddjJ6guL+TWBxY4XPBfFxcXARR8LHxdd3rOk9aRqHTfrqTmKYc+NhBDPAe92NfAPiC+yNjnqLloJA1ijPJXiEHjdxbWpYL9Cor/lnWKu8/e/ZMz58/j7rUPi8p0en3e6z9rDJTrihdMOiAL5LZbJGQ//TpUxUKhcwORweCrkxovmHDDRDGyYExfXEhyeUW4XDyTefzeeyWp96cb+pwEJsKDs+GIkRZec4Xjf4i9JRSGY1GarfbarVawVi64EjKhC
mcyUGZolwBHc66uRfp3g/P5CHoNPzAd1DK+/v7Ojo60pMnT1Sr1R54tqmiTKn7xz73udpjHtqqBQOjwedPT08fHMXpn18FZDB6OEp47C4XLvt+beahVqvFrnlk8/r6Wvv7+/ruu+9izF2pp8/F36miTDcEMj7OwHvYCacKw+2Aglw8nkFSJrzJGLneSDeRpQCftQH7SskiSRl943lXjCfgIJdb1Pe9uLh4kMeXsi8OTB+b08/VUuCcyrDr5Hq9ru+++07b29sRcQFISUu9i+7AuUhBJ5/xUkzOtrrTDrBw3ezMDf2+urpSqVTSixcv4p4uk66Dna1xp9DlxJ02r6KCPk6jXf5MUnbjTC6XzasmV4+UMr7renkymQSI3NnZUaVSibquAAlPm3AH0W2WRw05UYnvbG9v6+zsTK9evXqwadXl1f+WsgdyfK62igBI7Wmr1VKpVNKzZ8+0t7eXqarg6WcASsgTz8Nkrj3M7/rJCSoOCEn1OZFGTrVD1qfTqRqNhur1uk5OTjJ4YxUryHymYMx16Sqc4jY/7btjEbcn6ff6/X5sOnNiz++FM5vKDlim3+9n3vP+MHdgBbcbKWZCnnd2dvTVV1+pXq8/GCeXkZ+rdz+5ScoXqit2/9tvMp/PA5iRMzYYDDQcDtVqtXR5eRnHazr7h7cPsHRqfD6fxwD47mVnFDz07wqdPFbfYUY/U+XDb/ee0/xQf16UKgYSo14qlVStVlWr1WIBwGhQgov+s9ByuWwRf18UeJ/+nrN5CJwDBArO8xkXCpR0uVyOnBxSK05PT3V6evpg9/RjIDo1oOsCUD30Iz1cIG58MHhff/21/uqv/ipT8448R9p8Pg9mzpmpwWAQSo8fP0nJjTceJ4oXQ4aHz9wNBgNNp1P96le/CqXqjKH0sAoADhUyRR9TJcv/HrJytsOfxc/N9lQdrodX7jLi7BLOVaqouTesccrAwBCnYMuBpwNrSdrb2wsv3sEWskDfvB/r1txIePPNHO5ckfuF4+I/nlblsuZGxutTIh/pmNPy+bz29/cj3SqXW6SluEwA9CaTib7++utMxQB30NFP7iRJqzeJuRFNZcmBgOs7NrmkcuJsLQAp3fDnTpG03Azj4+ljRNQkl1vup3Db4fZDUqQJ+elrgG9Aar1ef6B7+fF1Tx/Wpa0CH6zjZrOp6+vryGOkUdM3BdpOiKXAj4hTWkM5l8sFK+gVGlz2ZrPF7nx0HEAXh4FohMtd2rfUSfDoa/o517Nul5AR1gPPzDW2t7eD6EjX4Xw+V7fbjQ20aQ4qZEeKWViL9Xpdu7u7mRQGPssPdmxVNMYdSGelz87O9PTp0wypSJ/S9im5/SSDmjJNqUfnv90oOUDC0JRKJR0eHqpWq2W8IoSJAb67uwtknwqV7+51FnJ3dzfCSPP5PIz9YDDQ8+fPgz11VtFBbsoI+TN7474OgNzj8PJQe3t72t/f18nJSQgvnot/l+eBTncvCCFI88tWeeUIUAriabzOM1Gip1hcVEwolUqxmcEXdbqofN6d9VinloYMU89Uyi6YyWSiZrMZm5w895Rx9ee8vLwMxcC8MRYoC+pM0ny+cGiQJ1JhnBUcDAb65S9/qaOjowcADlCIPLiCS/Nmva1SuA4q7+7uYu6parC7uxtsEc/DOAFgAfk+7h765HUUJErNw1vkttJ3ZB72OWUrXCYZ12KxqKdPn+r4+Fjlcjme0xVwyhism+z6OkP2Vhk3aTHGnk+efsblya+NPmVXO0Yd51VSZs4kxZHNlUolNjzlcssDRVwnjsdjffvttzo6OlpJbKRrkTlFhqTsKUz87+NCc0BEmB25dMYTWYIscMDoetl1MuPCRlWvE4zNokYwa5Bn9DXo7DTXqVarkSaBbeP7FxcXOjk5yeiVVWDdx3SdWro+Xb+QU+86jbngc07GIKuA3E6no2azGez9qlOoptNp4AJk2j8HJun1ehk5Gw6Huri4CAfCr5k2l8fUafLvsYb8Oo
/ZUJwcv4ZHKrxCB99lDXupSmxMesiFExHYINct3lcn/ACoEGEe+UvnmdrUfpKiy2iKIz7WftZRpy4oKfPiN2NRTiYTdTqdyGOkqKzXN3WPPs0rKhQWm3YIbzNY5Ol5iB1voFqtRs1V+tput7W7u6tvv/02E9pPWZiU0vbyDLQ0f8gTlN2zx5ijvDiM4PDwMO5NHtQqSr7f7z8ITWI4XMAxSPyNkPNdmFqUNd5muVyOowbxYh1Y7+3t6ejoKML8qTB5iDSdexe+z918XlLjnv6Qj/bhwwddX18/AN2uNGezWcimOzx4mZJijlEAXr3CGQF3IjjruVwua2trS+/fv9fLly8znnyqEKVlGBa2IE1b8bwkaQkaGRM39M7C41377uTt7W2dnJzo4OAgWE/yt3zjCP3zk6mc/Uhll8/DQrlC9CgB32NdAg56vV7UZ727u9Pu7q6Oj48zRiaVY5fTlAn53C01GKmB8c/d3t7q6uoq6kX7xr60XBdj3mg0dHV1pdvb26iWQo7m7u5u1OFFntGFzIefdOZyzWdGo5HOz8/17NmzmCsYRweG6BIpW5cxBej03+tmO8vDWpKUkVcMuh87jI4j95Bnd0fPmbCUAXKgkcqwA9d0Hp29Zv2PRqOIpvn3cfaePHmSSQFLZST9+dztU2toPl9EQilRh/5gHNJInBNSs9ks5BXd0u12w74hh3xXUpSU8kiotDwi3J3xfD4fG6OePHmSSSXi/n7tVc/t+sodCprLetpX5tixDTJIVBYZTfHAfD4POfaNim53XM782j4e/pqvL7dzro/defDxyufzevLkiQ4ODlbKZoorP9Y+ukmKi7l34A/I/74RAS/n9vZWX3zxRYYVBLQRysc7xLj7bjVnBGgcu+hAmHsOBgN1Op3oB3//+te/DqBLCAxBcoPpApN6TBh0Qjw+Nm5syYlBkSIw3W43lCRAwkPHjC3PNJstSjawQUpaluFwgI1SdvAtKUAO3tX9/b0ajUYIF4JzcHCQYV653tHRUaYEiDd/zRcIgrouDXmTHub9SA/DhIT43MlxA+VjdHd3F6ElxvT+/j7ynFjUXh+Xe/K+pExoFU93OBzqz3/+s0qlkr799tvMBg++51UmfC1IS6WEXLp8A+4c6AIIfdxms9nKmpidTifWEPdg3bI2KPGUriNPx/GQnINXd962trbU6/WilIlHDLg3Y+vhacD+3t6earXaAznw5grSgcg6tBSkOYtEG41G6vf7ur6+Vr/fV7FYDPbeHRNkYzAYqNlsxtqXFIA+n1+U1qG+JLn86D4HAuRcHh8fq9lsZjaN3t/fa2trcbIM/V9lfGmuM3B2eM31n48H8uf/c30+58wluhfCBIBK5YnRaKThcPignBbN9YYDdVgsZ529n7DB7pxh7/isl0Rzhmpvb09Pnz5VpVJRr9d7IBsuuynR8bnbKseK18fjsW5ubvT+/fvM5rr5fK7hcBhHkadRORjnwWAQeGJvby/yh92p8I2akh7cg7nGSUCnTyYTffXVVxliCJ3leIeW2kIHY24bfZ5cHzvx4WSZy2BKzk2n0wc62P92eUOe3ImjcgT4AJsDuca4pTpXWurwFJ/xu9frRXrj8fGxzs/P9e7duwxO4xl/LiHwUYDqApJ6hfydeha8xsk8hPtdwTLIXqzfvRX/Dt/jLHuYG2er+O7Ozk7USby7u9PLly91fHycSfJlYD3/aZUhTVsavqSvLpQYhp2dHVWr1Qxz3O12Va/Xg22TFGyph7Wc/Tw9Pc3svIUhwTNkLJ3d5W8Pn8Jw/fjjjwGWX758GYrQi3mXy+X4IRTmXrrPY8o8rxML5eEj/l+lXFyWqWowHA61t7cXCtWVCOAVAM+4kMvL/4SYdnZ2Vi5QaQmit7YWxx0Oh0O9efNG0+lUv/nNb+KaHiZP5dPnwJ8ldRgcZHt+nV83ZRU5Hc3zm25ubsKbJ8e81WoFWwzg3d/fD4DBswM+uRYh/JRJgz3F0eQkFxy74X
Cow8PDTKTCw1GAEnKsU4DvoGaVbKxLc/DMb5cxHAcMO2NEGRov+bTKeHn0hIY+cp2Z3h8nyE+w6nQ6ca/vvvsuZMFzsdFXPv7+21MEnJ1JI0eSMp/33D3X8dIS5KIzAaPkzcI+IbfVajXGgeeheU4pzqdHlAg/Hx4eBlh34sD3XaAzVgF27k3VmQ8fPjzQvx7FWhfnyuduVX/Qi81mU9LyCFjGFbDpDJ3rWOYauUQOnSSj8T9rBD3KHLM+cKovLy91dnYWxJA/A7L2mOxyPymbDuD9TgmPVX+nY+VOB5GBTqfzgJACY6SOuq9fLxPopQelhZw1m03t7e1lQOl0uqy17c+fkj3ScuOVtKzwwcE/Tqikjuan5PZnhfjpWCosrkhS4+8781yBwEJ5LT0EjbAUG3iYCAwrITsHbdvb26rX6yoWixHSHgwGuri40MuXL2PBp0zJxxg1jB3Pwu80p8+9DDwdSapWq6rX68GoDQYD3dzcqN1ux25X8qNI0OeYN4q87+7uajAYRG4t48b36TtG35+B/1Fk1Ij87rvvYgFeXV3pw4cPajQa6vf7urq6Ck/08PAwwnjuEfqi9bFLPcN1aA56VsmtNxT/YDDQDz/8ECFTvG0YcS/mj4xzfX7IowTYO9MtZfMhJalSqcQ5z2/fvtVoNNIvf/lLnZ6ePggzenK69JD180Xv90ifNQU+rDPPC5MWAPLw8DCYf5Tj/v6+Dg4OVKlU4lQdACFeOKktrBH+Zo2k4VzPh4U1JncQgz4cDtXr9fTjjz/q//v//j9dXV1lcrYZq+3t7TgSFnYwdaTXxaivau5ofMzgM2fk27ujLy0M/ocPH4LtKxaLOjk50fHxcTjpHo50vZiOKf1hXfV6PbVardDbOCtffPFF1KH1aM8qh9adazemLrfORKUynTJWAHFPe4EwgBnGYM7n86i2Ii03LXn9anSn11KFhFg1P1tbWyqXy8HG+Tw5E+ZMn+spdOh8Pg9QXalUMjbG12dKFq1DS0ELjb7CVvZ6vbD3AEcfj1WMMGNG9MpTAbmHgzdPOeF41ZOTkyAOcEyur69Vr9f17Nmz+P4q3ck8O/7w9bMKZErZQ3V8XHhmHCV/LwVwRAEgjSTFGIAL0H3IUZrWiG71+zCe1N11wiBlj11/e0SRz3Et3q9Wq4GBVtmjnwNQP8mgph1NDaK/xudpaWiRyc3lchG22Nraipp+GCNXtPP5PIDTcDjMnIHsoAiBHw6HOj8/15dffhkTlnrmnheVegeeG+rPBqPL687QuaJ3Tw2mif/v7+/jlB4Ud7VajUVEsj3GuVwuxy48lBXPzDUcOKYLIX2vUqnoP/7H/xieKiHcu7u7OKUrl1swZ9VqNRTjKk9wFTDi73VoqfHy+UqVgLQESOPxOJQecofTMxqNMnXxVnn1MKsonlwuF6BNyobQkel+vx852y9fvtSTJ08eeOk+7ilrmoZdnGVx+YWRxcimCsLvx7oolUqhvDwcWq1W416w7YTN9vb2lM/nowal1+b1PrpuSRWVb3rwjQOstXfv3un9+/d68uSJzs7OdHZ2lokGlEqlqKCB/D8G2j/23udoKTvjetjlApau3+9HqRkHfOT0EnHyDTfOEKZ6grmkwaK4XHOW/f7+vm5ubjQajfTq1Ss9ffo002+fW+QKoOE74r0mNC3930EzjWu7c4z9QA9iOHd3dwMU9ft91ev1WOce1cjlFmyo1/D2scJWeUTQI0tsPmWdp7I9nS4OqhmNRpmDY9zRzeUW1RHQHQ6a0+deF2fLx/xTzpXn5CKfyHOpVMqMu+spIgSMLZ8HcDrARNZarVbGpk0mE1UqFbXbbfX7fe3v7+vZs2eSlnKEjkyjhYC81LZgJ3ydumO8SrekhA79Te+Lg1+r1eLIeJwdxgcW1YFpSs6ltsPJEmzc/v5+5jOuLwDGHnXmfp5a5vsv9vb24lAmv6ZjuI+1n1Wonwv7gzkQ9c97A3H7AI3H42AMMXiAOQ9/Ow
vLALXb7YwnyXW5Vrfb1fHxsb755ptMn5zB8rCm59GheFwxr/osr/s1UbhuVNjtOplM4vQmnu3o6CgEcT6fx0k4gMXRaBTefb/fDy/TiwxzL/o4Go3CM/S5w7j4fOzv7wdA8AUNeystToQolUpR6sjZwBTkueFcl0a/3FNM2QcaTgQHQ/BbWqac9Hq9TDkkFCjKEjDq5bmkZZiEz7C4CZHC+pdKJf3617+OTYCMNwrVQyVpGEl6eAyff2aV5+zK0Rlml2XujRPVarWCjRiNRlEGZzAYZEKd/Ca8nu5M9bXE/VynMEbIO6FiB05ETohU8B1A2tbWlvb39x/Mx6oxWhcDT3NAnTpVqeEnN1haKn9n7wBK6AiaA1lpWVZqOp2Gw0E+L/sFiPwMBgPd3t5qMpno9vZWuVxOv/rVr0KvcF0nJeiz99GZJAdc7mA5E0a/WVMpSPCxS9nkQmFR83I+X0QCsC17e3tRhWBvby/YZgy2A0z66+XL6Av9AOADYmH1IDcAE91uVz/99JNubm4y9bkrlUoG5HAqTyqj66ZvaR9jdd2xIh8Sp9nH0483T/+XljKCLfSavB6JwYHC5u3t7anZbEY4vNFo6OnTp3FaVAo802dKSQ7WYiq//h1/Ll5PAS163q/hwBGdxkZaT4HC3vt+FyKwnr6VgmX/kRSpLRzuswrfOfDGPvgRwOhynpcNia5zU6dl1X28/ZsYVPeyP8aoERqh4DjKjRBltVqNIr2UOPFak77z1MM1lUolNv34vWazmRqNhp4/f65Xr16Fl8XuPQTB7+GKxw2mo3uEyH/7PV1AU08JUMBpJoQTfEMY3+n3+wGI2PU9n8+D9YIBwLNEidIX8h0BAp7sj1HxOSJMzaJggQCat7e39fTpUx0eHqrVaq3MvXEZ8edeF0Pv4NQN4qp+Il+9Xi9SLZBVwieMEwoG2e33+5IUKRvcO59fpAxUq1W9f/8+syEOdrHb7arb7eri4kLffPNNZkOc5w/Sf+adOfcUA+bPFbsrfvf0YXzd+PNZl3O+h2LkSFM22zBuMEbSavDJaWUeXnUl54we693ZKQ+7FotFffPNN/ruu+/iOqQW4MXjvcM8OCORGnaX33WR3bQxn1JW97hh7Pf7YWRyuWXlFHRDPr/YBEG0ijkiSuDjUKvVYnOOjxlluzqdjm5ubgLMXlxcZI7GpaE3nOnBlniKh7NFaQRAWsw5z+JOtV/LmVn0INdFtovFYqQ4Uf0BIwoY9dOxcEDdYPt68ghGCmgIuZLPjrM2my2OVKYuOPnCJycnOj09DdtGagqpCWzwWsWYrpvcuu51zCAtSSo/Bc/nvVQq6e3btwEskVPArDsmrO3d3d1Il6NeOseI53I5ffHFFzGGl5eXkb715MkTPX36NOSQdUb/kV2vdwuOcDmjpU4YY4HT4v+n69fXTho5INpA6iMygxMEkUSaDbn9KdNJczvo8+JrL93QyLVYH81mU9PpVE+ePFG5XM5EpcFdpIC54+C4kf8/1j65i58HcjDmk+EP4d4CQgL1LC3Pt0WZoDRRGLR0c8VsNgumtdvtxj0wsI1GQ19//bXOz88D3BG+TgXFjTIKw3e6+XPwrM7upPmeAEbGwg0I3k69Xtf29nac/0x+Z1qAFzACWOW+HprDqDvThTCxSKlpmgoCz84z8uw8C44CzAKAy+ffgZoL2Lp586kST+XUw5o0ng3mDWADOHQmZzZbnJrRaDSiPqgDKn57aN9zrIbDodrttp49e6bf/OY3oQj98958vFMF5nNMnx2c8pkU2PC/s+zMrTt2hUIhPGGYX+QYYErJsrSGJnIJa0F+Kdfnmnzed6DDkOzu7kYSP/nm/Eh68Dxci5Cfj9EqoOFO6To0ByIfAyM8K2W2jo6OHjCL7XY7xpNrQxiQX10ul0MXemTF9wVgYAkxTiYTffvtt5nyedLSqXdZ428HmPTfZZR7+/OtYltYJ+hdZAZ58qhFyqbu7OyoXq8HOByPx7GhFQ
eM8XL23QkFdxh4Zney+J5vNsMpQO5rtVrkb1er1dhz4TZle3tbR0dH2t3dDZbcx4WfFCB/rpaC0lU6OB07MAJzOZvNosaulC1+7/V6qbrCNdinQTkl9nOMRiMdHBxoMpno/fv3urm5iV3maY1q7AJOOn1wpx1AndrRx5oDxFT30BgTXw+rdPpstkxrAE9g68vlcqzL+/v7wFUebU3Hk+fg+t4f7sE4QBAQQbu9vVWr1dL79+/14sWLONwnTYFMya302T+FGz4KUFcxg6sYs/TvXG4R3mM3I2FqSVFGwsNQkkJBpPfxhGQeVlJM1nA41PHxsZ48eZJ5aAeUeBNuhD1cykQ60OSzeGm+0YuUgNRLR7nQADGz2SxyWAnJsdBQyhhkdwTc2PMsblAJnfFdno2GUvXvMRa0FPiQgN5utyUtGN1er5eZZ3cwXAGti6KUsn3xZ0wBWvo33rg/33y+PCjBHRtJqtVqwZa6p82GCQraM8eE//r9vp49e6Zvv/02HAt3PvwZUuYvXZeuwJyR4LOwQP78zig7eOD7kjLKCwPMaWydTiecRdYWkQBK57jDhjHm8/SN5+R/FBzAlc9zooqzLqmhZt0Setrb29Px8bHev3//IELiz+bMxzq01Plb1VxPUmKv3+9HDVOMNRtSkEHXW3d3dxlDjPwOBoMYf6/iQXpWr9fTd999p7OzsxhHZAA5drDljJfLqBusNG0sJQT4vsu+g276mEaYnORwecrn87q9vY38+3K5HNGtwWCQ2dTHemed0jf+dl0BOF3FXNGng4ODyPmfzWYZw868UVII9jV1TP33upADPg+pc+w6DHafagqMC1HGlG0kpO3Oe6ozifBwHz7b6/XC3jYaDR0dHQWYcoeH8XXdyT1T25liopTg8r/dAU91kOt2l2kng7iPkxsQR5IiBY9UHpwsmHdwi0dRXZ/TV64v6cH6oa9UHeKzyOn79+9DRzsQxjY6fuKaPlYfa588SSptq9C/DyYPQ6mlk5OTyGlig5CDPpgpHopcUgaeAfNwEe+B8l+9erV4mP8rzK64HJC5EnO2hXt4OQRXQv6MrnwcUKQUOgqbHBkUkqQAKDwHnnxaDodnpPlxfHg57KD1PK9isRhMB6yog6r0mfDuUe4sKsLWzngwzik4ZSzWiYlK52QV2JOWxpBj8bzMCOPiC4xFChNSrVbjTO7pdBo5btIyhApAdIfo5cuX2traighDGranb85EuHx46JvPOsPLb+7prwHm0nWA7LoCZf4BfYeHhxEya7Va4YBSZsfvxbiuyoNKDa4DZNY6Pw64U6bDnQzW+ng81osXL/T06dOVwNPZjJStXoeWMuC0VQxqv99Xu93OzOXd3Z0qlYpevXqlg4ODYE1h9zmul7C9747O55ehZUnxGk5JpVKJGsouH86aOevEGKeGHznzaIWkTH5q6jgAoh3oOvvrQJQ+IcvIUrFYVLlcjqMuAaRuSFlb3hfPN2WOUpLB++Zz5ak5GHJ3QHlWj0oUCoXY/OaAx/VSmmv7OZvbyNTBd31AvmSz2YwNNE60rMIYDuSxiYT0p9NlFREYWca2UCio1Wqp1Wppd3dXZ2dnkbfpfXYAmUZmUnZVykaIUqfe//fvcD9+p68jC+50OCnBd8EUpEoiA+Cr4XAYm3pTR++xZ0oP9fCx4ftsHCyXy3r58qX+4i/+QkdHR6pWq7Evwcd+a2srnIFVTvfPca4+ClB9kaVeEK85WHNwR26Inx/OIvVOttvtQPywhuyu9B2ofuIHu9BzuZy+/vpr1Wq1AFowofQ9nRDvN4NIA0TgBaNEGXD3wJ05TQXIgQOsGYXDYSX6/b4ajYa63a7G43EwbXzXWSQUZbFYVLfbjfsCqFLPjbGez+d69+6dLi8vdXt7GwnikiIXDSeBvBqep1gs6vT0VIeHh0thsQXkYJX5d0X6uVsK7NzpcDngfYwlx+j1er2QOSorII/OPjkbCMPo+aCj0ShAPt9jMx+73be2tq
KA8irg7H/7+z73ruDcWPu6dLl358qVl8udK0t247OuCZER7gc4cDKR53LBTDUaDbVareg/8uabIXkWGLvRaKQPHz5kxo8xB0A5IPacWgwWUQzXZ6tkeV0MvfTQuZIelqvhc/P5PHKCUzDFnOMkATRzucVpYYwVP55W5CeTETKdz+d68uRJJv0nl8sFkHO5cUCaOvdpiHSVofeWsur0V8pGjdzBSskTXs/lclF6j/JSsG/kR7L+afP5PCpY8HzOnDlY8bXqfZ7PlzV7VwEYX4usZxhUj6x4S2X4czbvn2MBf93Xdr1el6RMlCkFbjgzsIFEqrgOYzObzSJHFXKIFDrsG/WUvU/oQRwo1428733h2R5zal3H+mvcI8UhPIcz6Kke9rHzvoOVqGAwn89Vq9UCX3haWgo8/W8nECQ9IBiQa/KC0SfValWnp6ehC66vrzM2CGLu5zjbj7VP1kFNQxV4G/47ZV0QGASJTUJ4O/4dHqDb7WYGDmAHMERo2SRVKBT0xRdf6OjoKAwiHrKHM93LRkH4fVJj7s/poeyUdWVw3eN2DxKgi/ARomQRYXxbrZaazWbmaFVfpIwTO/3JBeT+rtx5zdnP09NT7e/v6/j4OJOjxjz5Z12g9/f39fz58wgvuRJMFYjLyzooSmk1e+ogNf0sinB3dzfyd/gsRdBdoUyn08zYuNfI7nZAF/nQbBja2dmJsiYoIOSSEJ83ZCsNubpsps6Xh/DpG7ILu+WbpZwFcqPp65pWLC5KntTr9Sh91mq1IsR0c3MTDiTrfWdnR+fn56rVahmmjOdxp5A+46zt7+9nQoGdTidYEXLOkH9nAvhhZ7Q728y7M2zrIrurDDpjIj08wQY5xCCT0oHD6+NNDhl6l58UaKX5ZMxTvV7X8fFx5Kgyx27wPPLljqLncLuBd6PsuiVdq860pQYvdcgcLOPMs4aYa0CqV+dw4+xRLdYK40t/Ugedv1N5dGCcAt8UZHoeO45E6ry6TlvHljqbad89dY+5Z+MUOhBdxXjjxHOCXy6Xyxx2QJ3QNJWE/F0qIqDr3UmVlNEbPIPrB+mhc+jOCGvI7bM/s7/Gc/v9/b4pbkn1L+l75FSTOgn+oU6xEwXkaru8sea8H74pETlkvbjTl88vj3YnKnxzcyNpGfUmWrBqvTIOH2ufTLpKQVfqHfAZZxQLhYJOTk5CSRYKi40neKdsxPHEaB4eo5VOLkaXXKvj4+OouedspYedMP4+4ZIyi5++ezjCQ+DOMPiAukImrORC6LkaLAZo+YODg8gZaTQaevv2bbCbjA8AiMnEePNcPI8znq4svd97e3thZNJ8VVfMpDgAqE9OTiIM9hj4XOUxrkNLvdRVDkYK6jj9ptFoBNhkly8e/Gy2KLfE3GCE2Nn+/v372OkOK+3hy7u7O52fn2dAE7sxMWDO+Dkb7ooqVV6pYXcA4LKbOjPS0uGAyXQnke95FIE+1Wq1SHGYTCaRt0x1AhSbGwXAvsunh4DcMXOmiedkPpDpXq8Xn6FGMuulUqno/Pw8U+PXZcLHZJ0M/WMsC//756SFA/Xq1avQq6xfaQmcGNe7u7soc5cyJVyTiA3y7/n4FxcXEe1iPrw0oLNPDkzdwKcMVnp/KXuiH/3kWq6bua/fwx0xgKsDZTbfwdp5eg7rSlqWL/Sxg1h4zAbSX/SJFy7P5XKR3+56yNd0LpeLKMt4PNbJyYmePXv2gJX1z6dy8bkacuu2lNdpPHev19Pl5WVmR3o+n4+oHjvtuQbpamlKCvJE7ipVcKghy7VfvHgRTrDjAu8nulrK5t/zWX6jD5Exx0fucHhz/CGt3j3vY8RnXG85swrmAJTCDvtGdC8JyJru9/sZ9l5SBrSCIyCsIP8cW3l/JMVYY8sgDRxL+NixJv1ZH2s/C1W4Z83/KWjzySyXy3FiA58FWJFn5wbKmTw8dzwpD6uMx+M4avHVq1fxoJ4n6R5Dygy4YnSmxgfMAaV7Vqu8YzeaCKgDg3
RC2bnPhBK+h0ltNBqxoGCdfeMYfeLY11RQUk/LGSk3/AgP84AAezI+IMTLXrksOMDzBbUOilJand+SKswUnBQKBb18+VLPnz+PzztLwQ52H1P3LiuVSjAyLGrGjoVbKBSiEL8zUH4ABWOfgklnoNzgO8Pm8pnKonvLDtgduHrecxpZ4N6k7VBr9PDwMBxQQqOwnKRHIG+sY3+d5/McKO9j+lz0jfJSt7e3Acx4Znb/X1xc6PT0NKMIU9Z0lSH93M0Bz2OhQXcaTk5OMgdvXF5eRhoPRoa/3cgRAvX55ahlN1jz+VwHBwfBntIfr8jAnNH/9HlwhF1meb50k1SaduKsqct6ymBxL2el0P1OSrjztLe3Fye6oZP5brPZjFJypVIpckRTYE9fAbjk+fqa9jmFoGHN8bwwUawjQNOqCEA6tp+7OVB7rLE+J5OJTk5OVKvVooIBeo9DanK5XDj7OL8AWUgsTxOC1CGVjvk4Pj5WtVoNnUtfPcokZQE28oZN89QS2qq9GS53HyNtXFZ97aU62x3utJoMephn5bjs2WwWu/p7vZ7u7u4y+wQ8V5Q+8OObx4mszmaz2BDr88sams1mOjk5ifFgblhbXned51q1fla1Txbq9wnwv9PXfHLxHNNd6ISX/VQbN9SwKeT9sQmKiWo0Gup0Ojo7O9P+/n7Gkx2NRiqXy7G7j+9wDSbYhcY3B9EH/40ik/RAEfqO+9SoMjmAD4Ai3s58vijADMuEUDUaDd3f38cGBITRx8o3clHQn3454Pa+rAr3Qvczfyx6FyQ/vhIPNm2uNH+u0P17tNRArVKaKegulUpxPBsyhOfpofVKpRLAPvVCpWVuKLlVs9nihJTBYKDnz59njoBjLnAE3JlYZexJE3Bw4EyKt1QBPca4uMPBPNIPwIeDP2fe/JSs+/t7XV9fa2dnR61WK9adp7qwtprNZsg1QMmBl6/JdDNBCjrcSXRQKy28+1qtFmFBD2el47YORl7KAo5V/eR1d/4PDg4kLY0mz+wyQq1Unz+cEVI+MNw4rciupCh55KF/UkBSWePvlDF13eR2Ia3qwveZr5QhTZ1hB6WMj8sujdPN0jEkrWc6nUaOLlViGFPWpzNY6FOem3UCYTObzaJGtX9GUkRYHBzwPoe7kErjY75KVtbNuVr1GmOHcwkZxdp1+0xeJOfPc+QrFSZIlWAsqbfpTg/g7MWLFxnn3v923biKpWbMUyYV3OL2xZ3eVetj1T183sAI/gw0dDGv87fbdHJSwQVsRAPIQkC5fHIvxymeT+6pPpSk455cgzV+eHgY9s5xUropjX7/HJn9ZB3UdFDTCfTPSQtlA6uC0PlAM3kMhCsQmBVCnlDVgKerqyvl83mdnZ1lmCRCW14eiD47nY9B874z0P4/hsyVR5oQ714H/ZCWuYSPAQEMthcVp3AzIJA8VSmb2C8pPBoMritrnpvFkAo/DoMD5n6/H7k7udzyLF9J4Y1SNHrV2Hn7lPf879lWsaX+mhtRxrxSqejw8DCMNAbO2Q0vdeQsZso05vOLshzz+Vzdbjfm9vT0NO47my1qUUoK1tUdNu8f6RkomPl8HkpZephzmxp2jybwbMgAz+C/UfasX2ernI2SFmvMDc7d3V2EmzA0noIC8woAceDBs3B/gAMN5UkfisViHGGYrmNyMavVaobt8HXLuHHNdWjMuY9LyhDyrB6ZwmkF8LkOGA6HwVSx+9mZQuTXcwPdAEmKPGDmDEDB/4VCdoe1Oxlck8/Td/Rl+vzS0mi6Dqc5C+zrL2XxfL5TOfY1gg7gOjznfD6PXf68h5MIoUKuHQ097xEy1+MwhQ563DbkcrkgK66uroJVRJ+4THC/ddC73odVDpW0XNvVajVT3qndbmeIJBh/ZAw9wnwh/4SwsYtup4mqQOJwf9cvrq/9t7Rk7z3ViOd0B8TXgj8r+s31kveD9ZDaeB8z1wWMjbSMMHiaHntUcC6r1WqQekT/qBDBfdOcVK7FmD
KuRMMpy+Z2hO/v7e0F6Uc6J3OXykSqhx9rnzxJygdzFWvqAwvSpuwOHXBFyDVcOGGiELD0DPv5fLEJo91u6/j4ODxSFrmzpJ6Yn7IPDlDS5HfYQwCJG28EwnP1nPVNwahPONdB6TFO0+k0QnII8HA4VK/Xk6TIsz0/P4/Pe3I41yNXzJkmX0C+CFzh83lysbgm7DbXKJfLmeP3fBH74vAFtQ7NPdpV3iqNuSoUCjo4OIiKEL5D3ueTZ/dyZoyBy4vP0dXVlUajkZ4+fRrnexPS9w0tkjLz6cbc2SRnrNxb5b7Oorun6oyRr+10s8sqL/+xtU7b3d1VtVqNUBAnyVEk21NMJAVLTXOjQXM5m0wmkZ/r1/K6ia4TGOPxeBynTAEufByd/VqX5nPsY5wqeGm5Ya3f70fkykskIWewIe5wM77oJ+aDjZiDwSD0Mzv+h8Nhhr1y0Ow5n8iVb/7jvvTbv+sgk/fTfnlzEOHMmDsuzmA5sON9r1KAIWX9cHwva4N0KyKE5Df6s/j64nuMkdeIpP/oZIgQJ1Z4jvv7ex0dHaler+vq6iozBo/ptM/Z0j6lMutAvF6vq9/vq9vtPsiZdweL/EbXBTRy0bFdLs/T6TRKrNEPB4ypIyhlnSAwwiq76s/HfKf4iOYkQIor3J7yHA7mUz3F2Hlz+SMt7/7+PkpC9Xq92MzqwNqL+HMPf47pdFndhz5BFFar1Yhuu43le2CyWq0W47QKi32qfTLEn16Yi6cGi//ZGekLnwHBUN3d3QUQkBRFkmFOV4WgyecpFAqZkhRUBoDlQzH6SQirPAQmKlWyPIuULejPzuq0iDDCnY6RX8cn0MPtDh63t7cjJIcHVCwujgvDuHCt1CtbdVqE39+NHaBoNpvFpiD3Uv15uBalWJx98HFz0LMuLJQ/+6rFkM41TByhDGmZpC4pcqHd+EpL5YNi9PHZ3d1Vr9fT9fW16vW6nj59GnnWzq444/UYU8//fA6Qle6sXAW+0p2ggD1AJs+cMqoObt3hYf2gvElXQFYxIPl8PoAMx0r60Xe+XpkTz/XifjCy7oljhOgLr/tzUgzc8x75nCvUdTPy0sNc78d0LlEOHFnpYfiRYzzdkUnvJS03nqBribBI0sXFRTAnFFp3WXYDi7PF3CAPrv/IJUz1jTtf9M0Ze/SiO1jpbzeczvQ7UPTx9PQQ5G4ymahcLscRpWzcAQSgs3lWJx+kJTuLrep0OuFA+F4MT/VhrGAYYa4kZU498rFx3bAOLdU9/hp/Y1eRTwA/rFu/3w9ZdGZcykYT0M+wy26T2u22xuNxsKfIXeqwuF30yIDrW3fePYpIP/iRHu4TcPn2SLHrOtfZrvNTBzWNYnp0jOt6dGA8XpySdnV1pVarFWQUGwKpkpRuMmfM3dnzKC2brfwgIB8L0tU8XZI5/7fq3E8yqI+B1FWfkxRHZHa7Xe3s7GTyzFiEeDS+WcrpZs+RQnBQmuxM45rkCDGhCFE+n49wE0acwcQwI7C+KSQ19DASDiKkbCif/vskOuPpIYDUW1ol0ICW4XCoN2/eBFCVlnlQs9myJuSbN29UKpW0v78f4AMhIIfVmWqAqOfjeZhhe3tbBwcHwaaSw+JJ0b5oUvZkHVq6EFzB0ZwpZGc44+LMOHOL0mTx8z5AM33++XyuN2/eqNPp6OXLlxEuQTbcIHqfkFEHejSXqdFolGGqvLEJw5WHpxF4njZrFODozBPj4PPrsjOfzyOUxulxV1dXUTC72WxGSBmF6CV/CAnRN9a/pAzrydgho862sv75Huvl+vpa7XZb/X4/w6JID097Scf5c7ZVoDSVX/RMqVTS2dmZcrlc1EJsNpuZzabIbyoP7li6I8T1+/2+Wq2WKpWKjo+PM2X/HMiRvpLqMWeKMJ7ICjLO/LrzxHW9uTy6o4S8kEbiFVqQF3f8pGxo35un8nS7Xd3f32t/fz/WKkyf510DTH1NS1mG12WXDb8YbGkpv4wF4IHPAcLSMU
1lZh1a6vy5XUbetra2og40NsZlAnvjUU0nQ3w94DA4Wz+ZTNTr9aJijrS0m54LzE8aUaLf0jKFUMpuikqJCp93103eX4/CIs/pmuGaXsoMW8Aa4Vo8L31B1pxoqFQqkWJ2cHAQY8oBMeynoLnO901PzvLDxpIG4BiAtACXAZwrdxT9/4+1TzKoKeuw6nWfaB6Eup8oGzwRn/zU23QhdkPoQK7b7WaSq8lz4MHxzj2H068P/e2GHePsStuNsrNJztB6OBFgyPe97yk49dOhCoVCJON7fheAcjKZ6M2bN7G5xsedZ/Qd1LCiCO5kMtH19XWkXbB4UO7OgND3Wq2my8tLvX79Wj/99FOc9uHj6PPnho0+fe7m/XRl6bLlSqhUKuni4iLCojAYjBWKgu8z94BVZ1ulZQ3FZrOZ8YR904oz+PQFA0tuYarcJYUz5yw6xgz58T658SoWi1FqhOvj6PA8aT+lZYoLz874pWCHer2z2SwOpxgOhxoOh+p2u1Fxg+8XCoXM0ZI4hT62GC6eFwfMU4BYXyj+P//5z3rz5k1slGCM3CCkoGydWgpSU/aTZ6W+LPPc7XYjHO8lYrgmv92opaFMDF+v11M+n9eTJ0/ic8iEbzSSskYs1aHoGUmxblz3M9fOBnn+K+PgRn2VE5WmknkeuRvH2WyWSQfB2QQE7u/vB5AkerW9vR17BQ4ODjIAimsgo/SVceI6DjCwZV6uzsEc6VvYlMfSX1xWPndL5dVfl7JpKaenp/FMsNSMm6QM28Zcp+zlqkaEllrTON/eUnLGQT/rinJ76EnkiXn29cMz8X8q+6kjyH1wSJz04j1/Pie5/HO8B65hb4u0qBWN/iyXyxoMBrq7u4t6qTgBrVZLtVot01+wlTt3RDycuSaPnc87WHVCEFtGSow/26ccq08yqKmCdMPuk8oipDg8hogHS1kYz+Fz7wlWyD3029vbeDgGfjqdBluLsHi40Q0oABdl1ev1MhuR3Pi7sfXcExc0/3H2MTUgqffjIFXK1i4lp8lLPu3v72s6nWowGOjm5kaj0UiHh4eZgv9+3GlM6v8F3oROarVapli850dxf56l0+nohx9+0NXVlRqNRhTeTRWkP68ryFX5Yp+jrWKf6CPP6sagXC7r4OAgo0zdwXCm0NdALpfLbJBwx4ATaebzeRwT6Wyg5xLRz+FwGCXIaMg7fYFl8l3F0sNDG1yBuszxIy3XHvdxmQA8cE366w4p48n9KLVFcezJZKJWqxVhnnq9rlqtpm63G4C2WFzs6j85OcmEvxyYYBzSPGz33tEr6Aw+9/z5c/3+97/POL8+brR1Yf9TpsGBjb+HLMGasIFBUuiQXq+X2YiEHK1iGzHMRATIPT0+Ps4AT/Qt44iRdR1Lf9GN9JkNre4cpySHb8xjPJx1ccDh8+gG1J1Ifrv+4t407yObTSjLJSlqO97d3anVamk8HgdTJCnDGqVG3tlVdP50OlW1Ws2Ul2MuiDKwNgBcThB5Ks46tVX4gEb/2STpUSLS/2azWQBWaamTeF63zXzegT1lqZ4/f66Dg4OM3eKzDqC4pvfZI2jocgeIaUoL80Ef0e9SNi3OcYT3yQkffyafW8derp9hJ12WS6WS6vW6bm9vQzeUy2W9f/9epVIpIko7OzuRHrG/v595dpqnT3mfSVkZj8chm6n98TFzYgw5cfv0WPtZOagpHe7C4kYil8tpf38/jsTyMI4rsp2dHTWbTZVKpUySs3sVLnScAc7JCJ1OJ7wYPFwXOnItXJC4Np6zK1oAHaEVGFiMtTNVPEu6yw3Wyj0HBBfPmnFz5pYFhxBhdNjV3+l0MmBjOp3q/Pxc8/k8lGepVFKlUomxwlCgJNNncEUoLcttzefzCL/ApB4fH+uPf/xjBpz4InOj/nME7t+zPeZQpSxPoVCIoztRELB60jKM5KWKnCH3hecOUafTCWNMuLRer2eAgTOogIrJZKJutxvrSMpWCiBNBobFAZsbWg8Vsq
5wgMj58jxUB3q+UYs14jKEIU+jAbnccjdnu91Wq9WKPMZOpxOscj6f18XFRchVsVgMxxFWl1QA5DiV4cFgoHq9HmW32AwlLdbE4eGhqtVq1E72+VnlfK9T87F32XKdjF50AEbomHnM5xclfdBvyFnqpEkK/djr9WJ3NOyK65NVjim/iWg5c4qelbInX0kPQ5g+F2m6ErKGUXbmy23TqrlEB3qUgPFJdTNjBkBFbjll7v3793r16lXIpgNijD99clvjjtx8Po+KIE52MB4+Jz7Gq+ZslbP1OdpjTK5jh0KhELWiYaA9IjKbLXb0Ey10O83Y+UE+jMV0uijXeHt7q9lspsPDw7juqmgJY+5hcq7H59K1l6a2MOeOiXDysIOeF8rvdD59zFhfrGOPiuXzy7RF7u3rw8e5Uqno5uYmDjKhj7e3t2FjvJTX7e1tEF/oCMduPm7gGT5D/1Iwyz3L5XLmPfr/c9rPRhMov1U0Ow/B6TLs8PKF70JSLC5Og2HwEEzySd1o8rAeOiQ8wg/evhuzYrGom5ub8NA9f5SBd+FMQ6Y0lD3PTX+ducSwcw12G/MMDqa5h4P6fH5Rh488SPJC2u129L3X66nRaGg6ner6+jryVL1Yr4fF0nxCZ8n4TR5Ls9kM+h828dWrV/r1r3+t8/PzzHi4gk2B0Srl9Lmagy7p45um8OhRQJ787V63n+yFE+Pgj7FBgXU6nVBKg8Eg5J2QaspC+mlStOl0cViFH1nJs6Cokc00LOoy7fIOKyMtQSX94Bkd5KRz6697f/jbN5ewwYn7N5tNDQaD8Njv7u40Go2C+aDOIacV+eEdLu+ss0ajEY4ZxosxYNMbawCD5MZnHWVXym5SdAPh/WU8OAQCBxnZms0WYX6MNg0nnE0pnvfJkdO872DAgaeDhtSIp5+XljJK31J96+CNdUA1F3fQWZe+hh+bO++byzP34zPoWAAp0Sc2/bE++v1+6H1Sd/y5hsNhpGw54HSA7vaQkKzPrUfj3BlxRja1uyk7vC4t1RHoni+++EL1ej0TUZUUUQDsnY+Vr3snpKTF3Pb7/cg3J1LoeCEFna7f0+Z9df3pfWVTs0eZmHOehb67M8b1uabPHzrS5dvHiM+iE6UlueQ4RVo46LVaLfYidLtdbW9vq9lsqtvtZlKf0I3okPR5fJ1zLwfbjo3ABRAI6OkUiPs4fKx9MmFwlWfmN/PfW1tb4dHzXR6ch+DBoIc9/w3B4p4sTD7vE4tSHgwGqlarGo1GcWIBtPLe3p5ub281n8+DqU1DmKuMlbQMRzGB7gHncotjFFMPzb0GP2DA70eYMg1TwaBKUqvVimcnlM89yBnpdDpRQgMwRl8kBYDl+XzxpMqvXq9HAV6fW4wFYDtlcPy5PyYvn6OlLIM3B9WU5djb24udoM60oDSQ6clkcaQndXe3traitBJ5qyxS3wyErA6HwzBg0pKxYVFjoPb394NJ7Pf7GS/UPdvUi/WIAX+7h+sh3lRxM248J+vRr8G4OvD0/ENfr4BQWDmUebPZlCR98803mV3KGH/6CQOcgglnnZ1llrL1AWHdZrNZyLezkr5efX187ubymgLolNF4/vy56vV6VEHxOZeW0RXfFNfr9WKThB9ti7M/my0iCNPpNOQWsOgsTjpenpdJX2ezWeSyerRJesiA8tt1mTNkgBQfH3e+vU9OhiAzKTPE9b3PyI1v5HUSgjSfZrOp3d3dTPUEZ0P9WtLyaGJfu3zmMQDr65Zavq57nTVfB72bgmW3D/xmb4qHfmns6zg5OQk5lFZveHXZQ08CrqiGQx9S5tl1J993W+1OBSc5Ms6uK2muQ11/8prbb3cekRO+5040fXFQnJbC9Cie9wPZr9VqGcyxs7Ojbrery8tLbW9vxyE8hUIh5gRd6puynMDw8fR0Hp9/+uMHteBU+Hz+nPZRBtUnwv92MOegjtwdR98OxPgu1/IyEjAv/vBcA6CYKkS8fLx+7gfIAAgA8lB0Lk
ipsKUeUT6/DN3znHh0rozSRcJvn0AAMUrPhQuPmoLxbDDZ29vTzs5OnI07ny/zGRuNRowN4QDGiNQH7x/P4CF+wgQu2P45cq/cMDpIpa2a68/ZUlZv1Xuu7DkYwgG9lK0TOpvNwgnDAfFyO85y4sEjyzDiw+EwUjMAZ+5tO9hC2bKTmM+5p+zg0Rkm5hiDitwBKPl+uqHOr5u2VWslNcpszKlWqzo/P9fBwUGGmYeVaDQa+tOf/hQHFcCYAWLcGLlzCMjwdZ06iKx/B/Q+3w7w3NFaByMvZeVTyrIwqVweHR1lAF+j0Qgmj/WNXKVVFBhHP5aWEOWqnMvUCZayu53TcWWsa7VaZle6f8//RjY9wgCwZi2RzrFKvzJmfu0UeDBOKUsFQPDrFItFHR8fx6lc5XI51uFwONT79+91e3sbm6x8Hfu6SMeLPmIvHNykpAa2wmWA5+K3g4LP2VbhgnQ+nJGG+Oh0OpEGhE706j9uixyUS9lN0FSTyOfzur6+DsdZytYW9mgq/UYG0vQBvku/+Z/vSYpIJK+5jDO3/j7P4Lnh/hz0me/551w3ehrhKnkolUqhH3DuqGF8c3MThAl9hnBze+FrxrEbr8HQehTGHUNAPjbq3yqnn0QUvtBST8b/zueXx5d5KA3jvapjoGsUUQoEU8/LG4OFp0UY0MGfpEg0d0PsII2BdgFypYAyRJF4n2ATvEIBi8dfc684zWt1wMx3Dw8P9fTp02DkEFhC+fP5oog2oQ1pWZLHhZrn4d6pcaZfw+Ewk+s6Ho8jbYKaaem8pTLgC28dmjshLqPuTTMexeLiLHPYYuYGVipdqIPBIABXWkaN+wGeUmCZngYjZUP1LHCfP8bfxxYlkD4vcu7Oim9UpBajKyXk2xWde9L00ZWg5yLyOZSVtPSe+axv6NvZ2VG1WlWz2dSbN28yO3ZXpU0gW/zPOHupLr7neeeMGZ9JWVJ3ytappevIGSOfH84cZ46YWw/NwYRyHRrf5yAO2JTd3d0HZfmur69Dz/i1/DMp4JOyesJZbz7jemg+n0fdWq4JYOVaMJCpbuO7Pu+u65wwoC9c2/Wrg0QA+e7urk5OTqLMX7fbDXnq9Xr68OFDpKXQT5o7wL4hFlviDBiha4/wubySM5jq8PSen7N53+h7SmKgG92B8s1sND+4hDQg5Cd1lLkXzq4k9Xo9XV1drZwXZ9DThiwjB45nHPw54UN/WbekKXjkeD6fh+5PwXG6WS9lRfntINB1MesBWXc9vr+/H7LbbDaDHOn1enr37p36/X70k5A/Y+SREPADtgyHbnt7O470Rq794CBSB1fZYH++x9rP3iTlr/lvF0rCpTzcYDDQ9fV1sKp+DS8f44oq3QU6Ho+1t7f3IAfPAZF7H55Y76G7VUqThlA4e4QyhBmtVCqZWn5MouceYoi5FqxFCjDcW3cgTEiT1u12A7gzudPpNDZCES5mc4nXG6NPaV5tOg/0s9PpBEsgKdjnlH3xBZfKAwtkHRpK0oEo8uIM+dbWlg4PD3VychLKcjKZ6OrqKgAmCxAQ5uwo9/C8aWeAOMmDxnvIlpRlQzxEnQJj6WE9VwcK/ty8z/Mwb2kqgXu2vo7S50CO+RyRhVS+fLxLpVLklzYaDY1Go9hlfnFxoXw+r6urK5VKJT179mzl+Pm6dCPvhqTdbkcNW+bCwTGbV1xGU722Ts0Ng/fTjT+OLAeetFqt+A45nMyTl8xBT0tLQ81JPdvb22q329EP7k2kyucGRwpdTl4rfeOe6Cuu56yP60D6hP5JmU/WhjswfI51nIIGri0tI0zoZ9cJztC7bnBy5v7+Po4fHQ6HOj4+Dib16upqZWqbpIw+Z27dkQQEuTPGZ9Ct4/E4NhatAqPrIserAF9K3GxtbalSqWg4HOrdu3eaz+eqVquZqj/MBzLiwC8tLo99Jk+feUydd8bada2vLycr/NpO7Pj10udjrflaTZ1ir4ebRr1SW5DikcdYeG++pvxAoc
lkUdWENUnK2Wg0UrPZjAgHsudES7p+/Ihf5pMxI10AgoyI2OHhoSqVim5vb6P/qxytVe2TADVtKVhFkPL5ZcmYdrutq6sr/Y//8T80m8305Zdf6osvvogFBuhxT8SBKdd0ZtENNJPDAyL85FCkeUkpw+lG3b36QqGQ8dzci3ZlhsC4V+Wsm1P4pAKk3j194ll9M5e08HBubm7UbrcjD4ZQL6wcmxrK5XIAVTaX4CCQB+ggyhcsY3dychJCzvPO54ui37ncIv2i1Wo9MJjpnKxLc+Y4ZVVoLnudTkdv377V999/r1wup6OjI+3t7UWoGNbRU0+QM8954nXGl9xR5Ie5Yc7pF2vClR/KJXXCXGExh7CXqxQ87AMbDV15uUykXrkrKwfU0nLTiStZNg3M54s8x9FopNevX2cYOK7R6XS0s7Oju7s73d7e6ujoKApJs/aYq16vF6kujJOUZXkHg4EODw/j2Z21hvVyRmuVLKwbE8Xf/pu/mY/hcKh/+Id/0P/6X/9L3333nf7Df/gPmfXphtNlz19zZ9aL0CPHPvcYKXcg0M8paJSWh6Q4IGRNOYGALvXNp/QXpwad67VUfV5Tg85rqdF3eSe6xvcgGaTl0dOXl5e6uroKYoLDYsrlckRFbm5udHp6GnLt0UQMeBppAdSTz+2AhjGD9EkBu8uF67jP2Vxu0/VEfyuViiqVikajkf7rf/2vur+/19/8zd9ENMTz5N2mIB8+n9ISFKP3eI2N2KvYzjSNyb+HHpeW+MbzSd0hZ224I0FfpYelGRkTB7Wud2noPsc47jhxfc/X92fwNTUajcKxSueGAyl6vV4m4uXy5Jv43EFOwSV4BCci3WeTbmz7ubr2Z22ScgWTXtyBQLlcVq1Wi9qbz58/lyR9++23kct0cHCQ6eQqJO0KZWtrS61WKwbejYt7PYS33KNn4lyRUocPRUjlAEmZHfTT6TRYW/eoJEXJGhdmAAcCl4bJUs/Exw5wfn9/r06no9vbWzWbzUwNw1wuFyVMABvFYjFqa2KI8FLYwcyYc083Cswri9tr+Y3H48hJe/Hihd6+fZtRjCk4Tf/+3M0BnPSQ+edvdvAfHh4GkAP4n56e6uDgIHMs43A4jP9TT91D9BgXT29BKTDWrlycPfV+p5uZJMXGFRQQirJeryufX+42HgwGMRa8Xq1WM3niNF/jGNMUKPAZFDZ9w3CzoWs4HKrdbqvT6USImfvhbLGxjB3RbKQhB9uNE7mr9NOVOkagVCpFeZp8fnl0qyQdHByoXq9rd3c3wm/+3OifdXKwfP6lpZOdGotKpaIvv/xSvV4vynYNh8PIOWPcXb5g1dO1604SYwRYANhy75QtcrbfUzb8/l6mjfeZD+Sz0WjEa/Rvb29PZ2dnGQcllUlv6K+UHUVWeA4cG0mxYQyQKilK/XmtUuSEU6Wm00XtbZwqL+tGqtR4PFatVgvmib7gQPb7fU2n06h7LSnjOPoadJDgDsE66N3HCAsHNaRRFYtF/fVf/3WMn9tPlzWcJGwuzUGUjwny5SkB0rLOaprOlJbJQzaJvKBvuKenA0pLsDsYDCJKM5/PY9+IH7fONeir57vSxxQgcr3ZbJHOyO57+uGRLQA0jhH7HVYRdJIiB/r9+/cxNwDVlMigeS6uA2MYVUnxzNyHDdsOrn+urv3Zu/hTb/XBhf4vSCIv6ujoSP/pP/2nTE5cs9nU27dvdXp6mgFrsCiSVKvV4r5O+Tu7SIMhKBYXR1CiHBg0lMZksjiOz5UNA4rBLRYXR4s6uwCAQdlRdB1Qw3ssrhQIuyFIWTzCxf1+P4Byv98P5gmhoj8svLu7u0wJlPF4HPmo5XI5vEbC0Oyg3t/fzygKT4RGqAHt9/f3MZ4IO+D1U8B0XYz8zwHSyOXBwYG2t7f16tUrPXnyJHbzA3RyuVyc9JXuunejjLLCwGJYpCU7g0OAYqahcHyTyN3dXZx7zvUBpfzt4I/6wDw3coRxI/XFwzP0je
a786WHxc/9B3llhz5rD8PvQMVBNgcYVCqVSAPgtLOUbdre3o6SNKvYIpQyGy5//PHHOEGG59jb29Pp6an+8Ic/ZJ4ldY4dsH/u5kYgBdDoJU6Hq9frevHiRYSf2fhATh7PyXOnJWkAeYDYVUBYUlwXGfa0F4zmcDiMv6Vs+TRC/R554hnRbawN5imXy8VBLefn50EgrDJ2nufKPViv3G80GoUDBaBAB2J8ec1zS7mPR+fob7FY1PX1tabTqS4uLjLMM7aEmr3uGNI31sB8Pn9QqcCBJ5s4V723DgBVehhhpWEjORJ6Z2dHv/jFLwLse1pPeh0apeuYH5dZyCwHffP5XDc3NxHeBqA6I4ptdxstLTcZo1d2d3eDVHDnnCgGNa89KgVArVQqoX+JxLHOaA5I3fkYjUZqtVpBVnmkzkEpvx1boH/dkfN15XalUFhUimDtIduuW51hRb5ns+wBLp5eKS3Xv1eucdn4DvVhyQAAuWlJREFUFF745ElS6SLgh8nhf3KNVpUBAYDt7e2FccXDlBRHS5Ig7z8U7B4Oh5nJQUgwyBhWBOb+/j6Mpx/TxTP5AGIUCdWSNzcajTK7NzG+k8kkWCgXbN4DALJ4UFgYkE6no16vl/Hk+DvdzOVgmblAaAgv393dqd1uRw4qYX4W7Ww2i7JUHpJFoLyYdj6fD6BQLBZVLpd1e3ubSR1wVj0VsnVRlCmQeQyo+qKSFoqJhcr4cSgDQNUZT9JAXMlNp1NdXl4+mhLR7Xb1448/xiY45AIGkmvgTHE/D1tLS3DGvA2Hw2Ao/EhR6jvi8FE2iHQOB8+ubLx5uKjZbGYUPYoRpYficwfLx1tarNfhcBi53Y1GI44/dEcAHUCYk1N10mgEz1Cv1zPrjXXgefAp6Eh1wuduj60h17+Mu7QEfOmahKXBIKaMFhUo3IlI60nz3evra11eXj7YrOLyjdym/eh2u3FPTx/w77pzJmVPJwRs//TTT3r27JkKhULIt98rBdisa2wBelfKVqDgWqwlADd9oAF8kZXhcKhyuRw5qc7uOtNULpczBEU6bkQfeQYcC3coIQwc2H9MVj5XSzGDtHqTKg4G443thSxyG+gNuV1FDDh7fXd3pzdv3mT0K46Irx13Ilx20e3MBTWtXT9xHXSj58DC4m5tbYXtpcHQe8TCMcTd3Z263W4QWE7yue5jjft+Ak+P4D10IGPl9hsMxuYpGFTX3+gEyEPGDWzj92ANefQGsoQxdxn5lPz+rBxUR80eTnC0z2DRAd5HWHZ3d1Uul2OQeM9zHnnYZrOZyVPiAf36ruCoG3p9fR2KDi9+1aTwffro5x3zPF62iUXjgLjf70do3sOhKDEXIPKUKAvF52irgKn/+DgTNgL8IOxeY5PXHNSOx2NdX18H6PTxwwtCuTroKhaLOjw8jKPpYBS5vjsp6xJqkpQxAikDxbz44kHGqcU3m82iLiS5qJLCA/aKE8iOGz3m2ZkqFEa73Va73daHDx9CgXpekRszN8CrniPNmWI+cbiQW1ecsPQ4N7650eceBpli781mM2NU09OZeF4fJ9957LLGHOXzedXrdQ0GA7VaLR0eHj6YR8ZFUmYzlOujwWCgdrutw8PDUKxeCo7wlV/Tx5RnWreWOqfSQ6WehrqZ/1X5+Og+Z2i8nrMfiexEgLQsAcS6cDDIOKd9cr3gTjAsuYcn/Rn8OjzzeDzW5eVlxplwY4+MeR4rJAUGGCOP7qZvAA36COubOrbIOaFi+uepY5yChJ6B2eMkudSg8yxEFZBbd/yIZqV5ix+Lan6Olo6Zy2/qqNLvtNIJaXduWxgvn0cpmz/K/gy+VyqVdHBwoG63q9evX2dOeGIOXHZdRnHW00gLZBJz73o1xUMu741GI+N002dIhvl8rkajEXbcMQTOorOZkBdpvVjvA+OJ/iNlgdQ9ZJ7ohR+V7DbDUx8YJ49ycB+a6wFef/Lkid69exfz53jxY+2TIf5VzJMbfjeeHrrEyw
HI0bysFJPOILIgj4+PY+JQMggjE0CeKw3mKR0gH0x/LVX8/I3iRCEg9BhwhK7X62kwGGg2m+ns7CyAwHA4jPxASXE6TlrfL803dCPpOSssJO/TZDJRo9EIzxsGmE0irth47kKhEEcYzueLsCfFt+kbQozwcm92+RGKQh5cJlZ5zp+zpaDD5196mNDPqReeV0llhJR9ms+Xm/pwbhwoAuzZFe2GypPHc7lsSRKPSvi1/BncK0XBrWIomHv6ikyQSoKcE2an1qMz+gBTL9/izIHnR+Jx+6khrtB4Dtggj6JQBgXGwcuUcP3BYKC7uzudnZ3FekiPSAScSss8KK715MkT7e/v6/Ly8oGj7cBunZqzz96cYZeyGz89xzYNd6YMkaQHQJHrOsPkIArglxINGDIHTGnUypszjKuAjMsNjBr24ObmJhwwP56YSBE5nQ4QAKXcl/5DZPC5VXKLfMDc1mo1jcdjtdvtCF0748Ya514+V7PZLOTWHUwcN9fdPl+kurhOZ+7XpT023+6oSFlCBkfq/2/vzX4kzY777MjMWrOysvbqZTgtzgxBGbZhGLJ16UvD/7BhSLIlCwQkyyJpkbTI4XB6pme6p7uru7ZcKrO2XL6L+p6TzxuVvQgG2HXRByhUVS7ve95z4kT84hdx4vAeYDxiFn2lAQpNrkCs0JaXl0s+MawndXyz3NPmOYDubyZkeFb6bxmxjvb/7MFhTgG/MMcnJyclL9/gudFolKOKr6+vy6Zl5BW5Qd5MirlPsNRE2JBT1hHkhMkWnpkxJ60sYlbyEjth8s4EHykOu7u7tzZMznNIc3tniN/s6TzBY7IiojBz9jCpo2klwI+9DgsCn+t2u5W8Fa6B8ccYcjIKwl6v14sXgrGln0w+1ygD8f8LOayQBzwiKpO3uLgYl5eX5diwpaWl2NnZKcwFNURNz9sDQbFagWF05ilJvBwDHF7ncAR24z18+DBevXpVTo8h3Lu4uBg7Ozslz3Z5eTl6vV4BKChxntH9IbfViftepO8rbH/slh2RbBBRFBTeR3ES3oPxj6jmzVlB4P2iWK+vr0tO9ObmZiUPkGsSunFe1DzARPO68f8ZfNrTjpitS5fY4WhMH9noMD+J9Th8KEDLBTKL4TWbwz357U0IsBvU27P3zncPDw/L6TywBZPJzaayTqcTJycn8aMf/egWoHD9TztY9B85z8rX431XjH1e/7yWwR+6lTVJpIZ8OdelRUYdhoQpjKjWlGV+2fxTq82iDOj2TFiYrDCDxPt+tojbJ735WgYcvA9owR68evWqHABBzh0/yIBlAWIEBx0GCvuD7XK+na+zsLBQiIbhcFjW+NXVVUldY6MlqVR+bvIRsWMAKwgFAAvrhTldXV0tesZHqFrneuw/ZMt9sa2ImEUrmcu8kRM9ZYbYIXRAkR2jzLJynjy5xoeHh7fslVvWVyZ4rCcAo5lc8zXnXctO1sXFRXGIPDboQUc30Pej0c2R2XyWChFEhIynLBvo+kajUcZhOBwW5xMM5Ejf2dlZhfTjGYiiIYPWy8xPt9uNvb298hxOmeH5s7P9PqTWOwGqJygvBL+Wmb/JZFLJvfGiAokbONgLj4iy4QPmjo0YpoZhXED/JFG7zxEz5UCBYIApg5Z39nrTFc9J0jSKpF6/2ZHMpjCABoxpv98vC2lhYaEwlgidDbjDxdwLwUYZTqfT6Ha7Bdy6LATfoTQVu6AB2yjp6XRajA4nxfT7/UoJH4MchP7k5CS63W55FgAcY+M5uQuKMqLaDwOnvChg9bLn6JyoiCj5m1YuZjHNAoxGo9jb24tarRaHh4dlfFAaMO/MHeOenUH6ZwfOqSk0p58wbzw3CgkZiZjlD1nBvHr1qrJR0GyOPWhACoCIZ7DCBFDUarXiPV9fX8fR0VHFScPgLy0tRa/Xi/F4HK9fv4719fVot9tlYxrPbD3g4xLNjDl8Dbs1mUxKMXrG866yphGzPuW1NI/lAWyNx+PodDrRar
WKPsxsPPoVZxtw6vXr8CJ5vzg1NtjWr9lhy4SD+8y98vizLpHh7PQiq5ZtIlV5nNDV9NOgmn6gj6lfilx44yI2ALmGkIi4sU8Yd0fUxuNx7OzsFBtDH3Be7TjZIWCsGo1G9Hq92NzcLOksdkjYj+F5u0sybJmzfouY9deb0siF5zukSliGAOq2/wZx1osRs/nv9XrFKZ1HoKCHuFeOWHBtzxHA1QCWZ7POznKL7aSKiSMWfNa4AyCMfK+trZWi+07JMahnTEkdiYgS/SX6eXV1VZhclyZkXnJta3AOLD7yhuwyl4uLi3FyclLZAM+YgHecNvC+OOGdIX6zZFlpGLhGRPEQGCR/hpaF1p8ByQOa7GFiGLMAjcfjaLfbcXZ2VmFj6/V6Ke3B/7BGTjtwoq/vhbdgReM8w4uLi1hfXy8FlBG0TqdTKWbLpAB4UC4YCo8f44S3sri4GJ999lnx0Cmq22q1yiYrdpEi/LTj4+O4f/9+JR+VhcrufjPI2fGw0lhdXY0HDx7E6upqOUfdn7eBuisANWI+WJ5nFMhRY74MtAzMUAg5sR35ZZczO40JpyCDeKx+j8oQDsdiiJ1zZkUQMTu5yewl/eJ7GFsUObKAk8HnKKWF7NqJQ64BrhigxcXFUlYOw05dU9goQACyt7W1Vb7T6XTi8ePHpbwUm6VgkAyo+R+mC6bQJ9I4eoKC9xyzCxxHw9fks3fJ0M9jJ/0bhhOZJN3ExdwxJrA2joy4Zi3r3WWWmFMqQWC8vBHFugtQYOPk1CqDKesW1hi6G0KCOUV2MNSZicGYIpd8Fxkw+9NqtWJnZ6fCRma2luewbFxfX5eC4xw/fXJyEt98800cHx8XR/Dk5CTW19ej0+nEgwcPKgztxcVFhfQAOOMk+JAD5Nrkw+rqalk/gOS7wpzSPK8R1QgFc2823ASAGe/V1dVKCoYZTYeRrZe9yZMxYXNzvV4vaU30K5MwXAM5Qt7pZ262D3aKsSHj8bikjTEOflanBeAsItu12qwc5uLiYuzt7RVHiusZCznqwBpgXHDSiYCSIkUKRK1WKxEsogpgMdYzDv7KykplwxbrDF3hU9Ys+0tLS9Futyt9mkd4zmtvBahWBPMMuxuDCoo3i8H37UH7uva4zs/PS1gjh4j8UEzycDgsg+dJc/F0sz7enRYRFW8NBc9GI9PVTBr/E55ghzdhBVhOGE76j6JHQQPkWZCAD/rSbrfLCRu0zz77rACCo6OjePr0aallZo+LHaHseGYxAF5QwjyPFYjZCyv9drsdW1tbZZMCLTsfd62ZZbYX7PcjqjLC7lyMNsqQzWmLi4vlBB+UBWkdLESYp8yAEhqnOgXzZoXp/Dz6zvcw2pZtn/RjZmJlZSXW19dLKJR70U5PT8vOZuQ9lxPzZsOFhYVSaJucVSpHRMzq+ZK+giHive3t7aLUkEPWMYCY9AKvCeQSoML4AjyI1Bg8Me4G2M+fP78FSOaFoT90y86if1unYKyd0uDIFGu70WhUokR2wOx4IZ84vKR+RNwUWOdYRBtXbALzyT0xtnke0ME4XAYaAAT0OHWpNzc3y7rEcHItxgZmCj1GuH88HhdgCvuEQc8srp1Jh6Ht+A2Hw7KekHO+T+oOoVTr3lqtVqJbsNyOmhgs0w+edTy+qQry/fffl7XwpjSVD9nmOVMmLbAlBnOuZGMn0fnBjIHZN3+HdYDuQcfs7e3FZDK5dToaemJzczPW19cj4nakiuu7nBryHzHL77b9jJiB0IWFhdjc3CyRVEcu7NhBBrAWIDAibo4p3dzcLJEf+sb1GUPLMq8zVuhudABViZwGRtUjR9h4/+rqquwRMGFjO8WYttvtch1eZ70fHBxEu92Ow8PDMhcehze1d+7it3CZ4s6vWZFSUxPDhzKx4fAkIwi9Xi/W19dLPhSDxcRjmDBwMFUc02klR44f32Ow7M0gXNlIUe5nOBxWNg5FRNmIwW5jCpJHRAG3LBKYGhYbDDG0e7vdrp
zuQ66ow0p83mEKjAcgHNCEQNbr9VLCyIvHTDWLmFzhWq1Wjjx0uC5vUHCtOQNSDMNdMvQ25hFVB4dnYjGhBKh9eHp6WoC+gShjR+4x14JlwVFjHiNmTtri4mLs7u5Gq9WKo6Ojcq/19fWS18zYc9+IWZgGw0UdUTse3AcHiNIq3W63GHkbaOpoonwpAo2CBAAiT7BH6+vrhYFC6VpeHc5i3MyOzCs3kgE6oX/YJX5PJpMyJ5YzgKwjHAAtXm+32/Ho0aN4+fJlxWDR3kdZ/rHaPBIgyzBzBKCzHEbMmG+cV+oostHGOhmjy/UAEtyz1WrF1tZWnJ6exsuXLyMiKvK9sLAQW1tb5SSwWq1W9PE8eSYfDlbec+Y+I+f9fj/u3btX+uTDGNh8Qr8xyESI9vf3i2OEUWU9AhBtUAHn6FmiHozv5eVlHBwclFP9cPTN1JNChTwCaGFvHfKMmJ3jboaZ2tXM/dbWVvzbf/tv4+XLl3FyclLJw7R83KWWwSqvIb+sUZxrO9/MhW0M84susc3BzmPXut1ukZHT09OyX4O5brVasb+/X2xeRHWzJD/r6+ulvxBSONEmhXCOfKgLh44QJcvEjplPsAFs5ebmZsERBp12UKwL0L2Mi99D3lknOG58njKE7XY7jo6OKpFVz5O/w314JrPbTr3k5/PPP49OpxPff/99GS/W1NvaWwFqZsQQhnmLAQAFA+rCrDCOKFKugYLgb5hLe8I2ur4XYWqHociVIk9teXk51tbWKswBoKTf7xfaH6FHUVjZo4TIi6N+I8aSe/JsBnx4D2bUVldXY2NjowDfZrMZ0+m0Ego162dPhsnnmfBGj4+PyzPi8cHkXVxcFCAfMQsHMAeE+nLII6J6IsvCwkJh4wAuXM90/l3w5GmWNTcD1+n05izywWAQrVYr1tfXo9lsVg5MwFFoNptlvrJM2stlfJ37hmHqdDrFOAPuyKckjLe5uVlJJ3DoD/k6OzuLbrdbSSmBjXDodjQalc8SnqTxWQplM3cY2Hq9XpgngCksvz1wfgz8zZqhyNg02Ol0ot1ux+bmZgHlsP2sa+QOtsOgOSJu5U/SLxQjfeT30tJSPHr0KH7zm9+80WDelZbZpyzHjAVjx1yQ/45Ta8YS59f6zdeyToaVgeWEEUQu7JBMp9PY3t6O3d3dWFtbq6Sq+G/kYTKZxMOHD4tT5A174/G4HDSAbLJb/vLyMtrtdmxsbJTnQq4dAUIP7u3tFUBoYJLHkmcwS+dIIXaMCMrp6Wklb4/vRcwYJjYaQiCsra2VcC39ZIe51zfpRb521qkQOGbwLM8fsmWiipb7ZicBXYY9A2xhkwGwzos0EcP/trGMz/HxcUTc2EQ2TxEl4qCbzJqipyOqEWTC5ERrnPZChRGOc/azRURlXdB3Xx/dQ2Ui58o74mNnjz7Rd66Xo6CQCNgeNpt50xknE25tbZU0TfSFIzI8a7vdrugoz73zWFnDyPbGxkYsLi6WNMf3wQvvVQeVZqVuJcegw27QFhcXS4It36NTmYVjMgC6vjelDAyEuA6L2QO9ublZlNna2loF7BnwUhwdT8s7mMkBMUPl04Q8HgA2h5xQrozD1tZWbG9vFyVkWh4jbKYWz8bhnMFgECcnJ4UJoy8cD4jgYXCWl5dLfUlyAhlr5gOjDxgArCJsCPGPfvSjePToUTx+/LgCzHgGK4y70OY5UTb6/I8xb7VaJV0iIkqNUE7mgpGcd90cHrLB90JeWVkpMgfDQmrI0tJSfPrpp+VIToc6DZ4yK/jgwYOKAqFGJeF7h27JiXLYEWPMfRxecljU4Xo/t/NivSmPPjkflN2kVqzOw6UfKFQzcefn51Gv1ytMLXMIK2xgkRsOpTfP5MjKXWGhPD/WnXbmeQ4MOJVONjc3C9jDOZpMJmXtZ8aHxv9EZsxoITvoOY9hq9UqMmt553rOEXU+3urqatn4YkMNwXB4eFhYKnQU72E47QChr1qtVmxvb5fIFHot4n
aUx047xt/9xsbhGA6Hw3J/qgVQ6WNxcTEGg0G02+3Cst27d6+MOXKNE8k4XV9flyOKWcOWU+sW7NnKykqJvtwVmXUzPshkQN53QW461VMibtfqRQb9Pe8DwAk4PT2tbPaFGWQsI6IcQgNQtfyZeTR7a7AH4EKf1+v1Sv/Z7OncbVJBuA8OGM8BeIPNxSaZocRhdF8ZK54HgG6chaPa6/UqeIm1RwkrgCnRZ2/QwiFlPYERImZ2n7niefjbDh+11C0T7yIG3nsXPxedR8t6Aj1wDt/wYPZ+GFQ2fNBQZlYSDAZ/w3DBgMEcfvbZZ9FqtQp7igfhXCLv4icfKIeWKGkD04Whx7O1keMZ7UV4YjkLPG9aYXIQRnvYGOOIG8OPsl5cXKycpb6yshIPHjyIJ0+exHR6c7QbG5oILWHcubd34zFnh4eHJaSAACKUhCu2t7cr3hlzP09mPnSz4bITlBVnRFQ22ADmzY7weZwds43khWIIs3KNmNXvY9EzH04wJ9cIZpD+0HdYbOQUhcWmGK4DoN7Z2SmnPhEyZz1yPTtDsAbtdrsAU1/bTiHssBUV18yA2g4nIf6lpaVynCzsNddybm2tVouDg4OYTCZx//79ChsbMTv6kHQADJPnNhtIlLwBmp3Au9DoXzbw/o0usF7Nz81zIWcOj3pDhAEa142onlAFY4tOM/vCPZgzDKYdeeQnM3/Ow8TxIjR6enoar1+/LmAwYnZ+uDcQ4pzfu3evnGdvYGcbYvn089rhsfG1M8C4Yq8o8Xd9fV3k2Gk9PCP5qM63JkUGULa2tlacVr7PvWB/iWLNcxbvElA1o+0G0KTaBDoTOQMowXTyHYN3R4hgstED6CTrKsAe486Y855TkTIT7rSRec6vHTzIIvbCHB4eljSsq6urSh11rsUR23wf/cS8ex9LRNxKt+O5GG/0p1l/Isk4rGAgSBCwBWNJRNUbfdEVpBbSv3n61jbD8479WltbKyDeDOyb2nsX6je9jODZy8AAM8gWGBSWk4RRrCxKGzrCMhgfA1iUDa8hGLu7u7G3t1fZqMHEwOSYKUIhGgCgyPEwIiJOTk5K/gmT4rqs9hBQgrCVm5ubhT21wsRoOz83M1iefP5GmVE7Dg9wc3OzpFGwmxTwfXp6Gp9++mlht1EQAF36w+YrFpFzTLLXZmP3JkN6F9o8oxRxe2OBw9WAf1diQIHmjRiWHRRFxMyr9AYADJR3SvNz//79AtzygmcOHJq1c4GysCKPuAGaq6ursbm5Gaenp3F8fFw5zIK+ssZWV1fj/v370W63KztKveYNMhx6d3J9RPUUEgMeHDQABX3lujBDKM9Xr17FaDSK/f39svaQUc/pyclJyf0mTShvrIDp2Nvbi2+++aYC7O4ag+p+OOQZUSUNmAODKDshETPng/XKdwGvAL5crSQiyqlx6Bb+Nohz+SaAKv3m9eyw+DVkxP8jC4ToX7x4UdlUZRYU2b13714lJ9ayggw6cpJtQQb3ABtST7AVTimDvR6Px+UoSxwsarWSMnR5eVmiETwr4GE0ujl4hRQw5sBMJP1aX1+vVGqwnHzoNo/tt6NufQB4azabZe2hDw1OHU2wLUJu/RnvP0Fe0KkGQnY8kAHLtPNZI2Y4x/YCvW4n34CXSgSkrICNuB85/cgS0VJYeWOsPIY5GoEu8DNYL0RE7OzsxMHBQRmziNlRuvX6TQSZNW4c5/0v6Bhk3pv9bFOZkzyGGxsbsb29HQcHB5X5fVt7J0D14FiRm9blcwsLCyUvkwfE0HugLRBMGIaSyWYgyTkDxTMYOY8DQOmTPOhXZiSyZ+8kdxTPaDSKzc3NopAuLi5KfVP6y3cQTDwMwKkLwFvp0Dcny9Mn5yCZ/VhbWyusLh6ZwxMYkfH4pgRPt9stJXxYrDQE014QZSx438o8IgqrjCfoBZcV6V1oAO2I27tLI2YM4mg0in6/XwrW+z2UJXNGGSUDU66PHPv6eKaME4CAz2IECZ9eXV
2V/B4UIH+blTWTyprISoI8IFj09fX1ODo6KsftRszmf2NjI/b394uRRR5zFCMzXR5PO2p5rFCgMEfknCG7m5ub5YhI+tftdmNxcTF+8pOflBArbFRElCoFzO3V1VWcnp7G/v5+Ja2AvvE/edTeGJQZ7w/dMpCKuF1SyE4VY+DNJZkRzjJrJt3VJuxUIZ+MTzb0m5ubMZ1Oy8Yr+mOgYnBh4+XcNPfV+jkiiky+fPmywq5jQ9jpbJBru8L4eA35nsi67Roy4VJrRJPa7XYJr/O5VqtV1tnFxUU0m83K2eYABebH6wdgQ8F/5t22yg4ZtVHRXXfJufL8ulmGa7VapS6n85QjZrrNKXPzQtueM4e2Ca/b6TADyv92dsyYRsyPqJiAM/FhFtDOB7Z/MBjE6elpwS+ME06JnUDvrsfuM24Z/Ofnos/+G2BNVKLdbsfx8XFZA173k8mkpFr1+/24f/9+Jf0soqor0b22/7yHzXNVDAjL7Fy9S27fGeK38HvheMBqtVpFkTHJGHznAGVgZ1bIxtDgkXO9HUKaTqeVAeac8H6/H3t7e2UHpAfQbARKyhNshcakuiYbOVDdbrd4Pggm4R48XJKMyXXhvjm3xmAaIXDfzLQ6RIGnBYhHwGFQrq+vK4cDeK5QkowPgkzOJcoP5qXX68VoNIqdnZ3Y2NiIly9fVoC2Be2uGHp74VYiZgJR/BsbG/HjH/+4HEiQ5yxithbIP7OX7WtmtsggzYCe97e3twtYYi4AYyhzKkAYBGSl79wo5JI54jhciub3er1yTzb/eTzM3qN47E07j5HPmFFl/bBZgb6htFF6jCebo1CaEVFkkWd1Hiz3gIldWVmJZrNZnFM+g6PrdQ1A7XQ6t3TZXQrzG5xmPRwxq/FYq9ViZ2cnTk9P4+zsrBIejZhVdnC6SsQsqsB71u05XOrvIQ8RUUmtQNYsh1yL7/GbeTQQQG69hph3dlJ3Op3CvKH/ODEPUJqZf+yF118O8TMGBs/uO9dfWro5MfDFixdlbNnkOJ3ebBY7Ojoqp/F4PPjBqTRJAOOfSQyTJoDae/fuxe7ubjx//vzO6FpaJgFotnfk29brN3XKB4NB5fhsR0rMUhqwAoIcfjaIRTeZCEMm8yZi1gVjj9xH3K5SA3EFIMbW0j/rE64LFuj3+xWnho2HEVE23jLPOER2rBk/kxaME86Po1Yee8Zld3c3Tk9PY2FhoUTJHBUER1DdxcSZbYE3cXkN2wHLToo/b738/wRQ3VDyVmRZAE9OTuLFixfxxRdfxHQ6jeFwGIPBIDY3NyvgiAe2YrP3ggCYZkYA7E1yPZQSNcfa7XZlIGAQLeQ8D0aOSY6YAWcGFdRPDbHt7e1KqYaIWfjSnriBoEOhWQlyHxaBxxRhxAjbgAyHwxK69/fImWXHJyWR2u12xXhZCTKm7FKFPQZATKfTODw8LHmonvvssNyFlg28GfWIaurK9fV1/MVf/EX8u3/372JjYyN6vV7JWbZTYSUUEUWxkgifQQTzFFE9lYT+4GGfnZ3F+vp6qcmHojQrCphwnwAWBt/ku8JgOlePtA5YKRpnM89zOOxQ+V6859/O7zRDzVqs1+uxt7cXx8fHJT0HBgX5x3CjxO2gumUZ5H4OCTqMV6/fbCDa39+vePFmffxsH7JlBtJ/028M7fHxcfzDP/xD/PSnP41GoxGnp6flyE0YqvG4eoY2xt+OOoa8Xq8XR4Lv0Cfuj6E5OTkp9XDRj/M2YLn/juZ4PeWUANsbjKbTxMwi0m/6wdybiY2YRebMPuc0j2zcsRfkOlLHNNfgvby8jK2trXj9+nWsrq6Wnd08DyDZDgSABzuBjsFm2GaaWbOuoc93AazaLme5NUCJuCk1hq22o0TUletF3NbZdnIypmAjnwkEPoPdjYiKw4usMP5gBcsw4JZn9FG5yHQmKLDR3qxI/9nxnwFyxOyYdTPD3vdgR4uUMUdjGWvGHpvAWjXQZoyQSyLZEAQ8hwE84w
9+ojllwvPCOPJMPGdeb/Pav6gOqpU4g2Slsre3F5988kkZKBg9Jt7XRMkwoAyYBcMoG4Hw7lXfv1arRbfbjd3d3SJ4Zh64DkoUZWXv2sLIawZ0MI0oqpyrwj3NIJgV5n3GCy/JYSYnxZutmE5vjjvb2Nio5OOyScELjHCpc1X7/X7U6ze5tWz8AlTbo4dBZdGaGYEtsQwYkN0lI29gR//9LMjEwsJC7O/vx+effx4RUfIrDw4OYmNjo8J6cD0W8uLiYmGzh8NhSbvwOJhRdMgcGcHQwbxjkPHks/xZTr3YcxTCDhHrxVGCiBn4ZM0BiPN64HUUEOuI97JCdH9YM4zB7u5uSTcgt48fjAOHbywtLcXR0VHx2OmnN4lkxjRilu5j446OYYe0dVDWZR+6GXS8qU+AteXl5fjJT35SyX17/vx52YDhdBDSSCJu5Jz60TZOXDvidgH7HJYfjUaF5c79NwOW10B2fkxO5DFAdmHKAYe2Hc5nzg6ymVmeyZEP63kDWtYDEY3JZFJKeLVarej3+5WTCh2+pFLH+fl5eSbLp0PBjDd517BnZnm9tnAg2BicCYIP2TIj9ia7enl5GU+fPi32jAoInU6nfA6CKjPs6EXA7HQ6Lbv1I6LsaYEZtM41IYRzbP2Zm50py0xElO9mgsDPDLhkzWWG1nrMeMH9RW58XdtgEx/oOUgJ1iBzsby8XFhUE3LknnKd0WhUKtbQZ28Mj5hFD7AF2SFB57AmsB+ffvppfP311zEcDgv59rb2Lyoz5YmcRyXjSXIWPAYWgwygw1BaEdrwkAAN8mcXPoORFVm9fnOUWbPZLKUMbBgRDjx4C5K9Mi92CwO/DRgRGoAAz8+PWQJ/lmuZwscDiogS7mRsMkBpNpuxsrJSdtwZiDlUxNhQP/Dy8jIODw9jb2+vkoRNY6MELBynoNRqtUpiNQB63phlD/JDtiy7yGNe3BFRqctGaJr0iHkhJuSN8Y6IiqMQMVNuyK1Dd8yVy3aw0S2imoZg+XVNWkC2WRj3FVk1EKPZgYqohm3z2PlzOHQ8L8/JEXi8h4J0rp/DpO12uzD1lAMCOJ6ensZgMCgMx8rKShwcHMSf/Mmf3IqaeKzQKewCprYsz8MPgAy9Mk9W7kLjeSKqsou+wFm3PsSJJJ/cBt5OKAaH46QN8vJmJINEO3mwSUSrLNeAOp6D33YM6VPEbN0BetElfu6IKBvgYHkMXhkv9D5j5c0nEVEhLyxDrHuzd4AQHFHWie3L+fl5Ac+TyaQARzZG4YCiTxkn60o7ht7U4/lzRAW2kWtkouBDNdsBvxYxC98DEDc3N8sBCsgmaUjMW54jdKUZTP52RIn7OYrqcWQjqO0198hpMFkX8kz+nhnQeTjAlR1MPOXr8xnLiPeJsD5zVNa6DAxm2+Tx4DRINpdzOha4AdKK6JVJDDsLRNx8MpjZapf3sjxzYIzrF7+tvVcOqg2DlQedsqAwiOPxuOSEAabsQRgE8joTYmPJILrMkj1Mf5Z8JVhSlBn9sxBFRPEiDB4cckRhwmxl5cKk2/tAyNwvJgjAbYCew6MGff5BMNmtPBwOK2wAebCNxk2h6IODg/jTP/3TAjAJH5+cnBTjb4aDeZpOp2Vn6YsXL6Ldbker1SrCZk/K4DQD/g/dvGgY08y4R9zMFflBfA+HCDm3jCBLhFHJSfUJR5ZrvGfkFWfGCpLyUpZN+m5nA/mnn8gSDO+8xY6Rt3wyHih/h1qRbee8uj8RM9bSTCT19DJzBvDgvuTxnZycFKYUwNFsNgt4PTk5KTnd9Xo9nj17Fj/+8Y/LOnaoDjCPQRoMBsXY80zILKDBrNRdkluanaEsu/PmjYaO84lddngy4KPlOUJHeb3gTNCHZrNZSq9Z9gC8GDbbCsAVzBLz4+d0yR10UUQUEGMAauBr3ZodZYf1eU6ac+3yBkDLODIFC8WhKURRptNpOTmIk/x6vV5hmT32Zt18/7wWbVPG43GpKsMzZELlQz
br0Hnvsf4ARNh7E1SWN2wksskeEDZVMj44x43GbH8GGICxsdN8//79stkXObOTxNgbLNLsNAIOzWSbscRxIaLjOQfYIVfIJ04Y+pnncmqLG8QSY0E0xJiG+7g8Gwwya5w5MeiGReXYaXAMz9LpdGJ3d7civziyPv3PMkGKCvP2Lr37Vvj6JvBhQXQHOp1OAWHelQYF7AnhIREEUDcK6OzsrAhh3g3MpNFHJsE7e3mP9xkwC58VHIJqNsKCaUPP/7nl8cgMBs/sMQAEOxcMRc04WshgUDFCeYFTXihittGExU5tMyoRRMzybxhbvP1Wq1XOwsYwPHjwILa2tm49o0HZuzyiP1YziPZr9oIjZguInOKIm2dg4wPXMVNh0GNmis/ZkzZ4JDkdGeb+sIeZWeEeyAD3QzZxMiJmR0+aLbIypW9cB8XqvFHAuZ0vFDfyaEVI454osQwSuDay2mw2Y2Njo6w35IvnmE5vCks3GrM8x4WFhTg4OKjUO80lZPx6p9OJo6Ojkk81Gs1K1XE/j8VdYf5prC3rFOuNWq1WOU3L0SLn3eZrGPzMux9jGDELWeLAWk+MRqNy4hnjiGPj6Jjnx+QG8sVv3s951V476CZqPDsX07LJWgBQ80xeH9atXhuZKWasvQY2NjZib2+vRA8A0Abq5O5fXl7G8fFxuQbzwDix+x+H0OyzdQSt1WrFw4cPKzaVPn7oZp2b8YL7l3PosxNu2eFzOEikUTktzlFDvuf3beMAjMy99Q/3Qh94zjPh5DA6Op0qJbCX/L++vl4YY06jIl0FcoLvZGfbWMl2yHYtrzHbBZMcEVGiVVSiQNaxKUSgOJiCE7foH7aL1zmdijWP80/JNB94xFp39ZV3OVbvVWYqK0Zf2Mzi1dVVqf3GQp9MJhU0jTfAdwFllE4ygh8OhwWgOsmf3cFmgabTaQlZGTTwHSc/e1Gbxga4evNGFhJfzx4uwJrnNNvKgsoKj7F0SQkMuZkyvGyE1onTps/tSdVqN0dL7u/vl5QBBIuzyFH29kTr9XocHR1Fq9Uq42dAj0L1eGQH5i60ecKfARP9JdHcrAmeqJ/VTg15TxTGN8Cz8cdLZGEbEPI58k8thwZdzA3vM9dZWWWZZL2Zrc9slw22d+ZbWTqNwLI5Tw+4oWi93iNmSpLQ6cbGRtEVsCFOYej3+7G/vx/9fj9evHgRn3/+eaXYdgYoMBdEGVh7KysrsbW1FQ8ePIinT59WQOA80Pah2pvIgIgZC2kH2swNzz8YDCpVEPgsB5y4JqcZWjs3sFd8P0etiKzwmhkns5z0C52NfuV+ZtnpL5tjAW+2IzSYKKJy6H7LP6DZtgJ5BoDTR280oeV+0ed2ux1ff/11XF5eVtg67nlxcVF0ZbPZLPOBI8oeAMu5bUm32y1OAIAIuSZKyLy8j6H/YzXLj52miCjOg4kY9I1D8G7OO42IEqnJbKfxhnWoHQ8iXY6G5XWFfs8OHXKawbdl19gCWbQDDPhjng3Sse843TybU6esu9HDEAqMCfouoppb6jnY2dmJr776qpAwJlN8fLLTExgLj68j1ehi56uSB0x/Go1GAefvq2vfCVDN+JlBnMcura2tlbwSKGPYOtPHhPT4MbvHAxHGrtfrpW4i7+FlWpnwHedEMGBmRcuDSwll42pli+dlhep8MMbI4c3MVvEZK24LF9czGGIM7blF3GwAYZLPzs7KvQAU4/G4HGPGme94dShFFDTUPX2YTqeVE7hYNF4UeWOZWRHG8q42L1T6zjywaJAzTtjwnLAIr66uSq6aSxvZyUEhmeFcWFgop59ZEeONIh8Oa43H41vg1Iq4VpuFst0HG+vsOFhp21nzxoR5jKKdRztGWRGztgzWvcYajUZ5Zq9jroPhHgwGsbW1VWr6Pnr0KDqdTnzzzTfx6aefRsSs6L9zuhlvAA5A7E0OomXiLjQ7PJld8lhfXFzE+fl55ZQi1rFBU0SUouELCzcFs3GcstMeMUu5so
w5/QR9xZgbIOGU0BeHP2k5P9UgMKJq3NH54/G4yCcGFAPokCnvcx2+FxEV++P/MwvPe9bdHh/LK6W9BoNBAR+sc5gxdCmVACi1BjDx9ekLRdx9Mo/DwDyf2akP3bLzS7M8R0TZA8EYI1/IHWPL97yhNztK1jGMByFmk1oR1TJXyL37wGesJyKqdVMNSJ0igP42IPP37UAZ84Bn8j1pyA/XzvYEveqonfEW4+7nhLn1kav0k0gTGAGSxoAf3UmhfhM3btiULBd5w9Xb2nuVmbKnTrPHwc388BcXFyW04UVow4dQ+v9c2uT6+roCcs3mIAh42y715GvkfruZqbTRQsEzuLmfZqTM4Phe9tQAoOTIsDjq9Vm41GAasFir1Yph8YLjyDA/h9kMp0t4QfF9e2s8B9/j9BOPHbmD29vb5ftmd+4ag5q9Y7/ufsKe0pBr5v78/Lz8z/cjohwritfpe+FckIIB8OWHcWejFZszMitijz4iKvOYFU9Edde62QUrMSvNiBnzbyPJ98wKO2zksbRx93UY59FoVPIhDbDdx/Pz8+Kgsh4IFwGGtra2YmFhIfb29uL3v/99dDqd2NnZiWazWVJavIGPZ+j3+0XWWXPtdrvCnjJvd8HIR1SPh30TgG40GtHv96PT6cTe3t4tw+qNFJ5rxsHg1ddHNzl/L6K6EcSgkTxsZNE7g+c5AfQD45ejQbYRduonk9nmD/rKnDmqxJrBmcZhyiDD42L2DjuSASlACdsznU5ja2srzs7Oyuf5PteiFNhwOCw6vtvtRqMxqwnMGBmwAWhx1DDoPO/y8nLs7++XeqxZNj5kM3tJ89/Ixby5yHbWZI4d+Hq9XthB9JPtGOQL8hMRlXQ4+mlbGzHLG82sqp8BOXIDZM5jf2u1Wd67D3RwCmQGyTTec0SWe3sMkRnvYwG8+wRDPre0tFRS9c7OzirjALG1s7NzC4sxRrYnxnWUpUR2/UzIBNFJatjOI0Lc3jumZcNDh7Nnh/eCUqCEBIuRzq+urlYmy4PAQzkcQAkJC0YeIDzT3GcEOy8cjJlDYGY9+fFONO7hPEJyUayMUaL2yhFCK3MLPf/bGzJbYGYsK+OImce/srJSAK09bPpIXhbPwg9zROkUJ1QbgLMxwkLLvLk/d6GZ4ffit9EjR8ZsdsQsCRw2ivwnFrxlxsAPQ0ohZp/25LHBKFNwHTmzTNmxQ/aRT68FdqnOY9ksk55Lgw3+d24Y/TB7xnrLihGlbtBpRUrqDUCB59/d3S2ljwD6EVE+D3htt9uxsbFR5uDhw4cRMcslzfNqsMQ6h0nd29urHOXKmGUW8UM2s0Q8TyYC2BxxdXVVnCg2oJycnNxymiJmNRfRz1RLodnByf3xZ7g3a8NMtfVKdgw9L9aXll2ajTNODtexbJlg8Bq3zvY+AD8fRdS5DusbXYdcZAd/Op2W41UB8xAajDdgyCFPIlZe79atHifWw4sXL+Lw8LA8F+uYKJfn7UM321LLrO3EeDw7TS87hdPptMK4Z4aUeziyBwjMetEkA3p9c3PzVqnEiBmhZX2QwatbHmszlbb51otra2tF3lwhJs+902CsX7OjavaX12wj6IvJRfTzwsJCbG9vVzau21ZadzD+3CfrBj5nzGICxeNVq93kzXPK5vvo27cCVF/AhoXfFkDAF0adh2u1WhVDmR+QCQAsRFSPMbu8vCxHdmLkELrMPGZG1gAvD5iNpQ2082O8ozMzr15E88IaKFiECSMCMAEEuF8u6OvNKvb6eA2DEDHbFMHOcCt8ap6en5+X/DMbHBtyxp5nHgwGxelgbij0bmPpft0FRUnLC2Re/5w3Z3bk6uoqNjc3K8nwzgO2coUtYRwmk5vNDxhVrplDixERGxsbBfTSHzOVyFiWRTNmVspmKL1+2RCVHZqIWUiV52OsGBMXY86OHukO/Hh8DYD9N0CTzU9ck9p4NCIOpAIQFnXNWCtG5oI1DOPqKgmkuLD2HPm4K7Jr4+11Rr
OBAnR6AwNldLLc2alHB9ZqtQpTjQyhJ+yE0XCE2YCJzCLXvhYOsSsq+ProuYjqUdq+J/10mNPgG2M+L9KXQQsAm3H02uZ6XufZ0TOjxzjDrLLLnHtdXFzExsZGRET0+/2IiMIc5bmu1+uVEmjsdubAFJcTc399nQ/dvA7za+gTasnm9BB0JcCN9Q476rmt1WplHwopaY4eZB1KI6SODeYzBtQmqhx6j6huOPJceK3SwDXLy8vlEBZYXRMRHifjgkzAzWNu6XNmbpFTYx/LC3nRnLbp57duQe+6oovTGx05s6PJjze0I7/Ml2uwv6291y5+/+0fh2QwUPZWDd4svPzNJJiFISRiLxJl5jxLNlogjGYjLWwII15B/uF9T3oWmpyrF1HdPGWwiuAzSVQicA4uniGfy4AkjxPjaOFGofG8WUBo7HZtNpulFNLp6WkJB0REhbVjTq6vr0vhZMYIwAoQNnie53x8yJZD3/zOrFK9Xi9nELtwPOPmfKjMxPNZGFaMJD8cyxgRlbp+Nrwu7o/y9C7/NzFL9N05bswBnvI8gG7lYmbfMmeDbKNuhR4R5bm9e5VnA3gY4Bi4mFHwhhXnPgK+kEf6A5OPAcMRM/imWDo6xLnTgCs3A5wP3ezsee49j4A/6y/mzswIehlDgwwyFshgxGxDHw6rWXyMHuuBcF5eYxHVnE7m3ywVsm47wPe4H3oeeeaZsoOFUc+OBp9zTq6ZV8s5Y0XLObGsmXmEDPVOHdLnmS8uLmIwGMTx8XElr5bG/MHeek4ANzi69HNpaSk2Njbmnuh3V1omLwxurFOQc3JM0W3ICONioOZrepMfDDZywN/e0Im9jLhddsxrH9ngNacOgjeMc7w+rL/525uXkX07jfy2Y8R4OSrpXE8TJf6e10gGkBlvsdmOeeF5aBAjtiNcI2KmL7gea5eogvEgr6OPuP67SIF31kGlY3kSbSz4HMd3gbhdZsCDl0OusCeUKLi6uiqnnOBJmTEaj8fRbDaLccKYcS1Aor16WLF6fbYjOhulDKh5Tns38wy/ja/peIOOnH5g4IlSQ4DzwuJ6puIJ9TuMyfP79AaDZz4PWDX7BPC0x8U57QavHIjgkBZjMM+D/lAtK8eI27ujs+LkdTtK1OHM4MWG1fdcWlqKbrdbmEWDSeaBnDWXO2IBs7jdL5hMe8sGlDbCNINBlD2fRy79vF5b9sDJ77Tco3Sob8zrdsIADVbqrE2XiiF8m4/sY00REVhdXS33on+j0c1RglyHXFZSBJaWluL8/Lzoiqurq3IC0DzW7S6wUG7ZIGeHAlmKmDHvzMVwOIyzs7PY2NiobC6yTrQxI+Tf6/Xi9PS05KxFVE9BQj9w7HJ2huwwz2N4bOTQqXltOe2A62IEzai5X44aob8MhrzG6YOBpxl1PpNBCPczQF1bW6vk0mUgC5lCBIN1xyl/bDThGbgPUat6/eZ4YD/X+vp67O7uRqvVKjb3rjhXHitjA+wRz+jxQRfY6XBqXt5smZ3diJntds6/D0cwocAaMOFgO4kutt3NLKX7M8+RcbSMeaNZzuzso/tNWDmaTHOlE8aQz3J9g1vjI+tqHCDYfc/hZDIp6UOMb44qTKez+qkG6fleEHU8JyF+KnS8rb3XUacZkJmF4TedorC7Q8aEkBqNRvE27UGZMcH7wKBQD5VJ5rMrKyvR6XTK6wiuQZyFmb+Z2AxcUHoGrwgQgw9IyB7TPOHl/ggN4QgrUnK5vBDsgaDI7f3h0fieVq4869raWjkNAsWKN5492ayY6f/5+XnZbMVzEZ7FwGfQcteM/DxP3s+M/A0Gg3Led61WK6E7nKOIuPWsNvooUTtfmems1WolLGrjZY8X5eJ1x/UNTur1emXzEfdDOfv57ShxPa6RmQQ7ePl1M1212oxVzePB2DqFxbqDfsEuAIxIB8JRImUAxYhiplzM9fV1CZvxXIwLYT9Ca+Sdrq2tRafTKfOQddxdaPNYBetJ5Lbb7cby8n
LlSE3mbjQaxc7OTsXxscwzvujt8fjmVC82PeWNqPyNHkfvGeDTN+dfRswcFxMHXDfrIkgEGtc345rBpsOfvpbBnq+JQ2OQ4IopPCcOGGyyx5DxaDab0e/3S4jajihta2srms1meebJZBKvX7+OZrMZP/zwQ+zu7pY0FGS/Xq+XUwAp+g9LRYm2VqtVzrS/C9Er1hNz9ibQbJ1Sq9UqtWSxcTny4uZIjjEI845s5/vxHa6bGUaTE7alvn9mra17HVI39rDdzmub75ngcooS44ozl8G6w+80A3wDdpMa6FnmwGQbf4PDfKgMfaRfZqQdAeD9brcbrVar2Na1tbX48Y9/HF9++WWJgr2tvXMXv0GIlYInCKEk39FGs1arlePfUEwoCE+whRTAW6/Xo9frFcWAMW+1WkUJe2f85eVlOdMYwItiB/iaubH3w0Rm4GqhtwccMVOu2XNncr0wmOSIarkeg5FsvPmswXFEVPJCYEtd8Nlsh8PH3Ivvc32HFTxuPC9CC6AAVHshvksp/bEbMpk9eRRPxGx+z8/P4/j4OO7du1c+gyFwXiXzjXyjeBgn5gnPE5akXq8XdpOxQm6dW+p5NTtgD9Rrz2yA2VI7TgYlBpiW7QxGrYRtIBg71411CAoQm9kBywml5QxiSYVwqIh8UzNdZjgITbuuIPcYDAZRr9dLrpO/AzAl5OvdwHcJpM5r7t90Oo1+vx+1Wq3sUGadMqdZz9p5Yv33+/0C/pBjz1FENecRZ4mwpZks1oJlALDqckgZVLpZZ9N8b4Mbru/omRkrxsRAZzyeVYcgdSynMfj+jgigC82Q8t3FxcXCZjKegIT19fWyzikxxfM3m814/fp1/PjHPy5gmMhKu90uR0qur68XOUYXO8/4LsiunSf/TWOszXz7e5mptAxYb5vhRM/bZnkTpKvx8Hmn7JlJ5bXLy8uSK2wARlSM+ebeOPqZMea3wWl21Flbtv059G3SgvXFJmaPZWauGSfbfvrC2Pg4b+vbiKjYNYAkaRURs0gHOtspXsi69TNpXcvLy/H555/Hy5cv4/j4+K0y9V4MqoXIE2rmjc8fHR3FF198UTrK56nZR3gTIEvLCmFpaSn6/X70er0KS0uIsN/vV+rBZYDHQGPo7BVgAGn2jvNzu48ob4ffDWARoKzEvNBsaLm+mTP6FzEz+AiJPTkAOfmjsJowIXim9M9gBNaK+zsvpVarlXQLdk6jNOk7eVCDwaACigxWPnTznNDcR2SGvt+/f7+wc3iejBkKCw8WRePxJNTstBPCIzbwpLvwOiykFV0GfRgz+o8Cs7fu1AAfzcucZWVmWeK+lLoys2rPns8a8LgPVm7uO2MF2Pf68vPxP/en3x5rQCgA1Y4cRhtQQF1kV9poNBrx6NGjUmGE0GvE3cnjy2z/POalVqsV+VpdXS2pO+PxuGyGdJHviGotX4BmxAyskjOG7NtwYugzO8hcEfFi7WRDjKHiPeTKRtVOj2Wd73E9Ax0DD/cNeTSxYXBkp5q6j9yHMTPLZjAyr+9+VrNSk8mk1Fimb6xF2Kn9/f0SCbDzSBTHpbjq9ZtT7viOQc9daHaCaJZl9jKwH8Lfm0xmKUWZxEEm/UNaGp/H4QB8sZ5z1BT9ZVms1WoFk1jmvfaMeYx9TNLxvx2jzMLaeTIRkR1QO3+20dgcN+tNRwJ8T8ap0WgUgoTvZPyDA0++PizvdDotqZfMA4ThZDI7cc5r4OLiIvb29grpyIEpjuq9qb0zBzUrbRsYGysUwLNnz+I//If/EBHVQt3kLQKMOAYuJzYjeAwaYXwG0owV4Axl7ER4QLD7bHbJE+/PWbj87AiVNwbQvCjNYljo6ZMZIU8if9sh8HX8naurq8pGGhYBfY+I2NnZicFgUISZflGGCo+QeqcWcJhSTktaX1+vnCdNmbC8oOzlfuhmT9aAimZ2kYWIgsslQGDP5zlBsE0YfEAY+ZERM+UA2EeJANrsDJnt5jWHd2hmKekXuZ
12guwkOTTGvWzgLWM0chN5ZvrEM6JkHX7KoJWxNMM/Ho9Lji5rmRA/Mku4N/e13+/Hzs5OJc/QzBkn9xBNcb9XVlbik08+iUePHsWXX35ZWK+7Zuht9Py3dS5yShQA52lxcTE++eSTiJhtVvPcR0QB7rw3Gt2cWuQjEEm5ojlP0PrGaR6sGfqJPjIjy7WQa0eHbLQNBm08+S7383PNY+3srAIWkV+MrVMELLtcA30xz/EzMOB1bBPfpx98fjAYxGAwiHv37pXXHH3jXkSsIB0Asg8fPizMLNe/K802jN8GK/mzZk4N+iJuzweyAVHlht5hHMEk1lcRs1qmBpWWE0e1cvoe8pmdbzvcBp2sC+SX9WybbbBsx87rzJEF6z2TczyX01fQnbZbgEmwlsfO9h97g2zCKFOp4urqKobDYcEDJmyYAyIL2Bfe29raulWKbl57r01SVoo0L1IGtdVqxU9+8pPiydvz8AJlQhhQDCaDwOkovV6vnCZlgMX9CfFRugKF4xBuBiqeIJoF1XkU9nxQovmZ8jhltjRilleTv2+P2d9xLqqfx8CC72YBheFdX1+PTqdTYQoYA8IgzIGNvBUupyQ1m81yFO2PfvSjsoAzS8I97kqzArDis+w6V4fC+2ZJbcC8mY22sLBQxun169fl/dFoVK4DK1Cv18s9PF++HgbbSgSlhwx57GFZUCKsMzNNVv6+V5Y7s+hep5bdXPaFa7ivEdWdvBhRh4IMWnH6CKs5SmKHNGJ2clR2jHNO4+7ubjlyGbaVOpSdTmfuwR53hUGNqLI1GWRFzAwN1Qu2trYi4mbzDQebmM3AYbfjbRYEkABpwPygJxhfwtPn5+clYhMxOx2GKJCBLLKMnvFzmJXFHuTTgXKzM2VZzAAl4nZuvI06sj4vmmUmjd/OQaVvBh45HYrn9PnmPF+73Y5PP/00lpeXo9PpxNnZWTl6kr6QqkLqDykrCws3R9lub28X+3eXZDczlXl9+eQzmvVSnjNIqBzVMfDndWTVrDv3ZsNktv/z7p3f82fs1Of3eB+7nZnTTJr5O9ZjvJb74RJpmcF3ON/rCCeU8YORHwwGBceYdW02m9Fut2N3d7eS4z+ZTOKHH34or0EK0i/kHuKB9Krd3d0SgeTem5ubFULxTe2dIX5PiAEYysbALyLi3r17sbKyUkpuRMyUF8CHckV4NabtI27OJr+8vIzBYFA5G5YBuLq6irW1tdjc3IyLi4uyYWowGJQcCfePCaWvKEMzqBaY7J0YGDpUu7KyUmHAAAM5rIbCMxuSPTCzBabcs0dm4bXnzhhfXV3F/v5+NJvN2NraqtRf5Vkmk0nZXMFufvrJcyGo1KDtdDrx9ddfV+pKRlRzFO9iy+GXee8zxsgd7J7Hy4YJ8Hl9fXPK2fLycpydncXCwk1pmIuLi8LGwtphvABkMI7k5uEYOD84YjbP2RO3E5abWUcbYCtm+mZG1zLHuABCGCO/byCRWS6vNTPYNv5mu2B/r6+vY3NzM3Z3d2M4HJYSSWwM4W8272HUHQZFUdMXno9Nitvb2/Ho0aPyLJm9+NDN42IHwPPCGu73+7G9vV2efWFhoRiN6XRa5Hg0GpV8RhsF5I6dvJPJpKTuNBqNok/r9Xo5orJerxdW3elOfN8NEGddZsNu425ZAkAikwYtmYXiev68X3e/zDBhtO3wWSY8RjirBtMwUAB9IiQLCzc77XG2WD84AL1eLz777LPCOnE0te0MYXDmAPJmOp0WJxd9wzPdlWbmj//9m/l19BSnhmanCR1stt7jZSDIb28+hsgiBYW5jJgRT4w7zjTfdR+tiy2Tfm7335gJ/eeIhHM8SctifAw87VRa/3pdeYxNYuVxbTRmm0/7/X5hQyNmp1ESXXHktVarRa/XKyf3oW+8aZp1YqLDJBz9gTCYd7hSbu8s1O/BsbI0g8GDw8x5MBy+mEwmcXp6WhYZD+dTevjsdDqNk5OTAh6d/3D//v1SAJm8s+vr6+
j1ekWYUQqe9AyInfuXFw/PyT2gpxkHs8rOucjjlHe787l5wu1UAP7O4Rt75RFRvBjOgyZv5vLysoSzoNKn02k5mm9h4WbDyNnZWcmbpH+MNQBqY2MjPvvss/hP/+k/xcLCQty/fz82Nzdvge671OiPwZBBkWUao25Z5rQXvn9+fl7KR5n940zjwWAQKysrpfwONWQjbpQlxeU3NjZibW2trJVut1sAgevKZaPpdA7+z8DVBp4fe85mhWzU5ylEZBQnM7Pwnm/Lq1+3jFu2qYVHhIRi1ozZ9vZ2TCaTym5lZJj+NBqNsnsaPQVDvba2VgHjrJcnT57E9fV1bG1txdbWVlkb8/r+Ids8netxt0FtNBqxvr4eEVHZlIAcuQYjm0d8+lNElB3ssJ2E/QClpBKwqQ9Ga966d/6pDT0tOy/zQGReA3bgGBtk02kl+R4RM/m2oeQzdrj8N0YU1ofP5+8hV35mcvY2Njai0WjE3t5ebG5uFiA6Gt1UV2CzHk4W6S0mUFZXV2NtbS2azWasr6/H9vZ2bG1tFQLos88+e+8Tef4YLesE6ySTOESUkE3buKyvGWeAPp9hvgGYzpFmzq2/V1dXC9likEizznPEEt2CPjF+sL70M3oM7IABuHFoGDMTeG+aS+6JjTCxYpDH2iEFkPFg3SPf19fX0e12y+EoXJPP5jEm33dra6vUB9/e3o5ms1mxKcZBYDCanZFarRaffPJJBS/Oa++sCWQh43+DOQYaA4qXjhJEscFwssCMtJ1PCgt5dnZWAGfEzDCvrq7G+vp6tFqtsosRwPXixYuSn2kG1crXYNF/OzxjrwHhpOHFovAMCuxh8b8XLQsge5Q0g2XvQDUDyHjijRNOWFlZicFgUKj5r7/+uoTjJpObEzw45QvjvLKyEtvb22WzhY0gz7ezs1OU5v7+fvzZn/1Z3L9/v9Sl5HtecHeh5fB1NjBmadjkZKZ5PB6XcCl5iu12u4AAiswTOuY9lCRj3Wg0yrG+KCMMEoztycnJLVBip8SvO2Sa2SpXZvD7fqasROYpEMbGTCTNjhSGwOE6GxHfNxuci4uLUgZtY2OjyODe3l50u904OTmp7FjFSaAPETencJnppR8ACyty7vnll1/GZDKJzc3N+Pzzzyt5UHfF0EfELb1hnWVjRk1jjC6y0+v1ijFAZjEMRLBg6wGorGNCxjAj5J8j98vLy3F0dBTdbvdWX51Sgh6jXzm0GnG7EooZd7NXthVEstDZNH/frwEq8vx6jTA2BiC2fdaJjOnl5WV0u92iX1jr1NmdTqfRbrcrOa6UQKNvXBtAYFvY6/UKMGc+AKyfffZZfPLJJ5WNgh+6vQn8W/+CC+xEoWNGo9mRth5vZDBXliBH3bKFTuczyPPi4mKcnZ0VJ9hOhddXlh2a5dYEkwF1Buh8z1iBsTB7n8du3v353583NjBhyHtO78lOaq/Xi263W2FacYj42/0dDoexsbFRZBPm3k6m88VrtVqxh6Q4GmPVarUK0fim9k6AmgWMyeSHG7Kwl5aWyi7SwWAQ/X6/wpIwWRnoMhl463w3eyn+GwPKsV2EsZhArutcy3yNeV5eRMxVrIyDGdgMHsw6I4hMWva6zIoxDlzXANlMGLVhLy8vywLd2NiIXq9Xng+WGoDPwQfktOaUhtXV1ZKPgvAtLS2VMieMJc+LYcyOi5nJu9J4xjw3NEIehICur6/j9PS0GJ7V1dXCerIoHT6GZaLOIbtUmY9Go1EB8+zcxalYXV2N169fl3wzO0Q5TBQxA4DeDOW5yYoSGcp9BszhlTM2sARWsDyHN4p5Hft/99Hy4TVxfX0dZ2dnMRgMCtA/Ojoq6T+vXr0qzlXE7Ax5WCWMFvNDnw1gAGmdTieWl5ej1WrFp59+GuPxOH73u99Fs9mMn/zkJ6W/NlJ3oTFW2ZllHDNw4rkZN++axTAgc2tra9Hr9YrBRkfxPrV/OU3NBpjvLywsxOnpaQXk2SGnf974w3Uiqo
4Mzc69ZYfm9YBxtCPE2rHuNuvMNfgNaGAtWG695uiviY7pdFoYfFiqlZWVWF9fj3v37pW+bW1tFYfp4uIims1mZT8D1+O+gDfSUeifN6dMJpPY3d0tqSp3CaBmfJB1LSF06ypkBichgzUzhaenp0UXYINs5z2/k8mkkFcAeerGIl/ZoaE/7ofBrHGPbQkYI9tKXxOQRvQCGXuT3rFc8z/fcRjd9/C4em26n6PRKF68eFEORkJ+tre3C7tN9DVidjAA/bej5goVJjPspNjJYg2TlpQ3uuX2ToDKg5lFtHLk99LSUuzv78ejR49KSK7VasXW1lbJtXEozeF/ewYRN7X5OMmH+9VqN0XOnWvGhiDuHxFxcHBQKdLrBWAvxlSzJz4vKAbYXgrv2VAwVhZe7oegvEkQUap8zh5mXkh4mYAOTiI5ODiIi4uLODw8jOPj47LD7vr6Os7Pz2NzczO2trZKSAhB8cIBuPLe0tJSbG1tlX6Rzwr4tcBmg/qhm52BiKocWHEQft/Y2Chgx0rNjE5OUEeGWLwGX7BQGC+YJ4/veDyOzc3NuLy8jNPT04iYOSnMhXcP40Dk3DszFc6Pi5iVabFjZFm2kUfJWMZzTrXX3jxD5M1KAEeemfUwHo/j5OSklPfhPRywdrtdFD6GhU0SKHcfEQtDWKvVCuDlHHOYk4ibOnx//ud/Hvv7+wWMOW3nrjTLaWZ3DBa96cDAzQCO172xkfQemFcALNeGueR1O+b8brVa0e/3o9PpVAgGdIvZfF6nzdOHZu9t3N2Q4Qxe3S+eO4MDfnKaC9/jf5MZdsgjqgXiLy8v4+joKCKi2KWVlZVotVpl0xNpQ+PxuKQCQNKQG5wdDcZ9fX29chQqNuXbb7+N6+vraLVasb29Hbu7u7fAwYdqWW7n4QSnnURUHQaAUCawAPQ8IyDVeszzBrZAJ3PNjY2N6Pf7pX5wzuvnbzs2XgfznHD6zhzYsTEA9xhYd9Py+qDhGBl0Gmx6DOxkmbig8b/rjxId2d7ejvv37xf9aULj8vKyOKXcy86Ux8d6CPKQ9DZOrlpeXo52u13Wy9vae4X4mfQ8cVZKzWYzWq1WUXr2WOk8bKqFywKKwcT4AcSseGAEGXhK/MBKdTqdwhyaGfV1GWADhazQPBkYaStuT7wNiZWg//e1LXweZwTcG8wAHBE3uWIouuvr6zLBlNn6kz/5k2i1WnF+fh7tdjsiblIutra2bu0qJU2A587HnALeUJLknLCBZW1trdzD7NhdMfTZEFl2eY1Ftr+/XwA9+WPtdrvk452fn1eMlZkPs4d8fjgcVk4/c4kYNjzgtbZarVhZWYlXr15VQtiACufA1ev1IsMAKzPuXpPeLJCVpv+OqJ5E5g0kXIOcRNaiFTWGw2uZseIz9BG5fvHiRXS73TL+CwsL8eDBg9jf34+VlZW4f/9+0ReciNbr9Sp1OllXlEzLa51IQEQU1pUQ7Oeff15yqJkbP9OHbpbTNzGAjUYj2u12Kd/Hc6+srJS0HAwYMom+Y/wYM0Aqn6U+Is4AhgY2tdlsxtraWqyurha22/rP68SGyzrWMufnMgs27/2Iaikih3Oz8TfbiUzncD9ymV+zbgZsm609OTmJo6OjQsYwPhERjx8/jn6/Xzbw1es3R3m3Wq2y9lkLBlBmwHA+uC+h8eXl5Xj8+HFcXl7Gzs5O3L9/v8jwXWh2WLMTwrP6cxEzG46Dn8PpjAvpKFzH4A3dxdp3hAjwii588eJFSY9jnk3a8Df61PiA183y5+iucZH77nGxDmbOfV8/fx4L27A87vSBHG1H2jjq+dtvv43xeBzr6+vFqd/e3i6pVLCak8mk7K2YZ0MNYp2e4rl2Go7TA7a3t+OLL76IBw8evFWe3gpQQewedE8GAwxDtLe3F99991050QkFAyongd+TZK8YAaVUgXfXoiSZDEAjG3lQ2MPhsGyuygLDMxkguw6Y2SsrKe43r2XFmfNPPakYBzMc/i7Kkw
0I/NhDOT8/LwCdfJLJZFLKP0Xc5Ep2Op1SIoix5TrO4c1sAn1BSeaEaf4m/8ktb/z6kC0rBb/ueaEixM7OTqV4sHP3YOdsNFiUVjYYO9IpzHhNp9MCElAg5LCur6/HcDiM4+Pjipwxz2YjMWRWcGbzceY8Btwve9NWcPn6VsBmZ/mO1y7j7B2xlnuedWnp5hSdk5OToqyWlpbi/Pw8zs7O4vT0NEajUfzwww/luWBKOUIZgATgd6koryeHTXFe6fva2lrZWASj5dDwh25+juw4M66kPdiAWs7tBNlos65d4NwO9MXFReXcbObBr8FqUwrs8PCw3Ndh6el0djpadop4NrP91kFmhCzv9Bn96trArEeDDD6PTJs1Zc0Y5NiY8jx5f8LFxUV8//33sbBwc9oTOaU4o4TncR7Ozs7KZjPuQajfBpz+eeMPrzPe+/v7sby8HL/4xS9iYWEh9vf3b4WpP3SzA0Fj/H1ADGNOVZjJZLaL3jYam0MuIzohO3HIMvKdbR4s6vn5eZyenlZ0nHWfbbUd/3yoiuUQWbRThk2PuI0TDG59Hcstz2b84LXKtfwarDH/OxVgNBrFkydPotvtlr08tVqtRBAZU+SZNQ976j77GQ3uc7+MHfg+evvzzz+P7e3tt8vS2960cvTgetBsnF6/fh39fr8YEDpJxwFEPLgHHiBGmSMXd0Wg+RzCurCwUDl3GyB8cnJSFG0O/1iADPzs8dnD57c3Icwbh+z9mEF1eC17amZI+S5G00Ce3Mher1dK7TDO5J0tLy/Hj370o4iYHZIA88sY+Jlo5Auz6Mwk8IwcB9rr9cocOfxsA3AXWl7kuVkm9/b2CrNsgBYxM/QLCwsl9cQLjzVg5onTz5A1Az7YLIw+tT6bzWZ8//33pdwPhp3v4Y26b2ZDrej4H6WZPXLm1OA357ZGzM5X5v75O4wt/WJMXEuX8aOvbDiDoYN1arVa8cUXX0RElAoUsCmsB6okMC+sd0djPNcYQyfqZyNAHpuV611oGZj6bwBqHmeewwyP8/o8PmazzGpR1aNWq1Xy0HA0uCbpL7VaLV6+fBlnZ2eV/vEdPwt/W778nQxQc6TNbFBepxHVOtHzwt5ZJ/BZ/x9RBVjWhdfX1zEYDOLw8DB6vV5sbm4W54brD4fD2N/fj+vr69jY2CgAJaf3YBf43wRARJSTF2l8ZnFxMT777LM4OzuLJ0+eRMQNIXEXZNdAMeJ2ubR6vV5AkT9jW2ynnrln7AGREdWDH5AfMIHn3ddfWVkpG9gODw9LCuFkMisp6Tm3MzNPTvguzj1rhNd5Fq5lZ8T9zmuC317P3Hs0GpUoNCywI8rz9Abjd3h4GAcHB7G0tBQPHjwouhv5AS9R8mo0GlVkK5MUvk92qon0mdwAR8Bmt9vtcrz4m9o7GVTf1K/RMby71dXV+Nf/+l/Hf/yP/7HisTJZPARhIh+zxe/xeBxHR0extLQUg8GgLG5ypphgqH08U5+53Wq1YjAYxPHxcQU02QuKmO3GR0khJE4liIgKA+XX7fWb1vZi4fnNzth74j0zofV6veSN0m82fwFQnfQ9nU5jMBiUMiQYdzwijLTzc5hPfvBIAan0k7EmvxVwwEYtBNoyclc8eeYip2LwHsqUtJRnz55Vyspg4JhHDPb5+XkJNaMYrHAXFqpny0fMistHzIAnDtjq6mrs7e3F/fv3YzgcxqtXryryYXYkg04ztzkXmHVJ+Dc7izYAKPB5LFXEDAiz9rNX7E0fgHWugXIkPWUwGJTvshO62WzGj370ozg+Pi6M6MuXLwvDzPoh/xrgisGeTqdxfHxcYTYibhL9qaVar9dLTnaj0ShHTLKu6f9daDnlyE6wnZUc2nUOHDKZwaF1CgX9SRnCYeJagCoze51Op4De5eXlkoLBIRXoAMuS9a/XhdklG2t/LzPxzgP3ujCAyIyy2dEMPq2/+Z5lPTvdZ2dn8fTp01KZw7mnGxsbpQYyEcSLi4vChOYIhk
kNAzlYQlhWmueTNK3l5eVCVHzoNq8Plt+IqESpPB/Z7s7DHqScQMh4jTgfPTubjCmO7vb2dnE00F3IJX87/S2iKkMZdBvDRMxqp2bSy8xwRHVToa/piJfXEeMEYZI3QaILXZILB2o4HMZ3330Xk8mkpIUwnpTli4gCHmGzjQesC/jbc4gu9YZg65zJ5Gbj6t/8zd/Eb3/722g0GuWAkTe1dzKoXuR50o36Nzc340//9E9vKfvsIRPewOgwoBcXF3FwcBC7u7sxHo9LCRPuTc6TS0iR5wcow/BMJpN4+fJlmSjnEFnQsidkpsGergGIjbQFKi84bzZBCXli+R+G0/2wp8jYnJ6elo00hJ+pA9hsNmNjY6MIxNraWmxtbRWFbsPFovKzctgBxaWHw2FcXFxEt9uNy8vLsou9Xq/HgwcP4tNPP41Hjx5VnsM/d6XlObQ3y3ytra1Fv98vG2psACNm88znW63WrU1QMIOMKUn4fMeAAmcKhwtFwd+vXr2qOAo+OtU51PTRKQdmnSKiAqAZDysi+ujd+X5Wy7bnOqdyZB1hgwywnExuNtlRho57UdcYI7K5uRk7OzsxGo1idXU1tre3y3tmaBuNRrx+/Tq+++67cswneaoe952dnTKn6I9+vx9Pnz4t9VDp+11pHp/MenqN3b9/PyKqwNOGPus5dMn5+XnZiBMxOx1qMpkU58p1UtEfpLvg1MJub21txdHRUcUxs8E28LPONSPKsxrM5rGwI0VUyU6359Cgnh+vBRvVealJrjdpHfDy5cu4urqK7e3tSuh/d3e3sPHdbrcwQ+TnZiDG5j3nxWZmivQAj8H19XV888038ad/+qfx7//9vy/Hnd4FvWubn19nPinH5xx6nt359ran2DvrUjstk8mkkAau+Y1uNssdEaWqx+npaWH+vcbMzueTnzKZZVl1ebWI+RFoCA/LF8DO44Vc5msxZrzmyALP6PVEf7/99ts4OTmJ9fX1wvxTWQniyTnnOPIZ9xk3ZGeyVpuxsCb4PJfb29vx05/+NL7++uv41a9+9U65fatWzorSzYPPw1qAbOQMYhh4vo8AdDqdguzH43GlEHSjcZNf2mg0yufJcyUnkzDpeDwuZ0pTi5L+G3zCDvq5DDr92+EiT1hWPBn8Gdia6gY4mwb3eJsFgSE9PT0tDDSFchuNRqUg/3Q6rWyc4PMWfoRmOp2WEAHeJTva19fXK6ccUXT6+vo6Xr58GYeHh6WAfzaad4lBzQbLY222/Pz8/NZpLtk7zIDBOX3kWC4uLpYztvkOTgjjwy5/DCyN4w/Pzs7i5OSk3Mvh16wMrVjtGPm5/X2+5+cw88r3zCrY6+cZ5u0etaFBUS4uLpb7jcfjkmPK+6PRqBjylZWV2N3dray5/f39YrjI7WPOBoNBXF9flzPJ7927F9vb2xVWAePvGqAXFxfx4sWLePz4cdRqsyP37orcRlSjG2YfzLhxitRvfvObIm+ZGLARd3NeOT9EBKh4gNzaqSVShlwyv9RSPjk5KddCtiJmLA59oz+ZjTKQdmkf+oMsYhOyTcr5rB4Ty+F0Oi32hWfIZISZKHT1y5cv4+XLl2UHMmAdRpp1S1Ts/Py85Ppb5l2BxvbQ64yftbW1yhwsLy/HT3/60/g3/+bfRLvdjna7XTmF6EO2t/UBeaKM2T//8z/fss0R1bQAvmeCwb8Zs9FoVIAmY4GMIisAL+ZjY2OjVE/xPGeZcuTJz+lQvnWfwXF2tlz+LG8stR7lvtbvfAbMgBxZB2dWuVa7iba9fv26hPb39vZKHe5msxmffvppBW+sr6+XiFzeHOV++JQ0zy8MqskOxqlWu4kYPnr0KP7zf/7P8cUXXxQn+U3trQDViz0PVh78/f39ODk5KeFMU9sWKgAaCobcVRglWEEv1MXFxeJ50Y9Wq1Xy/jCYeAT37t2LWq0Wr1+/rrCHZlCh4Q1kPLlWnPbC7E3z2yE1e1F+bsbToYMcknMfELbz8/Ny4g
6eJykPPoHod7/7XXz55Zel7Ats4Pr6esXo8/wOfRJWtcIgRIVRJ52CXfzkwmZDcBc8+Yiqseb/iNv1bxuNm7OCCWuYcbLRjJjVTG21WpXSY5yyERGlriQyxEJnIbMhgPIbAF12/S4tLcXBwUHpI79ZO+4PssUz2dvP84GBAzQgC6xFA1tATc5VzmDY1zVocBgeVoSapA4PU5d0MBjExcVFAaKkAe3t7ZVncyoO58D7wIhsRJx+YpAH2/LjH/+4hPjNJt+Vlh2/rE9Yl4eHh0VHZb1JMyOEsY6YMerONYXx82dIS+GHsfQmHrP/zJND7Rhks/U2fDawyGk20sg//QCMGKjQHz7D63bCyEe2js99M9M5Ht+URXv69Gk0m8148OBB7OzsFPaYs8ZhUF36kDQJs7zcY2VlpfztNcUzA0jJOYyIYgv5jCMGH7oZzJgQsF29uLiI169fx5dffhnfffddxdm1ncw70C3TzD2fHY/HpV63HXLesxzZ3sJ2U1HEcsY1jH0MAHmeiCg6nGa5BvTOiw74c74+Y5Kjunbs0Hn8GMv4eY+OjuLx48cxnU4rJ5EtLS0V0oC8czZVs3mP/s+zAWtra5Uyk9zbG7KOj4/LAU0mQGq1WjSbzUIuvK29d/G0jKIjZp4PlPmPf/zjIlzZy8CQExrlhI2IuFWPkA1OAAQUhU+ZgB11/Uk2TcAcHh8fV0qgMNgILUbPIXiuT78jZswmApuZIoSe62YhdsjNr/M5FC1CDti+uLiIXq9XCmKjLG08hsNhPH/+vAj28fFx/Nmf/VlJdeCYPS8Ejkel3lk2BjgYMIKbm5slBL60tBSbm5vlVAmu6fG6C83PzJzZEWFBbWxsxI9+9KNYXl4ugJ25wNDR2ERl5gPmiVMzzs7OKsqW8SX8RMoFO/od/l9ZWYnNzc344YcfotfrRavVKsY14vbGPHv8rIUcNoWp8dzktBLnkMKUO1WHtcKJQxEzFsysE8YfRQWY6HQ68ezZs2i1WuV7Ozs7cX5+Hq9evYrJZFKOkSVVhbI9NuzuAyV8bJyZM0qxsSHCtX9xSCIinj17VllLdwWgZsPO2PK80+lNGs/nn38en3/+eTSbzcqc27Ca4TFAG4/HJfrCPUajWRkfWGVyirm/Q3/oveFwGLu7u/Hdd9/F4eFhfPLJJ7cMbURUZMqOlUFAdvh5n+fyGHlO89xlnW2SwmSFIwGMQwYXp6en8eTJkxiNRvHZZ5/F5uZmREQZOwPN4+PjaDabJQ3Aa4IfkxzUmOT/3D9AgIEKY1ur1Ur0MNvmD9msD7yuWHv7+/vxX/7Lf6ns3bCTgD1yLjusM3o0p4+4/BTOKc4JetcVAK6urqLZbEa/34/T09Oyyx/HPKJKWFEpyOvJwNCOOs/v61gv8kw0dG7eR8C9+Tz9I7rp9U3/uAZY4NmzZzEcDqPdbsejR49iZ2cnIm7sws7OTqVyjG1KJgYygDcYNsvMWGDLwBhgDsYCuXhX5ZT32iRlo2igQydhPWAzYDo5zvDq6qqwcBiMiKhMGMYsTx5oG4+J8DW7z9bW1uLy8rLsyCP0sr29XbwjLwKHiAxWM7jygMPq5tdRPm7zGAzfw2PIeywYL2rqRb569Sr6/X4lD2dtba2Ek/hNyGp3d7d8f2dnJ3Z3d0vunefSu7MNnBEewiEoYYeiYFgIB3Jdy8yHbvbKs/G2UcI5spEzK4+sAOI4Q5uGY4SyGgwGlZAdzBG5f4wT6yFiFq4fj8exvb0dV1dXpYxNRLUOaUTcKi6fwSrNyi5vZLIj6fFg7jNAQs79eX+P8eNvwEyn04kffvihgB4crMvLy/jqq6/i4OAgXr9+Hc+fP4+nT5/G5eVl2djHPWDDYE5RdPOU3HQ6LZUmvv/++8LOOp2G9ZD1zF1pdkQy28IcNpvNuHfvXinUnsOeNjI4QXnnP+wpxv78/LwS7ua+9Mljzdppt9sl4lKv1+PVq1cFLJhBp0/IXs67dzTAcud1bCDg9w
2+7cxFVHPIaQbPECxZlwN2rq+v44cffoirq6tSHJ8apkdHR3F8fFycTYDi5eVlYdVYdzBU5+fnZbMe5ZAcVUNnUH+53+9Hr9cr/Tbjn9PlPnTL2IDG32beSSkzuMEu5dxO5AIZcsk4dAH2Gd3la1o/Mr6w241Go+wpsFPkNcSaMBlA4zU+y/WZewM9wF4GgujEedghM648o8GoSSZkut/vx6tXr+Ls7CyWl5fj0aNH5QCUiNkmcQ57QB6ddoOc59xg/rcT6LmiT9hWivNb37IGneY2r71XmSl7fVaSuTN42o1Go2y6YYfhYDAoIJaEZq6HB5ANH4OxsbFRqZXW6/UK44Sn5RDIyclJOWry6OiohOYtEHgiNr40hNEgxQwrfcSr4TWXysisosOgfN/v+9SL0WgU3W43jo+P4+joKOr1egkr810Y5ZWVlej1ekXwut1uOT9+Z2enePcINt/1gvGP5x724OTkpDz7eDyOV69eFaVtb+ouGfnMyuRFX6vd5MP4uDyMt3eCTiY3G0cODg4qRjYiSmgeEApAtQGFvWu1WiWfLGK20xMwRUrFxsZGLC4uxsHBQYW54npmN7PnmufACs/evY05gDKiml6Sox/+nXP9zID4fsPhMF68eFHYYAqaN5vNAhwxLjBx5J87pQcn0YySn5F7owc2NzdjbW0t7t27Fw8fPrxVKxX5p/TKXWJPczNgmyfPrkWcnd/p9Kb2brfbLQcaMFaAHEegKAFjxtGsXkS17m7EzcYzcleXlpai2+2Wg1IiZobeIIVrer+CAYSdRZ6JdCIzt85T5br0346NZcTEig2/nTbruu+++y5OT09jYWEhPvvsszIWl5eXcXh4GK9fvy5lDT0vHPQRERV5y3nnsNaAUfo9nU7LXgP0Bjbr8vIyrq+v4+TkpBBAdwGkZh2TmUAca+dOeg6QGcbJm7+8IQ09jTOAvmJtY5ezA269DRbBUeNQIPrCb6dk2NlB1iLi1m87/w5/Z+fRIXvWA/e1I4aONntKVRRfi8/3+/149uxZOfJ8Z2cn1tbWyj2I+EEsmlRgDKkdXa/XSwoA6/z169dlIzVre56z78MQptNpicg6FQ2w+0aZeh/Bm3dzLwhCpSw0BIJcCcqREAbNQM5CHTETYAwSYb96vR6dTqdMKsxjnlznZxwdHZXP5EVMHx0uoB+m8LOQWWAzc+Xn4LkM4sw2MXYGwZTi6XQ6hX5nxy3hDcIWy8vLcXZ2Vj4zmUzKBqaLi4sSispg2fkyViAIOH2OiHJvPLTxeFxARrvdrjA3d6nZmOf54P/FxcXY3d0tY2dWAnk5OzuLRqNRztc2O+SUk4ib8e12uxUj32q1ioFttVoVZcICbzRm5dFwvqjcYJDgPD7C+eSQcj+DCZ7TifXOKeKaBgcGqXai5smuQzYoMDuAL168iOfPn5eoyebmZnmek5OTymYYxh42amdn5xYzRp6wFWlElWVgPddqtVJz0cqez1D0/+HDhyVP8K6AVK8ldFDETH4XFm5OdTOTlIErupHvwehYdjOoQ5fwmawPMOSDwaBEGRqNRjlRrtlsljXA93CokNVabXacqHNaI6oAB11jcGtA4+iP543rm5U14MiAyDLO2EbcGNeDg4N4+vRp1Gq1ePjwYbTb7XJEabfbjZOTk+h0OnFychKHh4dlLNkwaYBRq9UqRz2yqRcSB8BAGHt5ebmANLNrRAhdGeOuyG22g15zEbP6xsx9xMw5cM44OsVA1mwb17Vzg17OMkNfbPOQHapSkIsJYLQTYUcL/ZgjAA5zG5TS/Jp1Z3aUMrYyCGadoPccfXaElmpIgEoYUuyQCQ1jE9hSgKSfjfdZz5ks8xhA6GAbkWePAyD1+vq6HLn6pvbOEL8VXm7ccGVlJR4+fBg7OzuVEhCcqtPv9ytlJQzOImY193J+m3feWXHhkUKnLywslFIzjUajnIzASSck6WZDZUDC83jAbbBRZngF2UNEiMx48D6vYch5tojZpg
QMyng8jk6nE51Op5SDefDgQdlljqKr1WrR7XZLeSMX5mdBe5MLLDXCzfNZiaLU/QzsgO73+8X5aDQapf4sY5eV0V1peZ4MeDAUOQw5HA7j2bNn0e12K2eW43EaYCKLMN8YfuR0bW0tLi4uioySinJ9fR2rq6vFs424ASMcTTudTuPFixcRcbM+AAQobhfgd0qB55Rmtj8zYfMiBPTFeXqZZc9OCYqT8e10OiW/dGtrK7a3twt4BUR4N3nETY1J1hfgch7ApE/+mzGAkWItmRmxXEdEORqZz9wlJ8vsjRtjYvYERm0wGNwqtL+2tlZSddATyIp3PEfcMC9OgYqIsls3M1N2rDluOeJGr52ent7aQGkGjPVogOHybtmpNNOUx8gMmZvlwnKLnGSwylijP4fDYTx58iQajUbcv38/7t27V6JPHFqCUe/3+0UX1+v1UuuTPnNPO1U8I9FGUuOIWKHXLf/1er2sfcifjY2NO6N7sXXIhZ0AyxRywZ6UXq9XHFBSeHxEs6NOzpmOmDmnTqvw+3yP63gTNviC76ObI26TOJYxdDv3sQ317nrrSt43MWcH0NFcnP1510JGDVIZW6oS9Pv9ckLi/v5+7OzslM8y3t1ut+yXcFUgNqtn/U6ED0cK+8n3TC5QCQgZZQ7G43Fx0JhbUhTf1N65i5+BmZfcy2QtLCzE3t5eMTZMApNo9D/PeAKWIqJiaKCa2SwSEaUsjEEWxs7MJ/UUUSC+nxlQCxf3x8jby+O79tJs3LkOz86EGMw6TMB3HK4cj8cl56jX65UakdQnNeProudbW1ullllEFOMEy8YzMG45hGsGjcVs9oX+MpbD4bCAZ49PBux3obHQDFSZi83NzWi32xXFExElR6rVapXFF1HNbwKs4yBFRMl1RGEsLS2VnFWDiul0WpgTGEwzTMwTgMG52bC9eb3wbCg6np1+m/mkGcyaTeNa/J2dOoNZFCZ/1+uz0P7h4WG0Wq2SLM8RsGxiMqOPp80OXdYzfSN/1WvKrC2Ax2G+yaR6Kgrt6uqqktebGZa70PJcWQdR7ilipn+ZX4x8djy53vn5eXS73VsRLOrImqVBNl1OaTqdFjYFw83c4PBxmIVZXTOmOeoEE5tzMe1oc29AhgGDr8kPso2Od9SAfpkQcW7s5eVlPH78OE5OTgrA5xlPTk7i+Pi4cvhGRFSqmqyurlY2ntAP6x7+x8G4urqKbrcbW1tbhZAx4ZAZuW63WzYCGsB8yGYgTbP9MOOHnW40GiWtCXk0U8l3r66uKt9HNmDsAO00O+DITD5xCWLBAIv35hFyEdWUJ+abe2TQmYG6r5GdZj4P7kHP+zn5nDeMoccuLy/j+Pg4jo+Py96Ge/fuxd7eXgGDOFbdbrfUVSffPyIKCUO/vL4ZW9fdhnB5/fr1LQchM+joadKIcKqJvrypvbMOaqad38S0kCRPY5JRLOyAzu9znJtzNhAmh/YwQFyTPjBRMFgoJ3JMoJSz4CMMFiKzRjmslil9WhZOA1IrQ+7vRedJrNVq5XQLSse02+2yeFHijUYjhsNhCcdR3sQFkJeWlmJ3d7eS14pRzp6nFxXPjfLmhznCcBGSybtLs9f1oZsNQlbyMNGwPxi56fSm7uxwOLyV64h3C0MI22Qmi9e8Mx+Pe3l5ucwTm/6QK9jWZrMZ+/v7t06tipg5AMyrlaCfy05UZsT9PH4uG207XVmZGqByfY/tZDKJ4+Pj+Pbbb6NevynMvL6+Xk6+QW6RoYgZYGSdWQmSHkQzsEJ5A5IxgpTiAcg4jEXuFgDq/v37c5nKD9kyGLEDyPuk7wBu2JznOclpGoD6zJ6Ox+NKWM7sJlUTkEWiN86PRiY4TIU8SeTQ8mSHziFF6x3LtH+js826R8zYLutlh//zWuE91iV9YlPU8+fP4/DwMJaWlqLVasW9e/diPB6XsD76An2ITgQkZZbPz2AAzoZA5obTqdj5zGedhsN4w3p5Hd2FNo+d5jfjAkiJuF
16EXlBFgCn7AKfxywiR65OAmCKuJGzfPqU5RHGvNvtVvSA1wdjb9kzW2twjqy5SgGfAzj7+bnXeDyr0GNcgE2fh03o/2AwiJOTkwLW2+12PHjwoKIXHWWhbjEn8EXMWFITFNm+eP1iD6nF63Ez+4/eiYjKs/T7/cpG63ntvTZJ5deslDDy7GYkzHl+fh4XFxflrHE8awsIrCC7/3nPeQtQzgggwsgPk07oFUVRq93sdG00GiUn0wPm8LdBq3NfDFCcw2rm1QywF5pZJcAPkwQAZ6cmntDx8XG8fPmyGAI2h2EoFhYWStmpfr9fynH1er1YXFysbDQDXLALFSp93kKyR4oygFUxEMUgcZQqOYEZIN2FltMzPPcRs/IZW1tbpQyJx+DFixcVlpIFx9hl7zsiinxzj1arVfqDIUYmAazIHvJGnhqb4HDszBzZ4YmYgVzWjRlxKzQz+J53PyMyzHXMEvAcyHNEFNCHwT05OYnHjx/HcDgs+U/r6+vx+vXr6HQ6lWcmZYL1hA7wUXmnp6fx+vXrSmiacSCRn/6ZWUMxA7q8oQYdAaPicbgLbR4gNROF7AGwDLqIftgB9vXIvzVrgyxQZg3dyffIHeTzyDz9Qu+Q9oPBRzfaUZzHHLmPJjWcI227weciZiDX1+R9g3QcEoylmSnGZjQaldD+9fV1bG5uxqNHj8rmjpcvX5ZNjVzHkTvSe5DfeUCG38ijwbHDtvV6vaT12G4Y9C4tLVVqDn/oZjnNdgBnE/afeYQ5zeQGczca3ZSpA5yaWbYTjX5lHMbjcZkrnGLmgbmOmKUNILNOZ8kkkvvl191v1hMA22sPHWXAy3dMaOVoLKDc5JL1+nA4jMPDw3KS3tLSUjx48KBgJ6JWhPa93tlgzW57+pOJCEcXBoPBrfz2eZ83oCf32OsyIuL169dvlan3qoNqA8X/CBQ5I91uNz755JMysBFRNph4wV5eXpYQ0/X1dSU0jTEkl8dMARNl1sTskxUQipucCgQUEEcyuhcTE87A8ZoTtSeTWdmULHheMBh6K1o+YwUKfT6ZTEohc8L2JMM3m80iRPX6TZ09ThyKiKJUyeuIiMJ4cmJRu90uYCkDeJ/64HCYKXsDAOabU7sMyCLuTi3UNykPh+nX19djZ2engMFG4+aoXOq+IlMsRnbdItfOPR2NRrfK9LApiEYeKgwJO6vpp+tZrqyslCLKW1tblbCggTOKlGe285RZTitFPoMCQxlaqWaG32EnKyGcsW63G19//XUcHR1Fs9ksR4m+fv06zs7O4uzsLHZ2duLVq1dFKZpdQKewEZBjdjEYrHfKzLEezRhiANBNdrIweLCC4/FNvrdZg7vSsoG0IwizxzM7BGdZdFoHxgG59YaT6XRamFg+a/Czvr5e9JLlj34iA2yonE5vCoR/+umnxdF1tMXMvMOwlgWzSXbI+K4dRUfGzJaisyKi6PzM6NBvjPiTJ0+i1+tFu92OnZ2dWF1dLbUyCQEbXDsqxcZUg2j0g3UFB3vkMkvMH6+ZiTVwtw1kbd0lB2seSMUxYtc48xURBYy7ISfoQgCuIzrIIXrahBKEGTLPBlWnvZED6+Orh8PhrXF3KkhO+bKudIUVg0/m1/LNevD6Nv5gDKwHYIIjooD9iCikFcB5Z2enpIkgb2zA5RmIdAA2GV/uy+dsQ1hP1Gg3Q+2NxsipbTC6m2dE5zjvd157Z3b1PKPGTSNuFtT29nYBQCgcPBeEkkUHaDw+Pr7lDaNEnTO6ublZFqeL5QIEYBCsNAhJcfrS+fl5OfcYA24GySyCFRjX5MdeAvdhXPisc48QTgAz42P2jefqdDpxdHRUNs+sr69Hu90uic2EmNhBFzFjaX19GmNupYDnyUIBKFmRAMoRMoTYXjrzYI+JdpeMfMTtXdAO2XDymI0mrAVjxU5P55Bh6FBWzCfKg3Fhpzjnlk+n08KYI782+g458Z1+v18x7F6LDsGYqeJZkVkznm6+HiyoP2uWDQ
NiHWBHkDBxp9OJyWRSTi2ZTCaVDSScjIYSJFzq+UGXcLwx4wTgtKL3urfCvL6+LuveUQoAKqH+09PTuc7Mh2yeazsW6B3qjzIOyKwZC8vqcDiMfr9fQKjLSUVEkTMMDekpbHAF9MOyYOi5JzJO/mutVisb3iJmG62QL7OWzK1D9mZfsn4xC+lrWGYzOOB/n3zlzY0A/NPT0zg6OirRK+SXY7OZE8srEQTWI/cw0UEjzMw4sX7RJ6TAwHLByLp2rR01dAbHfH7oZtLGADTiZsw5itgsWiZ1zBDX6/U4OTkpc2T9zdhNJpMC0B3JJN8fJp/GGoKEgflnNz84weQXshgxA6kmFixzXlfzWmZH7ai5Afjos1NRcG4ibmSKSkXYHGx+xA1JdXx8XCKujB/X8YZb9DifMaiknBl7MzY3N8vm4V6vF0dHR6WvGZzC9q+trcXKykrZe9HpdN7JoL73Ln4bP3s3CwsLZWcWnwPkrK2tFcWUwzaEiD1pETeAF2PWaNxsdsLgRFS9j+zdIMBmMPG6qHcYEZVQgT0eGwI/I2NgBWWvFhBkA+cwANc2AOB9WJzDw8Oy2xvGc3Nzs7CqLDYALSBmY2Mj9vf3i3dOaGo6nRZBNtDgeysrK4VZZk75gVFgNyCLmbQEh0EYg7va7KVahlH8yJrZqrOzs0oBc8YPBeGEfb/vI91QAOT2YYiYc+TT4I+Q9osXLwoopkh3RLUYtNmiiGrtUqeT+LMGr/y202LF6nEyS8s16Q/Xvby8LPXxWq1WrK+vx+rqalFAHJWLDHscuAb1+SKiwuzTDxSdGTIrQ55hcXGxbBaYTqel7BEyy5ygdPOGwLvQbJysV+gjBiiz3AZ0MP6+DkY5olp1xJVO+Nzh4WEcHx/H8+fPK+wQOsjsz8rKSqk6sbKyUhyBzAhFzOTHBtqhT0fIDEQiqg4nz2C2BxtjZ8/6m+tFzPJUSfV48eJFCflSS7fX68Xh4WEMh8NiwNF/Br/0xeybf9iUwn0NamxDlpaW4vj4OE5OTiq2g/HyfEdEqV5zFyJX8yIRtqfYLQBqo9Eo8+fnQh5evXpV9O480BRxI0tUPADIQj6wyRRdarYdjILDR9UQ6wI7OhFRcQz4DO/zGvd4k000G5mZca8VxoMf9HmOivgwooWFhdja2oqdnZ1CzPV6vVIVwvt8IBFZb3kDG7odR75er5dyk8huvX6TWsnGvtPT04ptnIcX3XdvJnxTe2cOagap/LBwFhcXY2trq3ga3HBh4aY81PLycvEymWgnhjNpfhhCqDCtKAdOkrFwAlgJr9pLZ+MJLM48xWgjZwVvD8DMKospGwIriMxyuQF6ACvT6bQkC0+nN4nyCMFoNCp1wkzhs3BhUexFOpRixey8XpQ3KQ9utdrNbnMK/ns3O0oGZuX09PSWMroLijLidk25vKsWUHZ+fl42gLBY2R0L62aQa+DvhlJFdpCfo6OjODk5qYQU+W1ZjpjV3xsOh8UAkssNoKMvyJHXpP83mMgMsvvsHaFmJmwcAfMoLX8fhdntdosTSPkbQlIvX74sskkxcjN1bLyBzQB8ec3RfwNUO4xeo/Rze3u7Ut6KzwyHw1Ln0nrhrjQ7VP7x+NPvzOAgJxhrH2lcq9VKfU0asjEYDMr3HaGZTCbx6tWr8j/hQc8LzF6n0yngzU6YwaKbwQINubDtyQxWxEx2eQbmHbm3rnNuosfReprdzRji3d3dsrOeNBCXdmOc/Uy8B1OK/uA0H0rveI5p9H1paSkePnwYjcZNDe9er1d0RmaPLy4uSj3hu+RcRdyORhiUZQzB5x2RY6NqRHVjpBsRGPABcnN9fV0Op4D1NwCzTiTHNa+DiJlOd2Oe5ukMs6DoJQMy61nLEtfkB/m1A+QIKQTcZDKphO0hBjg0iaL6OOH0GRLQKT30k/VHtMvhf/cHpwo7tbKyEjs7OyW9yONrgG+ZPzs7+38rM4VQMSjZkydn7969e5UddCgX54yNx+OC6D
mvGMPha1M+CYB5eXkZz58/v+WJZ4E3AMNIbm1tFaXAYAMaDYh5HoTHAJhxsIFzvxkf98/sGADYQAkDwvdgSTEOCAmhGzbLrK6uFsAyGo0KiORUh1rt5iQilzaywwCYgFnhOZ0LSd7qxsZGPHz4sDyHWavFxcVyNJzBAwrmLjUbrohqXiY502xu8g5vMzXj8awsxmAwKIsyh3nwRHGuODHp6OioAGB7rtPpbAcznj/yxCEI9MlsIw6HZdeMgJ+TxpzbsNthQ+64lsNwZrjoi0vIdTqdePHiRQyHw1hbWyue9WAwiG63W07KYnc38r22thYbGxulrFTELO3E88ezeM2akc5zMR6PSy3K6fQmt5coDDmpnJfOaVZ89y41R3WsQ5AtDM35+XnZxfv69esyx3yX9YsMMUasbY7dZe7X1tYKqIPBz8W7vZuXRtgOQ0Yptiybvo4ZTJ6ZtWSnkr/tbOTIh8Gjr+X5NQNlHf306dPCtgHM2emMzqYygQGCn8kABeNPRQkfamE7Qr+4PuPF/oBXr17FH/7wh+h0OiUFgB3nL168KLu27wIxkMG6QTiOAp9zFNHAezwex9HRUZyenpZ0kcws+n4wyMw/83Z5eRmbm5ulRJgPEXK0KeMa9Lyfw8/GnBsb8TkTdFnG3H/6yjWdh81nLAteG2AHsBEkEY7+2dlZsQ+vXr0qspGjBiZTIqKknWAHT05OYjKZlA27GXzTd66DnSBVAuCJHbXzwTOyaett7Z0Map4gG0AemDw+FiXfA+wwEFD67C7js6bvyXkivM9CBGDmXcUMAp4SA4kCJVToM77dUJwoDdP2/M6GH+Fy+CJiJszz2FRec44YBp/8MHZ4R0RsbW0VVpSE5FarVWoVRlTzBjE8lHzI4IL+0WcLZqPRKCAZ1iUiKoqUa5mxyuVCPH4fumWwkRmZWq1WQiHkIFEHzruc3VAcyK43XVBRwd7s9fXNed0ch8p4uooDOYJ2jFgrpKcQqgLEOiweMfP0sxGnj84zRlGwSYmxmWfMUfw8b2ZpkY3j4+Nykk6z2Yzd3d3CzJKTSmK9nTaeycCe5H7nYMFEMcaWSf/P+JtxM6ifTCZxcHBQTug6PT2tMId3CaBm9jRiBujYLW/nE9llfp0GlcG9D1SIiCKD3IP3GSdHZJgzOwb1er0wLDi+tVqtUkImEwyZgc2pWpldpHFtdGjWuehErm+wyzpBFvjNiVCknlHW7PT0tGye5LMmNwz8kTly7AD5fnZknL7acBMdMDNFJGJvb6/oW5xtytCRGnZX9G7E7VxUmpnuiGrKEsxbt9stOjiDmzzfk8mkAuQYu9XV1Xjw4EFsb29Hu90uuiHb7ogoeZROAbAeNR4whrH+43e2+xmnuMH4mvziewamyKx1esTNmj08PCxsJeOytbUVV1dX8fr160J6cT8/g9dXRJTIClHq3d3dW+XOPG7ZFhjAApa9vv0Znun58+dl38ab2juTB7OQMRFM3traWtnZhTJAcFwegYV1fX19q/yO8xAIE2GsBoNBZZOJN1XgYTEo9I/fAFIMOwYSMGfFb5qbfvn5UUD2AGw8siAzPvlz0+m0GF/YJ/Igp9NpbG1txf7+flkEMKYbGxuFDXKCv1mA8Xgc29vb5VoII4sKoJqBuIXXr2d2AKXPKRTdbvfWM94VI59DpBFVEO0KBS7n4ly4zI5TwsNzyfX5PHOyuroaX375ZXS73cqxdhGzXEgWLCDN98Qwjkaj6PV65Z4YWyoqMCeEVukXSog1x/MzR6yBiJkCySA+R1D4LPKFF0++J8nvlBQBtMJK5ZAXzDXreXl5uaxjrz+Hq0ejUckh5TNmQgG7OAT0Gcfh4uKinCD1+vXrimLNSvtDNjOnEbc3qGbWEEeCcfLaRT6dXzcajQq4Hw6HBbiNxzdHGRNF2NzcjL29veKkmACw47q4uBjr6+uxublZ9iMMBoOSv8YzOWI2L1eNn0yMZAPOaziJXDunvhCdMKg1uBiNRv
H999+Xzan0C3aK/ub1yRjYyWMvRgaLBrg8W9ZJNt48G0wX6XJ22iaTm7Q1oot3CaBmmxgRJdXErLHnEgLJG2l8HetkpyKxqQk5QQ7v3btXojnIgGWYcUcvgRVwzLPc0Q9kJztS9A3sYj3HdzOLaAaTfvE5SDfWtTGInf+Im3z0vb29aLVasbGxUaLUdlatt/1M2HX6xd4L+uhITAalbp5XSBWvMUgZPnd+fh4vX76spFTMa+/cJOVmdM97y8vLsbe3Vyhi2uXlZSV3MeKmHALfx0BOpzOaHiYPhXd4eFiOS/SDwjhhJJ3vYGVwdnZWAC6Ch2DzPPQlYpbTasMAkPBEZlaUv7O35Ovbq6cfg8GgHAu4trYWDx48KDVjUe6uhABAouxGvV4vJzEADHZ2dipgg2bW1orBC8bAywuU8BMGrdPpRK/Xi+FwWDEqd0lR5pBhdizW1tYqSd0GdNTxdXikVrvZBOVj2rg+oNHnFY9Gozg6OopGo1HC9YAo5iEiClOaAUcOudB3Mwp8PytKZJ3GWsvz7DAPcmBl7+9jpM2Yw3qwK5+ayBFRjurl8yjTDJxIGbi4uChVK+xAWhbNUDUajXImPErYThl/5zAwqT84I2b53sR2/LGbQYzXZMRsDHgtGwtCydZjyKhZDKcYweZzD6oEfPHFF/H555+X3ey8x2Y2l+yp1WqxtbVVSqtZBs0w5j0EETMjOC+U6/WHrJsk4TNcj/nnGsitx9B6+ezsLA4PDwsLzDGNHGVq/W1QA3Dw+iG1ah5riPxxLaco0HeuZd3NCY2WZcbw+Pi45CDeBWIgyyp/G+AQNWL+yCE9OzuryAY214SSG4DWtdXpA/NDI32J63JNp6gQcYDYyuvKzj1yapmwHJixz9FWg2zbJq5rbOHn9vqYTCaVtEBIqwcPHsTV1VXBTCYivAasx7FX0+n0FjngOfDrxhfWMbw2Gt1UdsKuQqb5h/0JOcc3t39RoX4v/ogbZYjxRQFC3wK0mGh2P6+vr1cUC6gdo05OBSESEn/X19cr3qnzbgBn9lwYeLNaoPXsiWYBd34WAmRGlO+x2JhsKxyzBdmjZAwvLi6Kp7O+vl5O3eF+XHthYaGAe0LRhNI2NjaKd80GE3uYWUEawNPPiKgU4p7HWjDGzOuzZ8/mlvC4SyyUm5UnxoVd5eTxnZ+fR7/fLxt5fPoWbAqgkXAy4+tSOisrK7G1tRX/6l/9q/j8889je3u71NzjuoPBoDhUXDMiSmiPo2RtYJFne+coMs85ype/DdA872Yl8pr0e1zLmzXo1/HxcZyenpa1R1UPxgs2NSJKCDUzRjhtbLY0e+Y1zXqAXWi32yUXG6fWOsdzz7NTDgnnl7CpZeQuNCt9v4bCJzfZBhom3XOKPoMR9djwecKkjBObW+/fv1/qVNtxox/WLxEzI4+cOLeUz1qv0YeI6tGUmSCg2ajyHTsiZkYxxL5mBnKTyc0GMOS0VrvJ4UcHWJ9zP+5tMobndgF69992xM6H81h9Lzb9En3gWrBQ9Xq97N72oQwfumVZjagyh0Q1kEfGw9U7PN+whegik0vMNWX4zDhav7ChFbAUMZtH0mJMUETMNtR6gw9yy3NZLnKfLYf8mAjju2b5/R1kIYNik31UR+FgF7AGFSd8Let3O4PuN3ViHc12ahV9yevRzqNxjdMXLON2TDgi9W3trYX6MwvnxsAxyRSHx1Dba2dSWJy8l0EfO8+WlpZif38/vvjii+h0OjGd3pRBIfTkyTOYQxnxd6MxK9MznU5LvoMBaM6N4j2HwzIAN1A3Q4FitrdrRex7Acb7/X40Go1yTODGxkYx7hcXF5V8DgAqHj+bTlZWVqLf7xcGyt6aFaIXihfE+fl5NJvNsvFiNBoVlip7pKPRKJ48eVI2FTB/NoZ3oWU2OAOWRqMRn3zySSl1wTwvLCyUkCZjTqmZe/fulSR6h9iRI2QT8LS+vh4HBweVDS04I3we8IdSnU6nsbGxURjHiJ
ns+Fn8PJ5zrmdFko2kc0odPmLdsDaQc0c7kGueh/p3a2tr8dOf/jRarVY8f/68gAPkCeBKeTMcL69lxiUiKiDbIXrrHZ6djTnj8awOpefewJxTwwaDQeWYvzymH7plJs5OJkCI/2H7cfIBNeTr27ByLUebcNyRf1hay3q9Xi/kAcfIIrPOxcaRZcyt5wFT3CsbSfSIa6z6dcbAup5xyODVxtlrzIaePL6Im3rbDx8+jIWFhQJakamIWT1Ms7CQLtPpTeqVQ9jYOeTX6y73w3M7nU4Lq0+5wIgo+zNwIIjGsE/B4/ihmu2AX+NnY2OjMl+k68HcRURFz2XH0U4bjj5F+uv1eoUBBKRZDh01YF6ISJKKYrYxIm7Jo+fX4DMTXZYTV0DJ88T1MxnIc0NKwAKjuygL12634969e2XjMml33Ju+GdzmaITTIw2keU7wB/qCsYBMMIFwfHxc9nBkufCYHB4exvPnz+c+t9t70V0WCv+9sLBQ8o0YAG94YCESCgQtZy+ZBUq4MCJKsjq78NgEhWJz2JyJMNBgAHd2dirnxHojhXOWMmNqkEkD9NoDsrdmIbbXx/XNPp+dnVVYJow3YwXQX1tbK7sROW0Ko4+Hww7c/f39W940Boad/nhDPMPV1VWpuUoOYK1WKyUgWAiMTa/Xi1evXhU2cJ4SuUstM9j8+ChGy1DEDPAx1wZ0KMaIaukQsxuM4fn5eWxubpaTqmxgrTi5NjLSbrdjb2+vXCeiuks0hyxt6C0bfMYMl2U3p3vwebOnHhca/aTG3ng8LvmHw+Gw5MxaKU4mk1hbWyv5gNPptKIYW61WOSYzK3GH0gzWGZMHDx6UzYGkojDPlt2IKJEWnI7Mbt2FNg8o8zzWudYtGC9vGkNecMyzkQSkAYiQXdIsyIOm4gpzSpiZ8c0OYLPZLPrYQNNhT+5HP/w7g03+NjvmdCLLupljP6f7z3fQb5RK3NjYiKOjo/LcXJ8x39jYqJApOATT6fRWjmjWw153GXyjPxwJYaOWQVmv1yuAajAYxJMnT25FsT5kY47ceM7FxdnRzo70uMyh59FOcZ5H5hKAizw6muXcZzbO2cnBluNYQARNp9PKwRd+BsstOjiPvT+boxXWhfzGtuTntdPCb3APhfmxFcjQy5cvK5ti7eSagHH0qlarlShJthOsAZxZdC5jlyMyOG0AWv73+43GzdHz3377bbx8+fKdsvtWBtWLfB7DQHjSp2Ksrq6WBW5Pxzl9GVzS+V6vV9mxzqIl8ZnB5ziyzKgYEPd6vVJwmd2o9u4MQPgez4gC5Lks3J4sC5yVNQLJZPI3ggvFDeMGEK/Xb/KKrq+vC1Cv1WoVzxymGsFh1yMMFbtxGQ+MGvM1mUzKEWMWQhhBGEAYRE65ovICNT3JA2ZxZMX0oZtZQLPHvMfC8Rwa+NhYkA6A82Vv0uAUZUvOM7ICU8d3DZi5vr3lWq1WNgwQ7vMGN67lsC2vWSEh15ZxM6b0fx4I9bjYSDgi0Ol04uLiohwPy4Yp1inMAd8lNM39fTwvKTwOB3F/ml8HhMIO0Gc2ZFkZ0xdkodfrxVdffVVKpdlJuSuNtTvPSfC8+TPOj7axQ+cyvuhLmM95NVBhoNA9nU6n5LrRP2QQYgBbwI5+O3N2KOg36yjrT57P6QFcg37m/E+u6evYqTEo4D3qu6I/O51O2dyBfmVdUaScDWUR1XAp5Qxtz94kTxm8eH+A83szETIej0uO99HRUXz11VfvzOH7Y7b8vMxrrVYrsmliiHzmedexTeZauTnH3WMP2KOe8vn5eanwk5lPO7Hoxnl9elPLaQP0FRuSn9ljZNKBv5F99JKfm3QcWFI2pdbr9eJsofPIo/XeGRNuvq5TuHg/E3deq8Zdmd2G5MJh5vqMfa1Wi36/H7/61a9u5Q/Pa+9EFXnR0xkeAkUFBV2v18sOM3vMUMkYfhtc/j88PCy7KRFeezc8rPNyfA+zm67TSG6KywDlvBYLl5klvz
fPYJjyNuvk62Umgc1P5BnxvHzWHhyhtidPnlSAvYUIFrZerxcGwJ4iHqY9q3a7XcpwGHBbgS8uLlbqVFLP9uTkpBj37LjMY38+RDOLksEVi5vd4BGz+rTkhk4ms/IeBncYdRRJRFRKhdVqtWJgcKxarVZsb2+Xc7odbajVahWFSPI/wIq+eR1Zadhjz7LqscjA28oRMGD5nufxs6ZQgIQXSU2BjTPbx5onJQDHJjMoGxsbETHb+Q0T6o2NRAMsp5yQwhhEVDc8oHhZC7VaLTqdTjx79qwoyAyAPnTLzKR/cJJI7zFoyzmXNvI2BgY1k8mkOL04ud5wNhqNyolbtKx76RthWxMUvo7BgFsGWbYzfm4z+/nzjopxD69RA5OImQ6GIGg2m8W5stF1nw2wbWum02msra3dSsWxA5+BSWbX6CeMnsdpYWGhpL4tLS1Fv9+Pr776qhyAcZea+5MZZBx5Pgd2YKOkgfg8oG/bBMnDfAG+7BRFxK06x9abft21PiHGGH/2IeQoi9cZsuDnoOV0EV8nO2EmUnjP90M/4hAuLy/H+fl5HB0dFaeca1NC0Ufl+tkbjcbcQ2fcHHXwusiN8fd4cF8OUWo0GjEcDuOXv/xlca7ehRfeyqC6k0beNor1ej36/X75LEqKjmJU2IEOa2iQyPunp6cFhfvoPUJMlJlZXV0tu/5coskG1obaANUKnf7CXhnkmjnNHr3z6xwKMsuBofDrgE4fs5hPf8CQs2gWFxfj5cuXhanyPcnFWlhYiAcPHhRWGcBrgIpCJMSK1+bae/YoATGw4szRcDiMg4ODCtjLi/QuNGQiYsb2WPEBqqhR6oZMEerECFtp2bFBcVgxEKYjzwnGygys81xRuKw1jDxyRp5wxMwxdE5Q3sBkOef6Nuz0xWFJK83MnNqZG4/HMRwOyzn2hNd7vV5Jc2DdsLkPh8d1C/286+vrcXl5WRypzFLTP691lDRRFn6beSJHEiU8Go3i5OQknj59essxvSst6yjaZHKTg86Z5hFRnFkzHFzDBtSya+caXYQOcMUTUjCWlpZiZ2en4qi5wZDArqC37ADwXW8uiphFmvzcZrjyWJghzoxVbowJDZmZTCZFVhuNRmxsbJSd5LYLHjtC6cii86dXVlZu1aW1I+n8VV63nKJrsKfIPc9/cXERBwcH8ejRo2g2m/HkyZP4xS9+8c4SPX/sNg+csmZNLGHDGGOcTta9NykZJ/A6nyE1xVFCdoezcSiimtpkfZtzgNHJli/0rO0uesPylYGlQR3fYTzetE/A4XC+a30+mcxOxASc1mq1ODg4iKurq8JS89zIGM6BbTv9xCG1jcjsfh4n+usxMKNqfAh52Wg0ivP6/PnzchDAuzDDe5WZyp6vgczGxkYpbULZBzw9e9qAKdcxtcBwfCYPRD4qxf4pGcUEkKMTMTs61UwqJ3G4CC/XseKLmBlrFJDBKAaUyTBg9cKwwNlLyyEFFhqLa21trTwL4SNYvPH4ppbk6elpZQFyHysBjAl5ujYKfJYfM68OUzG3jBfXxlhdX98UnmcTTAbm9krvQvNiMYBmQdXr9RL2hM3zs8PisZkJB8fPi+Lyee/r6+sFsDpfdd5i5N4sdHJ0AHrILV49spdL+NihonlO592b/nu3tWXETG3EbHfrZHJT544TyNrtdjkSEpkyc4rypVoCjWcivO9SddY5dg5Yi3aeck6i1wc/9PXs7Cy+/PLLODo6uhUZuistOxn8ds7XZDKppEvYsFkWDOpZy9wDOXIeNGF7jAk5j/mkMTNKOCyea+bJwMLPYqaT/swD5pm5MkixnvY1Mkin4dTXarU4Pj6O8Xhc0pkob2gyxvJkWc4ECJUhrDvyfNiZxeCbOWaebFtoL1++jIiI3d3dGI/H8fXXX8c333xTmeO7IMPzojcR1c06BjEG6xG3T2OMiIqMWT5IG6Q5euv70TxWvo8dKNhH5ii/T394nf/tvGdmNDsmPFtmWT0+WebpVy
avms1m9Pv94ujzXewYDkEO63tdrK+vlzHNz8TrmTnlNdY4zVVUjHWIvi4tLcXjx4/jf/2v/1VSEd8lt++1iz+jaofS2u12mQRAKUKCsbJSnfcaDBQ5FGtra+XUmNFoFLu7uxERheXkfjAuDBTXzQbLANOnbph6RzFkxoY2L1+VycyLgXt5HL0AhsNhMZiElzA2Z2dnZXFiJOr1emxvb5e8VAsaaRBumbEw68HrMNvMmRcWSsPJ0Lz37NmzUlnB43MXFKQb82J5YTx4bTAYVJK/mZdms1kxKrD+yBvXgr2MiDI35N/Zk/fuZedlwjYR5iJ0TqkpFA1gZG1t7ZZC5J5OIaFvZhv5m/vymhWxZTozArDCGAcfsbe0tBTD4bCUfMHJRKZ5jTWNQeIeAFQrzqwvWBv0eTqdlrG2k+jwvgEqOub58+fxq1/9qlJaKQO8D91yX9xH5IKc8WzALRtOLeL1bGx4HR3p3DHLT8TMocjGG/IB2UAP2/ky2AYomkX0vBtgeC4jqrlxGUhmcGqdhKxNpzd1t4nWsdeBdCvrcfcnb+jB3nAN2L8MkiA+AFPoA/Sr2SaeA0KHnNfRaBQPHz6My8vLGAwG8Zvf/KaUwnO04q60DOAjZiDfDTCV58zfZ4xyeJyNUBHVEmXIlWUC++fr05BNz2cGpXbg6IPH23JKf7ANlic/Z2ZV0V0GhwbMkFoGn/V6vZACBqgAWOydS3rxQx9JLbRDyXjagfc6z3LnNWLGlKOvGburq6t4/Phx2b1vTPmm9i+qg+qHw2M5PDwsZWOsBLIXwMMDYFGMPBieLaB3NBqVXDQEsdFolPqQk8mkTAKfgYrnOgidQ6H8OFxJn+3h0Gf6l71xhCYr4azg8r04f3Y8vjn3mR2v7H6eTmdH5gEiuZ/DCdyHHFuPPT9OkKYfLCDyemFts6L08yBE/X4/njx5UpStF2kO033oZnnLSrxWq8Xe3l5MJpNSB5VNN+y4dxiE75q1MftxcXFRACrg9vr6uqS+AO5gQGFiXTvVYVOYRELwrBmHBJF/DCMtsxT8YFy9JrPiBwgDqM36Gpy62gblyFirPMt4PC5jSjK/ASRrFCcN0GLHljJH9GNzc7OkrtgxNeN7cXFR6kNmXXB0dBTfffdd/PDDDxW5vosts7usbZwfO8F5/vlt8JfnHL3FRhNHToiCUcMW/cxc2HljjukzThepRAAs1mEGjQbS1jk8h5+N/zOAcNiWftAXyxrf4YCHhYWF2NraqoATOzWMO3PhaggRs1JoW1tblaO4sY8G06urq9FqtebW6cWO0D8AxeXlZbx+/TqOj49jbW0tlpeX46uvvoonT56UfPd54/qhW3YqjBciogJUs5wj15YJ20/LjKt1ZD3Fd3GqyO2nX7alfJ6+cm/6YttGf4g6ZXDtvuc5cbicZzBpwGdMePme1HuOuMnZZwwst67igV3CCchRUmrNe53QMj4zOZdxjseDyNnBwUFEzDYMc/zqr3/967IBk+d7W/sXhfjdIRiMhw8fVoqcU8bIymw8np3bjlcYUS2Jg3Ei39T0vcOn9fpslzmTiTJEgXiT0XQ6LYY+YhYiN3ChL2a27IHN88ytQL2omAzeRygx+D61hbwcPGMLhSly2CN7cLTl5eXY398vLKrHHcNiNqlM/P/PklKzzCDd7Ar/D4fDePLkSXzzzTeV8IsbBvQuNHvh7heyvLy8HO12uxjTwWAQ3W63okSR5YjbLCIyi6zCHrZarTg7OyupGpQy8ulfOFZsGvA8c23PFcYRGXXaAHNh5YFSoaGYUDRmZS1z9MUhXANxvuuC7ZubmxEx24zAvc7Ozso6R87pA/LF2mg2m+X52PQEW8iuUHKlmEMDceSblA0zKMwhmw3/6Z/+KU5OTm6xPJlZ+ZAts4Vev8wtepM5Za5Y83ZIfT591ms51EauuQFFRHUTBDLDJhUcABsvDDEHIZgRt8MXMWP85xEbBtw0/kZmMqFAf62z/ZsI0OLiYq
ytrVUMP2FKIiUGFQZcjB86lPsTHUAP0xgbs89evzwXKW3Yw3v37sWf//mfl1qdT58+LTWouU7Wwx+q2cbkuSB1xKlJttFZrr0xyfYUGXKJL/Q5tpWoKPrZ+tUpIo4UsI7oP3JrcOwxd2TRjhHXmzcneQ2yJhzRsm70uoDlj4hCbLmKAc+G7UCGkEOu6zxXyLHsBGbnj/45lZL3KHM5mdzkx3e73Xj8+HFcX19Hs9msEH+/+93v4unTpxWb9S6d+84QvweNv1Fe0+m05FASthgMBmV3LwYdg8NvjCNKjVD2aHSzAYoBxJATeqV8gXcCLi8vl+tSrJYBYVMWisQ0uGl/GzsmBKVqIcdAsiED4eL+WZGhhKyUYKCm09nZzYBTwsJmyuwNwmjAFJP3xPGCw+GwsFW+P8LrU6pYIKPRzcYSH/nJAmGhwap8//33cXJyUr5rObmLnrwNkxcKi9KMCMzpyclJCRtjYI+OjkoYzyXOIqo7PGGmUb5XV1dxcnIS29vbRa5xqAaDQUWZRMx2Mztk6nmg39lo+xo8H8bCbAP3RrH482bokAkbGa6PQmLTI8bZ68yefUS1yoTng0MmyKeinBa1j7kvY2G2N69Vswg4A8w5DMof/vCH+PWvf10ZdzNmd8XQm02KmBk89G6z2azkotsIMS427r1er+grdBrzRG1qwP1kMilnmA+Hw8KIWtYMfDm/nvF2P9CtNvzoJozvm5xcdKqBbQa/doZt7Ax6/Do6Fh22urpamE/6TA1HIm9+DmQUZ6tWq5XyUv4c+iRixhY6QoKjl8P6i4uLxSYQKiXHejKZxOPHj+Mf//Efo9PpVMYuE0gfqjHWtg307fz8PI6Pj0tKIGwfwNAOCvrj4uKiYrMYB2wd6xuHwvrERE+OrOZUAzbA5dq+zE9OnzBbm6NaEdW0P6+FiJmDbjAPFuHZrNu4HpGtiCipXqQr+jrum4mO7KQ1Go2iY1m3jjrQJ5MB9J/vkGoAyfXDDz/EdDqNvb29EqHkWU9PT+N3v/tdiby5L29r79zF7wG2YHFaDjk1FiJYQfJ6XF6Cvy8vL0veGfmn4/G4eEIeFEL4KFUEwoJnz9yLHqG3wmBHvMGpB8uGLyKK95fD2HyP56ZfZgasRAkzYBQA1xgHFKd36wNaECjntWCM2WDi8kYeI5Th0dHRreNQGRMA6oMHD8qxZwasJycn8f3331dCxfTBsnJXGFQvTCsIHBlkCcU4Go3KrkhCRw71uNh+ZnNg7Bl/drTjdfb7/UpReoMn+pjzpvgcTI6ZIj4TMfO0kX3+517MPYnqlnVSQwzS6BcKy3MKUOCUK2SJ2pGsPefcsr65v4F5rXZzugz9n06nlc2NKE5SBoiUWCkbTNsRxMCTpzoYDOLbb78tjDbKPMvGXWpZRpDJiJlDkPtsthJDTqTAR0qih12Gx6X82KhJLcnMPjEf1pseR3LkkFuAgYGanY/8jNapEdXcU9YLcwgJ4uiA14RJCaJ99fpNeSly/gHiNPcB2VpaWioEAvduNptF5iEHYLwgaQBgzuX3dRkzP59BG3rkV7/6VXz11VdlXfh7d012AWgAvMXFxRLuZeycO84cmWHFYTIDyOcc7UHOrq5ujlK3rmSNcz1en6c3idCaxbfTkZ2WDFyzXUDPZUfMucd+JuujvN57vV5F5vjf826M4+sbsGPPnPbo2sZcM+8TYCz9PgTZeDwuzurW1lYhcJjf09PT+OUvfxm//OUvK6kp3PNt7Z0ANS8cXqMDANCsXOwpY5w8YISSmEAzLv1+v+J5TqfTcuynjbcn3EoJsMCEohjN1vpYtGwI3G8miXtk4bLgZcWRlcb19XXJ+UTh4fG9fv26sEeAp8wYeDECVhlXK0CH4HkGFHLe/MRcvnr1Kmq1m9xMDBks19HRUfz85z+P3//+93NDVTaUd8GTj5hfS9GKYDS6OTowA0Dk18AUGYSN2tvbq8ghucPcByWLbJ+cnBQGNqK6iY255TuwVT
gCfIfwGCxAdqIyg0rLjKvlMith3vNa4B4Y5/F4XJ4XBeV0HIyBPXkMrBUn18bJhZ0mLITsmul0mS7rJUAoskk4jMMvGo2bKgv/9//+3wLI5oXn7gqDGnE7jcHrynJaq9XKZj9k2eCdOV1aWorT09PY3t6u3MfnbCNrjUajsOQQDsyHWS/GPOtEkwTejEmfrJP8mo20AQ73sFNhJ8xgJIN25B+DS/oN0YR+vx/j8az8FekQ6Ghvuun1esVW0T9K+rl278LCQrEvBikmRJgrQBnjD3CzI1ar1YoO7na7t54xEywfurk/9BWn006DU5L8eQgcE1J2vKbT2YZJ5tGMu1lKA/7hcFhICOuoiChRTebVKTKWScuqn9f3fJMesf22s+Lnm0eaEbXC8a7X6yWsnvOcfWSr7+kNTgsLN/WO19fXi9NPM9bxWvPmJ5eV43roWmMpxuH169fxs5/9LA4PDyvP9T4R17cC1KwMMtvAwqQzLGw8c3JFPdne+IOhIOSIZ46nMxqNYmdnJ3q9XgG1XMdUNBOAIeV9BgiQgZB6MwafyYsEQbLQ2XNmDBDoDNwMQrju+fl55YQW6loCXGu1WqmnV6/XSy4YYeOIWRkp7ru7u1uEDEVrj80AxOUqIqpHmrnPCJ6F9vnz5yXPktcMHhiPu2LkkVV7u/zGOTDwd2gPNoR5M4O3sbFR0k2Qa8YFQ8YmjNXV1RiNbnZfnp2dlUWMgeQnIooRd+TAzWFIxtgMvWV03hxYbs1aeZ2w3vlxuJY1c35+XkJLa2trlc1gLghNnwEjOFRmvzjprV6/OUHt8vKypKuYZcnOnp9xNBpFp9MpoSazIeiCfr8fv/71r+Obb76pyGsez8yIfMiW+8Q69ilNGBNyeJ22YQYHOcWwsQHTjhh6ZmFhoYBTDCPjGjFbC+hiy4pJCfcTfQNb475nssFOvp/TOtcs3DxZ9/pB1hkPmCczvAY+82yC06rM3FLx4ODgoORKR0RhVekv1+L7Bt8AHhhGM6eM2Xg8jl/96lfx+vXrSr/ymrgrLa+vzE4bjDH+fJ7fzI1TO7geetrglwgY0S/WCmPs66Fns3OPbsLZMJnhfs9rzKnnxaF7vm88wPdoft1OJhsNI6KAULPTjC9ybKebe8BiI7srKytlb8/GxkbFwTf5xHXsVIDXWOPMF7INwQVe+vrrr+Prr7++Nffv097JoPJA+YIof+eFoqAQCB7G3uPFxUUMBoNYXV0tHhL5pwwEA8sJPbVarbIJg1AUDKxzUvNpUTSua6aVhZ6ZKISNfjMJ/iyT6XtkZWrPnaRu+kquEXmyw+GwGAJYNyvIiJtcGUA/Cn5ra6t4hpxmYuNmFoLXIqIyPo1GIx48eFAJPUVUz+998eJFASlWJG53BZxGVIEXzSAVZeYNHmaL+OHzeKaZIfWJSq7nhxKFhXE+H2OIMrRRQmlj2O0Bk+ZiJjYbdzsOyC3XM1sM4wAL77HivhnwAka5Fgc4IEdsRkTBIz9WwmZoSfbvdDqxsrISu7u7xTEFsDsVZ3Nzsxgd5hNWu9VqlZImGfwPBoP42c9+VlITGGd0FK/dJSMfMT+32EYgO5NmltEDnjsAlWWQ1CUfJsGYjMc3O4dXV1cLy40+wBADsOyos1HFoCCzoQbHdqJpGTzQrG8jbodL8+dxBrnfYDAooCaTJVm/va0vo9EoWq1WKf/WaDTi6OgoRqNR2ZeBjfO428F3NAvg77GlvXjxIv76r/86jo6OKuAjE0YfupkIiJjZGnQRZJZZU1JVTLr4+b1WDRSdO806zoeEZEcW/YwNtkNOGp/75ea5sezbxlp2AOY8g23qm8CZIx4eT+Mj8ALXYoyxOWaSGQP2B9Ennhf23mk4vnfGDOgfGNQcCeE5Leffffdd/NVf/VWFofZ4vEvnvhWgmlGLiApabjQasb6+/kYvCEbKJaUow3F8fBx7e3tFsPI1AA7T6bTi8TPAGDD6B2gjjMhnmMSImfG0cDFRTLDZAA
NR3neCNeNiIc95LXwG4en3+0UhkjMaEWXDCawcmxr4//z8vBgP7nVxcRGbm5vxww8/xObmZuzu7sZgMCgOg3Nl3FeDLvq+uLhYOYbTSmI0GsXf//3fx/fff39LeVsZvc27/BAN45zZN9r29nYFpKOUCIU4385gFTmxp47CIwWF/L3r6+uyYQd5y7v2Hfq044SCsSeKks15VLxmxhMg7TCs2zyHyw4VaxfZ4Tps6OMIYtY0CgtjPJ1OS2oPlTkYI+4HwG02m7G7uxuLi4vR6XTKJilY7lrt5vzmfr9fwDT9OT4+juPj42i1WtFut0vZIGT94uIi/v7v/z5++9vf3op+RFSdlgx+PlTLutRRAPScc8RIl3LuOfOJPrSxQXd6bwCpRjgzXv+np6fFoeNaDnUaGHNPb2IxE+XXDC7nMaWZ5QIQ8Bxcg/fz6+hdf8Ybal2hws0G1OADvcCYttvt2N3dLY4RDhXAM2J22AcbSfjuZDIpexBcE5W1TYrVaDSKv/3bv43vvvuuzItlgnYXZDc7GbY/to30G50BSMrMMo4UTgbzQoSWeeRatnOwjAa21gu20fSd/nDPeXKdWXvLLPfgek5J8X38+XmkDv1jnVFRwI6d5YDx8j0yccEmsogbbEbEaTQalUgte3OYK1dKiphVADDYhqRhDlhv7EH6r//1v8bz588rLHXu29vaex91asaRieP8bTqIoPk7AFM2TC0uLka73Y5Op1MGnF10ZvrIa+h2u6V4Ol6vaWQDS5Q3gzXPW/WufAQUxWvlYQ91nkdt9sKhc4PoiJnigFUyGMVr5nxylCcCA5sHGzWZTEqNwnq9HltbW/Hpp5+W0B2hJcaIezAfnk+UN33wmDEu5+fn8d1338XPf/7zW3mGFrZ5ntGHbu6T5w8Q5RA9489inUwmxelBDqwE7F2aLWw2m2XeI2Ygz8fLRswMZUSU3E57z/ZIzQT42lyf69lAuX9+djO/Wb5tGOzUmQnAeQJYLy4ulg1SsM9mqLh/NqY8x8bGRql5fHx8HBcXF3Hv3r3Kjlbma2Njo+IooLBXV1fL+eXdbjcajUb57OnpaXQ6nfjLv/zLUqbNDFQGInepZfbacuCz7r3enGcGGGAzqoEaoNRpAcwXr1k/nJ2dFR1sggIw65x+rkX/AcH0x2vKsobMGwT4+dDN6LeImR7LzNW88ZtOp9Hr9SqbqYjKEWK3kecaMJxcgx/y+NAxpJRksOJcQzsO9Xq9nJ8Ou20Gi7H5/vvv4+c//3mF0cvr6a7o3Qz4DEQzUIyIW/rMeszOEPNsG4i+8sZJwCBl8M7PzwuJ5txT9AdhautORwNwtDPIpK/+OztIdrQyQWAW1qkqJrr4DBvKkTkTL/QLubXMW/Zs781weu8QUUVvZsx2nr7yPlEcYyenIP7d3/1d/OY3v6lEJjyO7yOzbwWoNmQIBxdlov2QgB28TH7YTQtIZZdzr9crJVMYOIw6oRK8oZOTk+IFwN7mied/D4AZTC8C+sWAOTfEk2Zh5BoGG4yBk/y9COgfin8ymRSDGhEFvKN8WTQoQJfjsoBsbGzE/v5+EUR26kLD2/P3QnJIe97O14ioeE5HR0dxdHQ0d3zsDSKo7/KI/ljNSnveQmDRM48sKpwePHTGhlCoWfGIai1S2ABSNwCVhBJ95vl4PCtqbDk1GM2A7PLyssg9LTNF85RlZkEdNTCzNs/Qe933+/1iOMifPjs7K7JCqor7hsHIjiu1TVnPlJpChnD6DFRQntyvVqvF1tZWtNvtwgCiQ7jHs2fP4vvvv68YOCveuyKvuc0zVjyv2T87GAZrPC/6yLqAMcVBYzzILzZwox+dTqdi3GiwJQaWGDD67kMTImZrhjmmTzT+Rn74rvUvz2B29E0sOOMDEYL8+AAYgyGzfA4h8zphTpczc+SJ79FX2wPGk/SJs7OzePjwYQFSyO/y8nL0+/3467/+68JAZT2WdfCHbll/ILONRqNEl2w7srMfUT0tEh
2dZSOfPoWMuMSf05g4iRKZRhehS/ib8fd9HNb3ONthMk6wvo2oHnlqmc0ynj+DvIEbeE472BB1Hh+vJ56TCh70mTGYTCbRbrcrdsF2wljG4fx6vR79fr9EDrgm5FutVotvvvkm/uf//J8l/Y0+WzbeR/e+U7K9YO3t8jCDwaDCQHmB8hlet0Jst9vR7/dL2RmMnk/dwYAyUcfHxxERt8CCFQO/XRXAguV+WJhzqoFZCAsZE+sGSEUw6A9ghP7kvDzYZ5ew4nhM8pcMMizMrg+H10df6J83g9FH+m/wlZ/JffnHf/zHcixglguDGvpyV5goy61/Go1GOUrX8oUc0H88RML+GGHPLcacxcwiHQwG5X8WNhunaLyXmX4rID7HOFux8vl5ypDv0fyMETOG1es1Azc7phGznaQAEUJCDu8zPsg3sj8cDksqAM9GkWgMCeDS7AVrz+k8jIPvtbq6WhhdxvPy8jJOT0/jb/7mb4qSzM8FyOWZ74qhj4hbxojfyN35+XnZce6SO/N0GQ4CchAx26zKfHoTm8edkN/FxUU5atGOt20CfaDv1kkGadznXQyK7UBEVe4NeM0cmZVDtpEV61/eB6xaj/E9gKufhWdGP9sxtz7J17H+xEDj7JFXaIB8eXkZ//t//+/4x3/8x8pzM9bZpt0VR8s6g37i6CBvyBY/th3oLfSLHQPG1ydC2Z5Op9NKNCoiyql3duBoXi/8n1OhGHP/Zs1YH1kXZzmKiFu61s3Ms8fOET2e03qa8cyOQdbjfJbroqupsev7RszIRzsPjNPV1VW8evUqjo6Oyj1w9MB7r169ir/4i78op0m5P/6d7zuvvbNQ/5susrq6WnbcYmQQPAM6g1QGikW5uLhYqe81nU4roUH3gzOTCSehvBjwzJASzgeIEcYZDoeVXNXs1bGQMJbkdpmx4HldyiqzVowbixBjjjHAMxsMBiXHlFAYm8DYFONNDdzfm2UMqjEQZk/wRg2GaGtraxXA4lyeg4OD+Oabb8o45jmheVHclUafMjCp1+uxv79fSYrPANDKDLnCgGDII6LkQs1jPqkEgMLkb9iErNRwWAg1ojhQmjwHc5FD+Dxrdtb8/LwGyIBpNwthRUhjXZPoXqvVSv5SRBTGH4YYdiliFjlAVjH6sKWMHfPj8BLrmM/QzCYw5gAJGJDz8/P4u7/7u/j9739fMWYeM5oNxF1oGdxFzC/3ZweauTPQzoadDWfoNee2MfbzKkysrq4WpwHZQQc7vclyYxCbDZ6NHnPJ/axH6RPXy9/xMzJmeb37/j5VzDqaH487ax35InzP+LVarQrg5/npk6/Pdb2ml5aWSh58q9Uqcg6p8Ic//CH+6q/+qlLYnJYZ8buke90v61b0JLbYTjVAjJq7PB/OlfMgAai+H+NLBMdrgMNSrq6uyvGg1nFOwbIsmkE1S29ZiahuvrZM8vy59Jv1jPOOM0GE3AH+uA/Plp0mWgbggFETZqy1HIlAz+Z15LV1eXkZT548KemdTomIiDg6Ooq//Mu/jG+//baMifHVm2TlTe29C/X74Wu1WVkSkDXG3swkE2A2BIYQBdDtdm9tTEHhIJy12qy4OoPCfRkce9tZ0aCgHK7JnsG8RW7P3KEyKwgPsBWWvUYUz2g0KuFfvnt2dla+bya62+1Wwg1edOSw0n/ulyl5jASCYja4Vrs5J3s4HMb6+noprsv41mq1+MMf/jD35KgsGwZCd4WFssLwgkNpuNpDxIx95DV7yeTpwXiYCbJsmYklj9i7JB2CBJAaVDp83Wg0yq5UK7J5xpf7GDzTLz7H/TH67su8sctrlxSFyWRSNjBRaYL78cwRUfJxLd/I9srKSmxsbETEzGE9OzsrdXp5DQAdMUurQR+Mx+MCkgmLmjX+9ttv4x/+4R9KUfZ53jvtTczGh2o5D57mkB4/BkUYtOn05oQ/OzKkrDgi45216AeMcr1eLyd74YBMp9NSScT3ZF49XzxHRFXHZnvCej
Ooyw4S9+I9y63nkzGzXeDzjE2tVivEAvpgeXm5UroH2c9rFJ3KdwCuWdf4WawTLYPj8U2Kj20BIKHX68Vf/dVfxcHBwS0Qasd7XtTsQzbrpkxgzCOr3G/y2dlHkR0OM6nziIVGo1FOBQOfgDOQ9Ygods5Ak/lzGpLtmftpoGigOq/5+h4fM+7+HO8xRkSeGDvsEmPrNCjjEq7JD05nxEy3ZJm0k4+jzxjY6aQazf7+flnfyG23243//t//e/zud7+rOKxe8/Ow5Nvae22Syh4R9PNwOLwVxs8gFQMXEYVJzKAAYMbfHjCMDuV4GKR6vV4pQUPzIGdFyN9WOmaeaAyuw0X2+vy/AbIVGwqbvrNAeCZCda4jCzvGfZrNZhkbLyg2hgC2rIRhNhB2hNPGmz40Go1SD9Ce6dXVVXz77bfxl3/5l2U+bCzz2L6NnfpQ7W2eGwvfgJ6FHBEVRtuya1aFHyIAsAROoVhdXS0evQEEQBLlZ+XoMBRA0SCEHGXLJjKYS2HlOTJrYePtXCo7Xdy3VqsVsFKv18uxrcgGDhgb+0iJgHnzc1KWB5Dgk3YyG2pjllNlNjc3YzAYlM0pKFUOlvhv/+2/xffff18BuFaUblmGP3SzXqIhB2zqsIxkI+hKDzbiNvKTyU16luUPkIYucSoU6VfoNetPIlgmBbJuzbo0O/bOccsGPbNFvq4Bqd/L18obNVyFAzvF/7D8vBcRhVzgHsjoPIafz/NM2ARXPbDdxAklhPo3f/M38fjx47kOpMfNz3MXZHeerrXzY3AVUT0oxI6KZTuTRyaXeH1lZaWkYuFU1Gq1coIdDqzPso+YpUFZRzmaw73y3Hq9RVRTOAzE8lz5GeYRBHaAGJMsy1k/55JTBtJcw6wozmYGyTxjrmpk5p914woT6KXxeBz/9E//FF999VUl3dF98fjNw13z2js3SfmCHny8ZQAWn0PJkTzv8FFmIlnoZ2dnc71i55IgdNlDtQKxQsjehPM0ARJWmjZkBrgWJnvSDvNy7cx28HnvlmXDGGDdnjnXgrUYjUZlYxl9q9frJTmZSbbCA3hhNPiOd/iSJsE96Huj0YherxdnZ2fxF3/xF/Hq1asynnnePFZO+r4rANULOeJ2LTccArMtNIM7yxoGhM8bWJL/aGDgYs/Iw3A4vMV+42SYaeJ7li/Pgw1jLtDulAD3xxUdsmdsj5q1aDDDPWCgSFkx2ABsrq+vVz7P/bmfS6Hxup1WszCOmDQajRgOh7G5uVl5FsaLfPZf/OIX8fjx40rdRcuAdYM3x9yVZubG5ADNOeueo/F4dlAH42cnK+fOOQXKY8O92AjHWrEDjKwyb6RCuZ9sqHJkCVlz7h+v24myHPozdrojqrm6rD/PJf1hLeZ8fXLNGUdyw2GQ0a0GRaSnWP/ynPl5WK+M4erqaulTZqmGw2H8n//zf+JXv/pVxa7Ok4/scN0VvUtDhu1kG3BmubDNj6juITDjiuzxP7b9/Py8OBpeN3leXLXCeo7fzL9TFXmPdZWjU9zH9zTodn+su82cYks8TvOiDlwffOXxnOekRVSjuSakkLt59gyb7pMqcX7X1taK/r26uorj4+P4p3/6p3j8+HHJ8+Y7tquZReUzb2vvBKh50PmbIuM8TMTsfGcGggnhN2wqLBKLHyOXWUsG0CdWMdnkcfIag+sFn1kGCxiA2RNiYfYEZ+OVc+r4TGa1AIbsoIuosgc8k0POERHdbje2t7fLCRI8Cwsdw4GQZYE3IIXNctkkV0vgswC3tbW1+O1vfxvPnj27BdQsB/MMxF1rDvNFzDZFjEajW6COH9jTzPqwQF3+xGFC5MdyR47fdDotyiSDruxdWqnZu+X/eayRnwPlY4/YeXw8lz1lPpff4zvOtcVAG1zj7LGWqdhhoOT0EZchMgAzeMlgAgaEPDXuZwctIuLw8DB+/etflyMhafPk1ON3l0Cq+4rxYRyp+GGWzwYWhoP/+c3Y+3+MuQ2YHRzCpu
gy9KZ1mI249W8GCAZ4BgaWAbPnb2Kt8nxnIIgt8D2cywdotsPKtakkYYbYDCAFz9mRzvMbNKMbzZChczhIxTLPWjg/P4/f/va38Xd/93cVZtsghu/YFmfy6EM3942+8nxEPN1XwNg8Jpr3cK6tx5hD7C77SuiDgXG9Xi8YIgN56zvbC+Sfz/A8OQc6P2dmRg0E6bsJC0ePzJJ6E2N2QlhfJo18bfqSnf3cTzOZ80CjD3BBf7CHAjb17OwsfvGLX8SzZ88q4+TfXDPrhXzfee2dm6RA675hrVYrZ8aj1KwUCBfjedoweoJ5z+WezFjV6/Vy/jH/Qx+jdGAj57Grvg7KgAnKIciIKtikb1kBR1TD/TagNDMW5HnaO7PRR6AMBgFQ3uWI0G5sbFROOyLklxk3aPherxftdrss9BwyhvmASRgOh/Gb3/wmut3uXINuBTnPa78rijKiyiYioz7ZJStTPo+MMqf8D0Caxyp6TDwPvO/rDQaDyrGceWHbGAMg6AeOh50ws/UGs/ma5MzZEbTSMpvB9ZBF2CTYXwCgleJodHNSD7uSzYoidxQzt1Ng5867eJ1rNh6Py4Y+WGzro/Pz83j69Gn8j//xP+LZs2flvQyC7cVbhrO3/6Fb7idMkVNMDMKsx5ALMzF2jPy/jR8O08rKStFZXuOExTmgwQ5CDoljNyxHHt98beTgTXLoKAJgxvfxWNhg0yfLEuyY72eSBTCV1xD3d83XHG72Z9HLl5eXlcNmvAkw4saR/fnPfx5/+MMfyvGTXAu7ZsDq+1j33IVm8EifiJhguyNmbKg32WUMAV6AzEKOfJ2c5zydTisEA581WOOayJLtQ0T1iHc2rno9Gkjbmcw6xPLr8TEw5DXGY97rBrRZL3Af1oE3RTGW4B728TjVxPnudrq4nnFZJiUuLi7il7/8ZTx9+vTWes7948fR1vdp77VJyjdB+Hz6xbw6XQySlanLofikBwYRzx6jzGDnh/X1nUZg5mceuLZnywLICfr52e2hZACMsM7z/nh21z/NO++d6+qjC6fTm81TKEkrJbMjAFuzv84v4RqMAf2mOsBkMqPw8db+9m//tpxZzjPmsX+TIN4lRWn2gf+ZH4cpzVA5DzTLBPJjLx75INfYBpBDJWBXImbH7TkXFYPPa1aGzBvN90SJZoPlfptZp+/20r2mI6p5TgbYGNmIqCh/FI4LZ9O8M9nriMiKnS4YfTaG4SwBRB01yI4Zz/7kyZP42c9+FgcHB5V5tdOQn9FG/328+T9Wy4yH2XADUz8/8suYZmBuPYUudvqHZR35stFnfjnal1JtODxmsLwJJbP/XD+TCG7ZoEfc3r1vXU9/vT4cIUAPG1xmFon/kU/uZQfp+vq6hDdtu+gfTF82/nbCrG9wZH//+9/HP//zP5d0Lt/bdtW5jllO3hUq/WO2DJ7tkJrQsVNkkJefJY+lnet5Tll2umu1m3xU8qi5ZgZlyG22l7YjJh2sh31Nj4HXj/X5vLXp/52vmwE/42hSDAIAHcr9vCZWVlYqTKZBrJ+ZMSVcb5m0I/fP//zPBZx6nAze+e05mYcp3tRqdwVQfGwf28f2sX1sH9vH9rF9bB9bxHsU6v/YPraP7WP72D62j+1j+9g+tj9m+whQP7aP7WP72D62j+1j+9g+tjvVPgLUj+1j+9g+to/tY/vYPraP7U61jwD1Y/vYPraP7WP72D62j+1ju1PtI0D92D62j+1j+9g+to/tY/vY7lT7CFA/to/tY/vYPraP7WP72D62O9X+P0LyUc+ZOd1TAAAAAElFTkSuQmCC", + "text/plain": [ + "
" + ] + }, + "metadata": { + "needs_background": "light" + }, + "output_type": "display_data" + } + ], + "source": [ + "def show_examples(ds, fn, n=16):\n", + " cols = 4\n", + " rows = (n + cols - 1) // cols\n", + " _, axs = plt.subplots(rows, cols, figsize=(12, 3 * rows), squeeze=False)\n", + " if isinstance(ds, tf.data.Dataset):\n", + " ds = ds.take(n)\n", + " else:\n", + " ds = itertools.islice(ds, n)\n", + " for index, example in enumerate(ds):\n", + " i, j = index // cols, index % cols\n", + " axs[i, j].imshow(fn(example), cmap='gray')\n", + " axs[i, j].axis('off')\n", + " plt.show()\n", + "\n", + "display_fn = lambda example: example['reconstruction_rss'].numpy()\n", + "show_examples(ds_train, display_fn)" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": {}, + "outputs": [], + "source": [ + "def create_kspace_mask(kspace):\n", + " \"\"\"Subsamples a fastMRI example (single slice).\n", + "\n", + " Args:\n", + " ds: A `tf.data.Dataset` object.\n", + " \"\"\"\n", + " num_lines = tf.shape(kspace)[-1]\n", + " density_1d = tfmri.sampling.density_grid(shape=[num_lines],\n", + " inner_density=1.0,\n", + " inner_cutoff=0.08,\n", + " outer_cutoff=0.08,\n", + " outer_density=0.25)\n", + " mask_1d = tfmri.sampling.random_mask(\n", + " shape=[num_lines], density=density_1d)\n", + " mask_2d = tf.broadcast_to(mask_1d, tf.shape(kspace)[-2:])\n", + " return mask_2d\n", + " \n", + "def reconstruct_zerofilled(kspace, mask=None, sensitivities=None):\n", + " image_shape = tf.shape(kspace)[-2:]\n", + " image = tfmri.recon.adjoint(kspace, image_shape,\n", + " mask=mask, sensitivities=sensitivities)\n", + " if sensitivities is None:\n", + " image = tfmri.coils.combine_coils(image, coil_axis=-3)\n", + " return image\n", + "\n", + "def filter_kspace_lowpass(kspace):\n", + " def box(freq):\n", + " cutoff = fully_sampled_region * np.pi\n", + " result = tf.where(tf.math.abs(freq) < cutoff, 1, 0)\n", + " return result\n", + " return 
tfmri.signal.filter_kspace(kspace, filter_fn=box, filter_rank=1)\n", + "\n", + "def compute_sensitivities(kspace):\n", + " filt_kspace = filter_kspace_lowpass(kspace)\n", + " filt_image = reconstruct_zerofilled(filt_kspace)\n", + " sensitivities = tfmri.coils.estimate_sensitivities(filt_image, coil_axis=-3)\n", + " return sensitivities\n", + "\n", + "def scale_kspace(kspace):\n", + " filt_kspace = filter_kspace_lowpass(kspace)\n", + " filt_image = reconstruct_zerofilled(filt_kspace)\n", + " scale = tf.math.reduce_max(tf.math.abs(filt_image))\n", + " return kspace / tf.cast(scale, kspace.dtype)" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "metadata": {}, + "outputs": [], + "source": [ + "def preprocess_fastmri_example(example, training=True):\n", + " # Drop the `reconstruction_rss` element. We will not be using that.\n", + " if 'reconstruction_rss' in example:\n", + " example.pop('reconstruction_rss')\n", + "\n", + " if training:\n", + " # Crop to 320x320.\n", + " image = tfmri.signal.ifft(example['kspace'], axes=[-2, -1], shift=True)\n", + " image = tfmri.resize_with_crop_or_pad(image, [320, 320])\n", + " example['kspace'] = tfmri.signal.fft(image, axes=[-2, -1], shift=True)\n", + "\n", + " # Create a subsampling mask.\n", + " example['mask'] = create_kspace_mask(example['kspace'])\n", + " full_kspace = example['kspace']\n", + " example['kspace'] = tf.where(example['mask'], example['kspace'], 0)\n", + "\n", + " # Create output image from fully sampled k-space.\n", + " full_kspace = scale_kspace(full_kspace)\n", + " image = reconstruct_zerofilled(full_kspace)\n", + " image = tf.expand_dims(image, -1)\n", + " image = tf.math.abs(image)\n", + " example = (example, image)\n", + " return example\n", + "\n", + "ds_train = ds_train.map(preprocess_fastmri_example)\n", + "ds_val = ds_val.map(preprocess_fastmri_example)\n", + "# ds_test = ds_test.map(functools.partial(preprocess_fastmri_example, training=False))\n", + "\n", + "# display_fn = lambda 
example: np.abs(example['image'].numpy()[5, ...])\n", + "# show_examples(ds_train, display_fn, n=16)" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "metadata": {}, + "outputs": [], + "source": [ + "batch_size = 1\n", + "\n", + "ds_train = ds_train.shuffle(buffer_size=10)\n", + "\n", + "def finalize_fastmri_dataset(ds):\n", + " ds = ds.cache()\n", + " ds = ds.batch(batch_size)\n", + " ds = ds.prefetch(buffer_size=tf.data.AUTOTUNE)\n", + " return ds\n", + "\n", + "ds_train = finalize_fastmri_dataset(ds_train)\n", + "ds_val = finalize_fastmri_dataset(ds_val)\n", + "# ds_test = finalize_fastmri_dataset(ds_test, training=False) " + ] + }, + { + "cell_type": "code", + "execution_count": 31, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{'kspace': , 'mask': }\n" + ] + } + ], + "source": [ + "def create_keras_inputs(ds):\n", + " return tf.nest.map_structure(\n", + " lambda x, name: tf.keras.Input(shape=x.shape[1:], dtype=x.dtype, name=name),\n", + " ds.element_spec[0], {k: k for k in ds.element_spec[0].keys()})\n", + "\n", + "inputs = create_keras_inputs(ds_train)\n", + "\n", + "print(inputs)" + ] + }, + { + "cell_type": "code", + "execution_count": 32, + "metadata": {}, + "outputs": [], + "source": [ + "def filter_kspace_lowpass(kspace):\n", + " def box(freq):\n", + " cutoff = fully_sampled_region * np.pi\n", + " result = tf.where(tf.math.abs(freq) < cutoff, 1, 0)\n", + " return result\n", + " return tfmri.signal.filter_kspace(kspace, filter_fn=box, filter_rank=1)\n", + "\n", + "# def scale_kspace(kspace, operator):\n", + "# filt_kspace = filter_kspace_lowpass(kspace)\n", + "# filt_image = operator.transform(filt_kspace, adjoint=True)\n", + "# scale = tf.math.reduce_max(tf.math.abs(filt_image))\n", + "# return kspace / tf.cast(scale, kspace.dtype)\n", + "\n", + "\n", + "\n", + "class LinearOperatorLayer(tf.keras.layers.Layer):\n", + " def __init__(self, operator, input_names, **kwargs):\n", + " 
super().__init__(**kwargs)\n", + " self.operator = operator\n", + " self.input_names = input_names\n", + "\n", + " def parse_inputs(self, inputs):\n", + " main = {k: inputs[k] for k in self.input_names}\n", + " args = ()\n", + " kwargs = {k: v for k, v in inputs.items() if k not in self.input_names}\n", + " return main, args, kwargs\n", + "\n", + " def get_operator(self, inputs):\n", + " main, args, kwargs = self.parse_inputs(inputs)\n", + " return self.operator(*args, **kwargs)\n", + "\n", + "\n", + "class KSpaceScaling(LinearOperatorLayer):\n", + " def __init__(self,\n", + " operator=tfmri.linalg.LinearOperatorMRI,\n", + " kspace_index='kspace',\n", + " passthrough=False,\n", + " **kwargs):\n", + " super().__init__(operator=operator, input_names=(kspace_index,), **kwargs)\n", + " self.operator = operator\n", + " self.kspace_index = kspace_index\n", + " self.passthrough = passthrough\n", + "\n", + " def call(self, inputs):\n", + " main, args, kwargs = self.parse_inputs(inputs)\n", + " kspace = self.scale_kspace(main[self.kspace_index], *args, **kwargs)\n", + " if self.passthrough:\n", + " return {self.kspace_index: kspace, **kwargs}\n", + " return kspace\n", + "\n", + " def scale_kspace(self, kspace, *args, **kwargs):\n", + " filt_kspace = filter_kspace_lowpass(kspace)\n", + " filt_image = tfmri.recon.adjoint(filt_kspace, *args, **kwargs)\n", + " scale = tf.math.reduce_max(tf.math.abs(filt_image))\n", + " return kspace / tf.cast(scale, kspace.dtype)\n", + "\n", + "\n", + "class CoilSensitivities(LinearOperatorLayer):\n", + " def __init__(self,\n", + " operator=tfmri.linalg.LinearOperatorMRI,\n", + " kspace_index='kspace',\n", + " sensitivities_index='sensitivities',\n", + " passthrough=False,\n", + " **kwargs):\n", + " super().__init__(operator=operator, input_names=(kspace_index,), **kwargs)\n", + " self.kspace_index = kspace_index\n", + " self.sensitivities_index = sensitivities_index\n", + " self.passthrough = passthrough\n", + "\n", + " def call(self, 
inputs):\n", + " main, args, kwargs = self.parse_inputs(inputs)\n", + " # TODO: unused operator.\n", + " sensitivities = self.compute_sensitivities(\n", + " main[self.kspace_index], *args, **kwargs)\n", + " if self.passthrough:\n", + " return {self.kspace_index: main[self.kspace_index], **kwargs,\n", + " self.sensitivities_index: sensitivities}\n", + " return sensitivities\n", + "\n", + " def compute_sensitivities(self, kspace, *args, **kwargs):\n", + " filt_kspace = filter_kspace_lowpass(kspace)\n", + " filt_image = tfmri.recon.adjoint(filt_kspace, *args, **kwargs)\n", + " sensitivities = tfmri.coils.estimate_sensitivities(filt_image, coil_axis=-3)\n", + " return sensitivities\n", + "\n", + "\n", + "class ReconAdjoint(LinearOperatorLayer):\n", + " def __init__(self,\n", + " kspace_index='kspace',\n", + " image_index='image',\n", + " passthrough=False,\n", + " **kwargs):\n", + " super().__init__(operator=tfmri.linalg.LinearOperatorMRI,\n", + " input_names=(kspace_index,),\n", + " **kwargs)\n", + " self.kspace_index = kspace_index\n", + " self.image_index = image_index\n", + " self.passthrough = passthrough\n", + "\n", + " def call(self, inputs):\n", + " main, args, kwargs = self.parse_inputs(inputs)\n", + " image = tfmri.recon.adjoint(main[self.kspace_index], *args, **kwargs)\n", + " image = tf.expand_dims(image, -1)\n", + " if self.passthrough:\n", + " return {self.kspace_index: main[self.kspace_index], **kwargs,\n", + " self.image_index: image}\n", + " return image\n" + ] + }, + { + "cell_type": "code", + "execution_count": 33, + "metadata": {}, + "outputs": [], + "source": [ + "# def BaselineUNet(inputs):\n", + "# zfill = AdjointRecon(magnitude_only=True, name='zfill')(inputs)\n", + "# image = tfmri.models.UNet2D(\n", + "# filters=[32, 64, 128],\n", + "# kernel_size=3,\n", + "# out_channels=1,\n", + "# name='image')(zfill)\n", + "# outputs = {'zfill': zfill, 'image': image}\n", + "# return tf.keras.Model(inputs=inputs, outputs=outputs)\n", + "\n", + "# model = 
BaselineUNet(inputs)\n", + "\n", + "# model.compile(optimizer='adam',\n", + "# loss='mse',\n", + "# metrics=[tfmri.metrics.PSNR(), tfmri.metrics.SSIM()])\n", + "\n", + "# model.summary()" + ] + }, + { + "cell_type": "code", + "execution_count": 34, + "metadata": {}, + "outputs": [], + "source": [ + "# model.fit(ds_train, epochs=10, validation_data=ds_val)" + ] + }, + { + "cell_type": "code", + "execution_count": 35, + "metadata": {}, + "outputs": [], + "source": [ + "# preds = model.predict(ds_train.take(30))" + ] + }, + { + "cell_type": "code", + "execution_count": 36, + "metadata": {}, + "outputs": [], + "source": [ + "# show_examples(preds['image'], lambda x: np.abs(x), n=16)" + ] + }, + { + "cell_type": "code", + "execution_count": 39, + "metadata": {}, + "outputs": [ + { + "ename": "SyntaxError", + "evalue": "invalid syntax (892461145.py, line 21)", + "output_type": "error", + "traceback": [ + "\u001b[0;36m Input \u001b[0;32mIn [39]\u001b[0;36m\u001b[0m\n\u001b[0;31m name=f'reg_{i}')(image)\u001b[0m\n\u001b[0m ^\u001b[0m\n\u001b[0;31mSyntaxError\u001b[0m\u001b[0;31m:\u001b[0m invalid syntax\n" + ] + } + ], + "source": [ + "def VarNet(inputs, num_iterations=5):\n", + " kspace = inputs['kspace']\n", + " kwargs = {k: inputs[k] for k in inputs.keys() if k != 'kspace'}\n", + "\n", + " if 'image_shape' not in kwargs:\n", + " kwargs['image_shape'] = tf.shape(kspace)[-2:]\n", + "\n", + " kspace = KSpaceScaling()({'kspace': kspace, **kwargs})\n", + " kwargs['sensitivities'] = CoilSensitivities()({'kspace': kspace, **kwargs})\n", + "\n", + " zfill = ReconAdjoint()({'kspace': kspace, **kwargs})\n", + "\n", + " image = zfill\n", + " for i in range(num_iterations):\n", + " image = tfmri.models.UNet2D(\n", + " filters=[32, 64, 128],\n", + " kernel_size=3,\n", + " activation=tfmri.activations.complex_relu,\n", + " out_channels=1,\n", + " dtype=tf.complex64,\n", + " name=f'reg_{i}')(image)\n", + " image = tfmri.layers.LeastSquaresGradientDescent(\n", + " 
operator=tfmri.linalg.LinearOperatorMRI,\n", + " dtype=tf.complex64,\n", + " name=f'lsgd_{i}')(\n", + " {'x': image, 'b': kspace, **kwargs})\n", + "\n", + " outputs = {'zfill': zfill, 'image': image}\n", + " return tf.keras.Model(inputs=inputs, outputs=outputs)\n", + "\n", + "model = VarNet(inputs)\n", + "\n", + "model.compile(optimizer='adam',\n", + " loss='mse',\n", + " metrics=[tfmri.metrics.PSNR(), tfmri.metrics.SSIM()])\n", + "\n", + "model.summary()" + ] + }, + { + "cell_type": "code", + "execution_count": 38, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Epoch 1/10\n", + "(None, 320, 320) (None, None) (None, None, None)\n", + "(None, 320, 320) (None, None) (None, None, None)\n", + "(None, 320, 320) (None, None) (None, None, None)\n", + "(None, 320, 320) (None, None) (None, None, None)\n", + "(None, 320, 320) (None, None) (None, None, None)\n", + "(None, 320, 320) (None, None) (None, None, None)\n", + "(None, 320, 320) (None, None) (None, None, None)\n", + "(None, 320, 320) (None, None) (None, None, None)\n", + "(None, 320, 320) (None, None) (None, None, None)\n", + "(None, 320, 320) (None, None) (None, None, None)\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2022-08-05 11:07:45.069637: I tensorflow/stream_executor/cuda/cuda_dnn.cc:384] Loaded cuDNN version 8101\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + " 76/Unknown - 89s 801ms/step - loss: 0.5787 - least_squares_gradient_descent_10_loss: 0.2163 - recon_adjoint_2_loss: 0.3624 - least_squares_gradient_descent_10_psnr: 10.0989 - least_squares_gradient_descent_10_ssim: 0.1636 - recon_adjoint_2_psnr: 6.0361 - recon_adjoint_2_ssim: 0.1031" + ] + }, + { + "ename": "KeyboardInterrupt", + "evalue": "", + "output_type": "error", + "traceback": [ + "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[0;31mKeyboardInterrupt\u001b[0m Traceback 
(most recent call last)", + "\u001b[1;32m/workspaces/tensorflow-mri/tools/docs/tutorials/recon/unet_fastmri.ipynb Cell 20\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[0;32m----> 1\u001b[0m model\u001b[39m.\u001b[39;49mfit(ds_train, epochs\u001b[39m=\u001b[39;49m\u001b[39m10\u001b[39;49m, validation_data\u001b[39m=\u001b[39;49mds_val)\n", + "File \u001b[0;32m/usr/local/lib/python3.8/site-packages/keras/utils/traceback_utils.py:64\u001b[0m, in \u001b[0;36mfilter_traceback..error_handler\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 62\u001b[0m filtered_tb \u001b[39m=\u001b[39m \u001b[39mNone\u001b[39;00m\n\u001b[1;32m 63\u001b[0m \u001b[39mtry\u001b[39;00m:\n\u001b[0;32m---> 64\u001b[0m \u001b[39mreturn\u001b[39;00m fn(\u001b[39m*\u001b[39;49margs, \u001b[39m*\u001b[39;49m\u001b[39m*\u001b[39;49mkwargs)\n\u001b[1;32m 65\u001b[0m \u001b[39mexcept\u001b[39;00m \u001b[39mException\u001b[39;00m \u001b[39mas\u001b[39;00m e: \u001b[39m# pylint: disable=broad-except\u001b[39;00m\n\u001b[1;32m 66\u001b[0m filtered_tb \u001b[39m=\u001b[39m _process_traceback_frames(e\u001b[39m.\u001b[39m__traceback__)\n", + "File \u001b[0;32m/usr/local/lib/python3.8/site-packages/keras/engine/training.py:1409\u001b[0m, in \u001b[0;36mModel.fit\u001b[0;34m(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, validation_batch_size, validation_freq, max_queue_size, workers, use_multiprocessing)\u001b[0m\n\u001b[1;32m 1402\u001b[0m \u001b[39mwith\u001b[39;00m tf\u001b[39m.\u001b[39mprofiler\u001b[39m.\u001b[39mexperimental\u001b[39m.\u001b[39mTrace(\n\u001b[1;32m 1403\u001b[0m \u001b[39m'\u001b[39m\u001b[39mtrain\u001b[39m\u001b[39m'\u001b[39m,\n\u001b[1;32m 1404\u001b[0m epoch_num\u001b[39m=\u001b[39mepoch,\n\u001b[1;32m 1405\u001b[0m step_num\u001b[39m=\u001b[39mstep,\n\u001b[1;32m 1406\u001b[0m batch_size\u001b[39m=\u001b[39mbatch_size,\n\u001b[1;32m 
1407\u001b[0m _r\u001b[39m=\u001b[39m\u001b[39m1\u001b[39m):\n\u001b[1;32m 1408\u001b[0m callbacks\u001b[39m.\u001b[39mon_train_batch_begin(step)\n\u001b[0;32m-> 1409\u001b[0m tmp_logs \u001b[39m=\u001b[39m \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49mtrain_function(iterator)\n\u001b[1;32m 1410\u001b[0m \u001b[39mif\u001b[39;00m data_handler\u001b[39m.\u001b[39mshould_sync:\n\u001b[1;32m 1411\u001b[0m context\u001b[39m.\u001b[39masync_wait()\n", + "File \u001b[0;32m/usr/local/lib/python3.8/site-packages/tensorflow/python/util/traceback_utils.py:150\u001b[0m, in \u001b[0;36mfilter_traceback..error_handler\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 148\u001b[0m filtered_tb \u001b[39m=\u001b[39m \u001b[39mNone\u001b[39;00m\n\u001b[1;32m 149\u001b[0m \u001b[39mtry\u001b[39;00m:\n\u001b[0;32m--> 150\u001b[0m \u001b[39mreturn\u001b[39;00m fn(\u001b[39m*\u001b[39;49margs, \u001b[39m*\u001b[39;49m\u001b[39m*\u001b[39;49mkwargs)\n\u001b[1;32m 151\u001b[0m \u001b[39mexcept\u001b[39;00m \u001b[39mException\u001b[39;00m \u001b[39mas\u001b[39;00m e:\n\u001b[1;32m 152\u001b[0m filtered_tb \u001b[39m=\u001b[39m _process_traceback_frames(e\u001b[39m.\u001b[39m__traceback__)\n", + "File \u001b[0;32m/usr/local/lib/python3.8/site-packages/tensorflow/python/eager/def_function.py:915\u001b[0m, in \u001b[0;36mFunction.__call__\u001b[0;34m(self, *args, **kwds)\u001b[0m\n\u001b[1;32m 912\u001b[0m compiler \u001b[39m=\u001b[39m \u001b[39m\"\u001b[39m\u001b[39mxla\u001b[39m\u001b[39m\"\u001b[39m \u001b[39mif\u001b[39;00m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_jit_compile \u001b[39melse\u001b[39;00m \u001b[39m\"\u001b[39m\u001b[39mnonXla\u001b[39m\u001b[39m\"\u001b[39m\n\u001b[1;32m 914\u001b[0m \u001b[39mwith\u001b[39;00m OptionalXlaContext(\u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_jit_compile):\n\u001b[0;32m--> 915\u001b[0m result \u001b[39m=\u001b[39m \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49m_call(\u001b[39m*\u001b[39;49margs, 
\u001b[39m*\u001b[39;49m\u001b[39m*\u001b[39;49mkwds)\n\u001b[1;32m 917\u001b[0m new_tracing_count \u001b[39m=\u001b[39m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mexperimental_get_tracing_count()\n\u001b[1;32m 918\u001b[0m without_tracing \u001b[39m=\u001b[39m (tracing_count \u001b[39m==\u001b[39m new_tracing_count)\n", + "File \u001b[0;32m/usr/local/lib/python3.8/site-packages/tensorflow/python/eager/def_function.py:947\u001b[0m, in \u001b[0;36mFunction._call\u001b[0;34m(self, *args, **kwds)\u001b[0m\n\u001b[1;32m 944\u001b[0m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_lock\u001b[39m.\u001b[39mrelease()\n\u001b[1;32m 945\u001b[0m \u001b[39m# In this case we have created variables on the first call, so we run the\u001b[39;00m\n\u001b[1;32m 946\u001b[0m \u001b[39m# defunned version which is guaranteed to never create variables.\u001b[39;00m\n\u001b[0;32m--> 947\u001b[0m \u001b[39mreturn\u001b[39;00m \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49m_stateless_fn(\u001b[39m*\u001b[39;49margs, \u001b[39m*\u001b[39;49m\u001b[39m*\u001b[39;49mkwds) \u001b[39m# pylint: disable=not-callable\u001b[39;00m\n\u001b[1;32m 948\u001b[0m \u001b[39melif\u001b[39;00m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_stateful_fn \u001b[39mis\u001b[39;00m \u001b[39mnot\u001b[39;00m \u001b[39mNone\u001b[39;00m:\n\u001b[1;32m 949\u001b[0m \u001b[39m# Release the lock early so that multiple threads can perform the call\u001b[39;00m\n\u001b[1;32m 950\u001b[0m \u001b[39m# in parallel.\u001b[39;00m\n\u001b[1;32m 951\u001b[0m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_lock\u001b[39m.\u001b[39mrelease()\n", + "File \u001b[0;32m/usr/local/lib/python3.8/site-packages/tensorflow/python/eager/function.py:2453\u001b[0m, in \u001b[0;36mFunction.__call__\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 2450\u001b[0m \u001b[39mwith\u001b[39;00m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_lock:\n\u001b[1;32m 2451\u001b[0m (graph_function,\n\u001b[1;32m 2452\u001b[0m 
filtered_flat_args) \u001b[39m=\u001b[39m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_maybe_define_function(args, kwargs)\n\u001b[0;32m-> 2453\u001b[0m \u001b[39mreturn\u001b[39;00m graph_function\u001b[39m.\u001b[39;49m_call_flat(\n\u001b[1;32m 2454\u001b[0m filtered_flat_args, captured_inputs\u001b[39m=\u001b[39;49mgraph_function\u001b[39m.\u001b[39;49mcaptured_inputs)\n", + "File \u001b[0;32m/usr/local/lib/python3.8/site-packages/tensorflow/python/eager/function.py:1860\u001b[0m, in \u001b[0;36mConcreteFunction._call_flat\u001b[0;34m(self, args, captured_inputs, cancellation_manager)\u001b[0m\n\u001b[1;32m 1856\u001b[0m possible_gradient_type \u001b[39m=\u001b[39m gradients_util\u001b[39m.\u001b[39mPossibleTapeGradientTypes(args)\n\u001b[1;32m 1857\u001b[0m \u001b[39mif\u001b[39;00m (possible_gradient_type \u001b[39m==\u001b[39m gradients_util\u001b[39m.\u001b[39mPOSSIBLE_GRADIENT_TYPES_NONE\n\u001b[1;32m 1858\u001b[0m \u001b[39mand\u001b[39;00m executing_eagerly):\n\u001b[1;32m 1859\u001b[0m \u001b[39m# No tape is watching; skip to running the function.\u001b[39;00m\n\u001b[0;32m-> 1860\u001b[0m \u001b[39mreturn\u001b[39;00m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_build_call_outputs(\u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49m_inference_function\u001b[39m.\u001b[39;49mcall(\n\u001b[1;32m 1861\u001b[0m ctx, args, cancellation_manager\u001b[39m=\u001b[39;49mcancellation_manager))\n\u001b[1;32m 1862\u001b[0m forward_backward \u001b[39m=\u001b[39m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_select_forward_and_backward_functions(\n\u001b[1;32m 1863\u001b[0m args,\n\u001b[1;32m 1864\u001b[0m possible_gradient_type,\n\u001b[1;32m 1865\u001b[0m executing_eagerly)\n\u001b[1;32m 1866\u001b[0m forward_function, args_with_tangents \u001b[39m=\u001b[39m forward_backward\u001b[39m.\u001b[39mforward()\n", + "File \u001b[0;32m/usr/local/lib/python3.8/site-packages/tensorflow/python/eager/function.py:497\u001b[0m, in 
\u001b[0;36m_EagerDefinedFunction.call\u001b[0;34m(self, ctx, args, cancellation_manager)\u001b[0m\n\u001b[1;32m 495\u001b[0m \u001b[39mwith\u001b[39;00m _InterpolateFunctionError(\u001b[39mself\u001b[39m):\n\u001b[1;32m 496\u001b[0m \u001b[39mif\u001b[39;00m cancellation_manager \u001b[39mis\u001b[39;00m \u001b[39mNone\u001b[39;00m:\n\u001b[0;32m--> 497\u001b[0m outputs \u001b[39m=\u001b[39m execute\u001b[39m.\u001b[39;49mexecute(\n\u001b[1;32m 498\u001b[0m \u001b[39mstr\u001b[39;49m(\u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49msignature\u001b[39m.\u001b[39;49mname),\n\u001b[1;32m 499\u001b[0m num_outputs\u001b[39m=\u001b[39;49m\u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49m_num_outputs,\n\u001b[1;32m 500\u001b[0m inputs\u001b[39m=\u001b[39;49margs,\n\u001b[1;32m 501\u001b[0m attrs\u001b[39m=\u001b[39;49mattrs,\n\u001b[1;32m 502\u001b[0m ctx\u001b[39m=\u001b[39;49mctx)\n\u001b[1;32m 503\u001b[0m \u001b[39melse\u001b[39;00m:\n\u001b[1;32m 504\u001b[0m outputs \u001b[39m=\u001b[39m execute\u001b[39m.\u001b[39mexecute_with_cancellation(\n\u001b[1;32m 505\u001b[0m \u001b[39mstr\u001b[39m(\u001b[39mself\u001b[39m\u001b[39m.\u001b[39msignature\u001b[39m.\u001b[39mname),\n\u001b[1;32m 506\u001b[0m num_outputs\u001b[39m=\u001b[39m\u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_num_outputs,\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 509\u001b[0m ctx\u001b[39m=\u001b[39mctx,\n\u001b[1;32m 510\u001b[0m cancellation_manager\u001b[39m=\u001b[39mcancellation_manager)\n", + "File \u001b[0;32m/usr/local/lib/python3.8/site-packages/tensorflow/python/eager/execute.py:54\u001b[0m, in \u001b[0;36mquick_execute\u001b[0;34m(op_name, num_outputs, inputs, attrs, ctx, name)\u001b[0m\n\u001b[1;32m 52\u001b[0m \u001b[39mtry\u001b[39;00m:\n\u001b[1;32m 53\u001b[0m ctx\u001b[39m.\u001b[39mensure_initialized()\n\u001b[0;32m---> 54\u001b[0m tensors \u001b[39m=\u001b[39m pywrap_tfe\u001b[39m.\u001b[39;49mTFE_Py_Execute(ctx\u001b[39m.\u001b[39;49m_handle, device_name, 
op_name,\n\u001b[1;32m 55\u001b[0m inputs, attrs, num_outputs)\n\u001b[1;32m 56\u001b[0m \u001b[39mexcept\u001b[39;00m core\u001b[39m.\u001b[39m_NotOkStatusException \u001b[39mas\u001b[39;00m e:\n\u001b[1;32m 57\u001b[0m \u001b[39mif\u001b[39;00m name \u001b[39mis\u001b[39;00m \u001b[39mnot\u001b[39;00m \u001b[39mNone\u001b[39;00m:\n", + "\u001b[0;31mKeyboardInterrupt\u001b[0m: " + ] + } + ], + "source": [ + "model.fit(ds_train, epochs=10, validation_data=ds_val)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3.8.2 64-bit", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.2" + }, + "orig_nbformat": 4, + "vscode": { + "interpreter": { + "hash": "0adcc2737ebf6a4a119f135174df96668767fca1ef1112612db5ecadf2b6d608" + } + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/tools/docs/tutorials/recon/varnet.ipynb b/tools/docs/tutorials/recon/varnet.ipynb new file mode 100644 index 00000000..babe3233 --- /dev/null +++ b/tools/docs/tutorials/recon/varnet.ipynb @@ -0,0 +1,37 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Image reconstruction with variational network (VarNet)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3.8.2 64-bit", + "language": "python", + "name": "python3" + }, + "language_info": { + "name": "python", + "version": "3.8.2" + }, + "orig_nbformat": 4, + "vscode": { + "interpreter": { + "hash": "0adcc2737ebf6a4a119f135174df96668767fca1ef1112612db5ecadf2b6d608" + } + } + }, + "nbformat": 4, + 
"nbformat_minor": 2 +}